hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
77f5a84ee7e8e3eccfcadfcdc022467f83791c14
| 46
|
py
|
Python
|
s2g/__init__.py
|
vishalbelsare/s2g
|
e94a5a99f3bb5a39c574513eb4e343a09f2f6b74
|
[
"MIT"
] | 20
|
2017-02-14T15:26:47.000Z
|
2021-05-11T12:44:40.000Z
|
s2g/__init__.py
|
vishalbelsare/s2g
|
e94a5a99f3bb5a39c574513eb4e343a09f2f6b74
|
[
"MIT"
] | 8
|
2016-12-22T13:01:23.000Z
|
2021-07-15T09:53:54.000Z
|
s2g/__init__.py
|
caesar0301/python-s2g
|
e94a5a99f3bb5a39c574513eb4e343a09f2f6b74
|
[
"MIT"
] | 6
|
2017-02-14T15:29:15.000Z
|
2019-05-03T00:03:27.000Z
|
from .shapegraph import *
from .bonus import *
| 23
| 25
| 0.76087
| 6
| 46
| 5.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 46
| 2
| 26
| 23
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7af00a1b8508c546116696e2fc7a7a3ba73a7757
| 117
|
py
|
Python
|
controllers/reddit/__init__.py
|
atas98/telegram-reddit-bot
|
e021533cb8acaa439ed57dc7e20e1b5a04970af8
|
[
"MIT"
] | 4
|
2021-03-25T09:10:04.000Z
|
2021-09-25T07:04:30.000Z
|
controllers/reddit/__init__.py
|
atas98/telegram-reddit-bot
|
e021533cb8acaa439ed57dc7e20e1b5a04970af8
|
[
"MIT"
] | 2
|
2022-01-10T14:12:31.000Z
|
2022-01-12T22:56:12.000Z
|
controllers/reddit/__init__.py
|
atas98/telegram-reddit-bot
|
e021533cb8acaa439ed57dc7e20e1b5a04970af8
|
[
"MIT"
] | 1
|
2021-12-18T08:28:34.000Z
|
2021-12-18T08:28:34.000Z
|
from .reddit import Reddit, Post_Types, Sort_Types, Post_Data
__all__ = [Post_Types, Sort_Types, Post_Data, Reddit]
| 29.25
| 61
| 0.794872
| 18
| 117
| 4.611111
| 0.444444
| 0.216867
| 0.313253
| 0.433735
| 0.626506
| 0.626506
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119658
| 117
| 3
| 62
| 39
| 0.805825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
bb0afd931f58257044eb230162b9983c3d129f25
| 67
|
py
|
Python
|
benchmark_function/__init__.py
|
yshimizu12/BenchmarkFunction
|
66ad568d092f2c325d685e10c02942745ed33b07
|
[
"MIT"
] | 2
|
2021-07-16T01:10:43.000Z
|
2021-07-16T04:49:12.000Z
|
benchmark_function/__init__.py
|
yshimizu12/BenchmarkFunction
|
66ad568d092f2c325d685e10c02942745ed33b07
|
[
"MIT"
] | null | null | null |
benchmark_function/__init__.py
|
yshimizu12/BenchmarkFunction
|
66ad568d092f2c325d685e10c02942745ed33b07
|
[
"MIT"
] | 1
|
2021-07-16T05:19:20.000Z
|
2021-07-16T05:19:20.000Z
|
from benchmark_function.benchmark_function import BenchmarkFunction
| 67
| 67
| 0.940299
| 7
| 67
| 8.714286
| 0.714286
| 0.557377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044776
| 67
| 1
| 67
| 67
| 0.953125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bb1a7712dcfcd284bd95f5d2e8d558248c33fdc0
| 24
|
py
|
Python
|
server/rest/__init__.py
|
Nugetzrul3/ApiServer
|
ee031dd1659331e41f241402afc96cddf9750ced
|
[
"MIT"
] | 1
|
2021-11-07T10:05:23.000Z
|
2021-11-07T10:05:23.000Z
|
server/rest/__init__.py
|
equitypay/api-server
|
6830501351c944c075e4792f941b23c9ff63e029
|
[
"MIT"
] | 3
|
2021-09-05T18:22:42.000Z
|
2021-09-26T06:21:39.000Z
|
server/rest/__init__.py
|
equitypay/api-server
|
6830501351c944c075e4792f941b23c9ff63e029
|
[
"MIT"
] | 2
|
2020-05-19T13:20:00.000Z
|
2021-09-26T04:58:41.000Z
|
from .views import rest
| 12
| 23
| 0.791667
| 4
| 24
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bb3d506772c1cc57d6849b74aa95d7c2eff0b12c
| 135
|
py
|
Python
|
am_i_the_asshole/models/__init__.py
|
mirandrom/am-i-the-asshole
|
e7e4f00aa193931d45e4012db5cc65d3679faa90
|
[
"MIT"
] | 1
|
2020-10-05T16:39:18.000Z
|
2020-10-05T16:39:18.000Z
|
am_i_the_asshole/models/__init__.py
|
amr-amr/am-i-the-asshole
|
e7e4f00aa193931d45e4012db5cc65d3679faa90
|
[
"MIT"
] | null | null | null |
am_i_the_asshole/models/__init__.py
|
amr-amr/am-i-the-asshole
|
e7e4f00aa193931d45e4012db5cc65d3679faa90
|
[
"MIT"
] | null | null | null |
from am_i_the_asshole.models.regressor import AitaRegressor
from am_i_the_asshole.models.bert_sentence_pooler import BertSentencePooler
| 67.5
| 75
| 0.918519
| 20
| 135
| 5.8
| 0.65
| 0.103448
| 0.12069
| 0.172414
| 0.396552
| 0.396552
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051852
| 135
| 2
| 75
| 67.5
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
24b70b52fbe7f2300946db1df09edfa253165ee4
| 264
|
py
|
Python
|
novice/python-unit-testing/code/test_addnumbers.py
|
Southampton-RSG/2019-03-13-southampton-swc
|
1f07d82c1bd1f237a19fa7a17bb4765e0364dc88
|
[
"CC-BY-4.0"
] | 1
|
2021-06-20T11:51:37.000Z
|
2021-06-20T11:51:37.000Z
|
novice/python-unit-testing/code/test_addnumbers.py
|
Southampton-RSG/2019-03-13-southampton-swc
|
1f07d82c1bd1f237a19fa7a17bb4765e0364dc88
|
[
"CC-BY-4.0"
] | 1
|
2019-09-30T21:15:32.000Z
|
2019-09-30T21:15:32.000Z
|
novice/python-unit-testing/code/test_addnumbers.py
|
Southampton-RSG/2019-03-13-southampton-swc
|
1f07d82c1bd1f237a19fa7a17bb4765e0364dc88
|
[
"CC-BY-4.0"
] | null | null | null |
from addnumbers import addnumbers
def test_empty():
assert addnumbers([]) == None
def test_single_value():
assert addnumbers([1]) == 1
def test_two_values():
assert addnumbers([1, 2]) == 3
def test_three_values():
assert addnumbers([1, 2, 3]) == 6
| 18.857143
| 36
| 0.674242
| 37
| 264
| 4.621622
| 0.459459
| 0.163743
| 0.298246
| 0.269006
| 0.292398
| 0.292398
| 0
| 0
| 0
| 0
| 0
| 0.041475
| 0.17803
| 264
| 13
| 37
| 20.307692
| 0.746544
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 1
| 0.444444
| true
| 0
| 0.111111
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
24eb4007faea88857c9340fa545dcecf01d29c1b
| 155
|
py
|
Python
|
Task/Fibonacci-sequence/Python/fibonacci-sequence-5.py
|
djgoku/RosettaCodeData
|
91df62d46142e921b3eacdb52b0316c39ee236bc
|
[
"Info-ZIP"
] | null | null | null |
Task/Fibonacci-sequence/Python/fibonacci-sequence-5.py
|
djgoku/RosettaCodeData
|
91df62d46142e921b3eacdb52b0316c39ee236bc
|
[
"Info-ZIP"
] | null | null | null |
Task/Fibonacci-sequence/Python/fibonacci-sequence-5.py
|
djgoku/RosettaCodeData
|
91df62d46142e921b3eacdb52b0316c39ee236bc
|
[
"Info-ZIP"
] | null | null | null |
def fibFastRec(n):
def fib(prvprv, prv, c):
if c < 1: return prvprv
else: return fib(prv, prvprv + prv, c - 1)
return fib(0, 1, n)
| 25.833333
| 50
| 0.548387
| 26
| 155
| 3.269231
| 0.461538
| 0.211765
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037736
| 0.316129
| 155
| 5
| 51
| 31
| 0.764151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
70188086df625a9d5c91f3b0646f5f32305ebfdc
| 48
|
py
|
Python
|
src/senda/__init__.py
|
ebanalyse/senda
|
d40035df455368d3a439055327a7e08b9517d987
|
[
"MIT"
] | 13
|
2021-04-27T12:48:44.000Z
|
2021-11-25T15:31:34.000Z
|
src/senda/__init__.py
|
ebanalyse/senda
|
d40035df455368d3a439055327a7e08b9517d987
|
[
"MIT"
] | null | null | null |
src/senda/__init__.py
|
ebanalyse/senda
|
d40035df455368d3a439055327a7e08b9517d987
|
[
"MIT"
] | null | null | null |
from .Model import *
from .angry_tweets import *
| 24
| 27
| 0.770833
| 7
| 48
| 5.142857
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 27
| 24
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
705b4928d1637203f85768713a5bda242e89b715
| 27
|
py
|
Python
|
cachupy/__init__.py
|
patrickbird/cachupy
|
d1504f3c3129c926bd9897a6660669f146e64c38
|
[
"MIT"
] | null | null | null |
cachupy/__init__.py
|
patrickbird/cachupy
|
d1504f3c3129c926bd9897a6660669f146e64c38
|
[
"MIT"
] | null | null | null |
cachupy/__init__.py
|
patrickbird/cachupy
|
d1504f3c3129c926bd9897a6660669f146e64c38
|
[
"MIT"
] | null | null | null |
from .cachupy import Cache
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7062e1120cb786e7db980520a08fd759ed2faa9d
| 28
|
py
|
Python
|
double3/double3sdk/events/__init__.py
|
CLOMING/winter2021_double
|
9b920baaeb3736a785a6505310b972c49b5b21e9
|
[
"Apache-2.0"
] | null | null | null |
double3/double3sdk/events/__init__.py
|
CLOMING/winter2021_double
|
9b920baaeb3736a785a6505310b972c49b5b21e9
|
[
"Apache-2.0"
] | null | null | null |
double3/double3sdk/events/__init__.py
|
CLOMING/winter2021_double
|
9b920baaeb3736a785a6505310b972c49b5b21e9
|
[
"Apache-2.0"
] | null | null | null |
from .events import _Events
| 14
| 27
| 0.821429
| 4
| 28
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3b51ca829363143c002936e4b2a6ee2de0f98d39
| 117
|
py
|
Python
|
bot/utils/__init__.py
|
Bluenix2/WinterBot
|
a7c546ebe881feabf4f2a97ba65354a9244fed31
|
[
"MIT"
] | 1
|
2021-07-15T09:36:13.000Z
|
2021-07-15T09:36:13.000Z
|
bot/utils/__init__.py
|
Bluenix2/WinterBot
|
a7c546ebe881feabf4f2a97ba65354a9244fed31
|
[
"MIT"
] | null | null | null |
bot/utils/__init__.py
|
Bluenix2/WinterBot
|
a7c546ebe881feabf4f2a97ba65354a9244fed31
|
[
"MIT"
] | null | null | null |
from bot.utils.database import ConnectionUtil, Context
from bot.utils.dependencies import get_app, get_bot, get_conn
| 39
| 61
| 0.846154
| 18
| 117
| 5.333333
| 0.611111
| 0.145833
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094017
| 117
| 2
| 62
| 58.5
| 0.90566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8e576c36c004928f38b77f813eed553595c2dcb9
| 69
|
py
|
Python
|
commondtools/network/__init__.py
|
M0Rph3U56031769/commondtools
|
ef29716179f80ec38355e871fa67763c5edfc54c
|
[
"MIT"
] | 1
|
2020-01-27T05:22:48.000Z
|
2020-01-27T05:22:48.000Z
|
commondtools/network/__init__.py
|
M0Rph3U56031769/commondtools
|
ef29716179f80ec38355e871fa67763c5edfc54c
|
[
"MIT"
] | 3
|
2020-03-31T11:05:14.000Z
|
2020-11-17T08:50:41.000Z
|
commondtools/network/__init__.py
|
M0Rph3U56031769/commondtools
|
ef29716179f80ec38355e871fa67763c5edfc54c
|
[
"MIT"
] | null | null | null |
from .ping import *
from .portscan import *
from .validreqs import *
| 17.25
| 24
| 0.73913
| 9
| 69
| 5.666667
| 0.555556
| 0.392157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 69
| 3
| 25
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8ec7b274385450d7c653df661612650569057787
| 252
|
py
|
Python
|
mysite/views.py
|
ardassmedia13/my_first_app
|
b63479b65e4bf8360cc95e4d8df57388f6fe7e9a
|
[
"Apache-2.0"
] | null | null | null |
mysite/views.py
|
ardassmedia13/my_first_app
|
b63479b65e4bf8360cc95e4d8df57388f6fe7e9a
|
[
"Apache-2.0"
] | null | null | null |
mysite/views.py
|
ardassmedia13/my_first_app
|
b63479b65e4bf8360cc95e4d8df57388f6fe7e9a
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def index(request):
return render(request,'index.html')
def elements(request):
return render(request,'elements.html')
def generic(request):
return render(request,'test.html')
| 21
| 42
| 0.738095
| 33
| 252
| 5.636364
| 0.515152
| 0.209677
| 0.306452
| 0.419355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 252
| 12
| 43
| 21
| 0.861111
| 0.09127
| 0
| 0
| 0
| 0
| 0.140351
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
8edaf2fc1464eabbe86c450a01c10a75f0c02274
| 43,054
|
py
|
Python
|
src/aecg/utils.py
|
FDA/aecg-python
|
561aa3881b51af4d7dfdb2b5030f31e970ba9666
|
[
"CC0-1.0"
] | 2
|
2021-03-04T05:32:19.000Z
|
2021-12-12T04:24:43.000Z
|
src/aecg/utils.py
|
FDA/aecg-python
|
561aa3881b51af4d7dfdb2b5030f31e970ba9666
|
[
"CC0-1.0"
] | null | null | null |
src/aecg/utils.py
|
FDA/aecg-python
|
561aa3881b51af4d7dfdb2b5030f31e970ba9666
|
[
"CC0-1.0"
] | 1
|
2021-06-07T11:07:45.000Z
|
2021-06-07T11:07:45.000Z
|
""" Utility functions for annotated ECG HL7 XML tools
This submodule provides utility functions such as basic printing and plotting.
See authors, license and disclaimer at the top level directory of this project.
"""
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from enum import Enum
from matplotlib import figure
# Python logging ==============================================================
logger = logging.getLogger(__name__)
class ECG_plot_layout(Enum):
"""Supported plot layouts
Args:
Enum: Type of plot layout
"""
#: Leads stacked vertically
STACKED = 1
#: Leads organized in 3 x 4 matrix with full lead II at the bottom
THREExFOURxRHYTHM = 2
#: Leads superimposed on top of each other (a.k.a., butterfly plot)
SUPERIMPOSED = 3
def plot_aecg(rhythm_data: pd.DataFrame,
anns_df: pd.DataFrame = None,
ecgl_plot_layout: ECG_plot_layout = ECG_plot_layout.STACKED,
fig: figure.Figure = None,
dpi: int = 300,
textsize: int = 6,
ecg_linewidth: float = 0.3,
plot_grid: bool = True,
grid_color: str = "#a88332",
v_offset: float = 1.5,
xmin: float = 0.0, xmax: float = 10000.0,
ymin: float = -1.5, ymax: float = 1.5,
x_margin: float = 280,
for_gui: bool = True) -> figure.Figure:
"""Plots the `rhythm_data` waveform and `anns_df` annotations
Args:
rhythm_data (pd.DataFrame): aECG waveform as returned by
:any:`Aecg.rhythm_as_df` or :any:`Aecg.derived_as_df`.
anns_df (pd.DataFrame, optional): aECG annotations. For example,
as returned by pd.DataFrame(the_aecg.DERIVEDANNS[0].anns) where
the_aecg is an :any:`Aecg` object. Defaults to None.
ecgl_plot_layout (ECG_plot_layout, optional): Plot layout. Defaults to
ECG_plot_layout.STACKED.
fig (figure.Figure, optional): Figure containing the plot. Defaults to
None.
dpi (int, optional): Plot resolution in dots per inch (dpi). Defaults
to 300.
textsize (int, optional): Default text fontsize. Defaults to 6.
ecg_linewidth (float, optional): Line width for the ECG waveform.
Defaults to 0.3.
plot_grid (bool, optional): Indicates whether to plot the standard ECG
grid. Defaults to True.
grid_color (str, optional): Color of the ECG grid. Defaults to
"#a88332".
v_offset (float, optional): Vertical offset between leads in mV.
Defaults to 1.5.
xmin (float, optional): X axis minimum value in ms. Defaults to 0.0.
xmax (float, optional): X axis maximum value in ms. This value may be
adjusted automatically when maintaining aspect ratio. Defaults to
10000.0.
ymin (float, optional): Y axis minimum value in mV. Defaults to -1.5.
ymax (float, optional): Y axis maximum value in mV. This value may be
adjusted automatically when maintaining aspect ratio. Defaults to
1.5.
x_margin (float, optional): Margin on the X axis in ms. Defaults to
280.
for_gui (bool, optional): Indicates whether to plot is generated for
a graphical user interface. If true, the figure will be closed
before returning the object so a canvas will be needed to render it
. Otherwise, the figure will be return immediately. Defaults to
True.
Returns:
figure.Figure: Plot of the aECG waveforms and its annotations
"""
if ecgl_plot_layout == ECG_plot_layout.STACKED:
fig = plot_stdleads_stacked(rhythm_data=rhythm_data, anns_df=anns_df,
fig=fig, dpi=dpi, textsize=textsize,
ecg_linewidth=ecg_linewidth,
plot_grid=plot_grid, grid_color=grid_color,
v_offset=v_offset,
xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax,
x_margin=x_margin,
for_gui=for_gui)
elif ecgl_plot_layout == ECG_plot_layout.THREExFOURxRHYTHM:
# Plot 3x4 always up to 10 s only
xmax = xmin + 10000.0
fig = plot_stdleads_matrix(rhythm_data=rhythm_data, anns_df=anns_df,
fig=fig, dpi=dpi, textsize=textsize,
ecg_linewidth=ecg_linewidth,
plot_grid=plot_grid, grid_color=grid_color,
v_offset=v_offset,
xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax,
x_margin=x_margin,
for_gui=for_gui)
elif ecgl_plot_layout == ECG_plot_layout.SUPERIMPOSED:
fig = plot_stdleads_stacked(rhythm_data=rhythm_data, anns_df=anns_df,
fig=fig, dpi=dpi, textsize=textsize,
ecg_linewidth=ecg_linewidth,
plot_grid=plot_grid, grid_color=grid_color,
v_offset=0,
xmin=xmin, xmax=xmax,
ymin=ymin-1.5, ymax=ymax+1.5,
x_margin=x_margin,
for_gui=for_gui)
return fig
def plot_stdleads_stacked(rhythm_data: pd.DataFrame,
anns_df: pd.DataFrame = None,
fig: figure.Figure = None,
dpi: int = 300,
textsize: int = 6,
ecg_linewidth: float = 0.3,
plot_grid: bool = True,
grid_color: str = "#a88332",
v_offset: float = 1.5,
xmin: float = 0.0, xmax: float = 10000.0,
ymin: float = -1.5, ymax: float = 1.5,
x_margin: float = 280,
for_gui: bool = True) -> figure.Figure:
"""Plots the waveform and annotations in a stacked or superimposed layout
Args:
rhythm_data (pd.DataFrame): aECG waveform as returned by
:any:`Aecg.rhythm_as_df` or :any:`Aecg.derived_as_df`.
anns_df (pd.DataFrame, optional): aECG annotations. For example,
as returned by pd.DataFrame(the_aecg.DERIVEDANNS[0].anns) where
the_aecg is an :any:`Aecg` object. Defaults to None.
fig (figure.Figure, optional): Figure containing the plot. Defaults to
None.
dpi (int, optional): Plot resolution in dots per inch (dpi). Defaults
to 300.
textsize (int, optional): Default text fontsize. Defaults to 6.
ecg_linewidth (float, optional): Line width for the ECG waveform.
Defaults to 0.3.
plot_grid (bool, optional): Indicates whether to plot the standard ECG
grid. Defaults to True.
grid_color (str, optional): Color of the ECG grid. Defaults to
"#a88332".
v_offset (float, optional): Vertical offset between leads in mV. Set to
0 For a superimposed layout. Defaults to 1.5.
xmin (float, optional): X axis minimum value in ms. Defaults to 0.0.
xmax (float, optional): X axis maximum value in ms. This value may be
adjusted automatically when maintaining aspect ratio. Defaults to
10000.0.
ymin (float, optional): Y axis minimum value in mV. Defaults to -1.5.
ymax (float, optional): Y axis maximum value in mV. This value may be
adjusted automatically when maintaining aspect ratio. Defaults to
1.5.
x_margin (float, optional): Margin on the X axis in ms. Defaults to
280.
for_gui (bool, optional): Indicates whether to plot is generated for
a graphical user interface. If true, the figure will be closed
before returning the object so a canvas will be needed to render it
. Otherwise, the figure will be return immediately. Defaults to
True.
Returns:
figure.Figure: Plot of the aECG waveforms and its annotations
"""
# Compute maximum height range based on number of leads
ecg_ymin = min(ymin, -min(12, (rhythm_data.shape[1]-1))*v_offset)
ecg_ymax = max(v_offset, ymax)
# Compute image size
ecg_width = (xmax - xmin + x_margin)/40.0 # mm (25 mm/s -> 1 mm x 0.04s)
ecg_height = (ecg_ymax - ecg_ymin)*10.0 # mm ( 10 mm/mV -> 1 mm x 0.1 mV)
ecg_w_in = ecg_width/25.4 # inches
ecg_h_in = ecg_height/25.4 # inches
# Figure size
if fig is None:
fig = plt.figure()
else:
fig.clear()
fig.set_size_inches(ecg_w_in, ecg_h_in)
fig.set_dpi(dpi)
fig.set_facecolor('w')
fig.set_edgecolor('k')
ax1 = fig.add_axes([0, 0, 1, 1], frameon=False)
# ecg grid
if plot_grid:
grid_major_x = np.arange(xmin, xmax + x_margin, 200.0)
grid_minor_x = np.arange(xmin, xmax + x_margin, 40.0)
for xc in grid_major_x:
ax1.axvline(x=xc, color=grid_color, linewidth=0.5)
for xc in grid_minor_x:
ax1.axvline(x=xc, color=grid_color, linewidth=0.2)
numleads = min(12, len(rhythm_data.columns) - 1)
grid_major_y = np.arange(min(ymin, -numleads * v_offset),
max(v_offset, ymax), 0.5)
grid_minor_y = np.arange(min(ymin, -numleads * v_offset),
max(v_offset, ymax), 0.1)
for yc in grid_major_y:
ax1.axhline(y=yc, color=grid_color, linewidth=0.5)
for yc in grid_minor_y:
ax1.axhline(y=yc, color=grid_color, linewidth=0.1)
# Plot leads stacked with lead I on top and V6 at the bottom
idx = 0
lead_zero = 0
ecglibann_voffset = {"RPEAK": 1.0, "PON": 0.7,
"QON": 0.4, "QOFF": 0.7,
"TOFF": 0.4}
for lead in ["I", "II", "III", "aVR", "aVL", "aVF",
"V1", "V2", "V3", "V4", "V5", "V6"]:
if lead in rhythm_data.columns:
lead_zero = - idx * v_offset
# ecg calibration pulse
ax1.plot([40, 80, 80, 280, 280, 320],
[lead_zero, lead_zero, lead_zero + 1,
lead_zero + 1, lead_zero, lead_zero],
color='black', linewidth=0.5)
# lead name
ax1.text(x_margin + 80, lead_zero + 0.55, lead, size=textsize)
ax1.plot(rhythm_data.TIME[rhythm_data[lead].notna()] + x_margin,
rhythm_data[lead][rhythm_data[lead].notna()
].values + lead_zero,
color='black', linewidth=ecg_linewidth)
lead_start_time = rhythm_data.TIME[
rhythm_data[lead].notna()].values[0] + x_margin
# Plot global annotations
if anns_df is not None:
if anns_df.shape[0] > 0:
ann_voffset = 1.0
for j, ann in anns_df[
anns_df["LEADNAM"] == lead].iterrows():
# Annotation type
if ann["ECGLIBANNTYPE"] in ecglibann_voffset.keys():
ann_voffset = ecglibann_voffset[
ann["ECGLIBANNTYPE"]]
else:
ann_voffset = ann_voffset - 0.3
if ann_voffset < 0.0:
ann_voffset = 1.0
ax1.text(ann["TIME"] + lead_start_time,
lead_zero + ann_voffset, ann["ECGLIBANNTYPE"],
size=textsize-1, color="blue")
# Annotation vertical line
ann_x = ann["TIME"] + lead_start_time,
ax1.plot([ann_x, ann_x],
[lead_zero-1.0, lead_zero+1.0],
color="blue", linewidth=0.5)
idx = idx + 1
# Plot global
if anns_df is not None:
if anns_df.shape[0] > 0:
for idx, ann in anns_df[anns_df["LEADNAM"] == "GLOBAL"].iterrows():
# Annotation type
ann_voffset = 1.0
if ann["ECGLIBANNTYPE"] in ecglibann_voffset.keys():
ann_voffset = ecglibann_voffset[ann["ECGLIBANNTYPE"]]
ax1.text(ann["TIME"] + xmin + x_margin,
lead_zero - ann_voffset, ann["ECGLIBANNTYPE"],
size=textsize-1, color="red")
# Annotation vertical line
ax1.axvline(x=ann["TIME"] + x_margin,
color="red", linewidth=0.5, linestyle=":")
# Turn off tick labels
ax1.set_xticks([])
ax1.set_yticks([])
# Set figure width and height
ax1.set_xlim(xmin, xmax + x_margin)
ax1.set_ylim(ecg_ymin, ecg_ymax)
if for_gui:
# Close plt
plt.close()
return fig
def plot_stdleads_matrix(rhythm_data: pd.DataFrame,
                         anns_df: pd.DataFrame = None,
                         fig: figure.Figure = None,
                         dpi: int = 300,
                         textsize: int = 6,
                         ecg_linewidth: float = 0.6,
                         plot_grid: bool = True,
                         grid_color: str = "#a88332",
                         v_offset: float = 1.5,
                         xmin: float = 0.0, xmax: float = 10000.0,
                         ymin: float = -1.5, ymax: float = 1.5,
                         x_margin: float = 280,
                         for_gui: bool = True) -> figure.Figure:
    """Plots the waveform and annotations in a 3x4 + lead II layout

    Args:
        rhythm_data (pd.DataFrame): aECG waveform as returned by
            :any:`Aecg.rhythm_as_df` or :any:`Aecg.derived_as_df`.
        anns_df (pd.DataFrame, optional): aECG annotations. For example,
            as returned by pd.DataFrame(the_aecg.DERIVEDANNS[0].anns) where
            the_aecg is an :any:`Aecg` object. Defaults to None.
        fig (figure.Figure, optional): Figure containing the plot. Defaults to
            None.
        dpi (int, optional): Plot resolution in dots per inch (dpi). Defaults
            to 300.
        textsize (int, optional): Default text fontsize. Defaults to 6.
        ecg_linewidth (float, optional): Line width for the ECG waveform.
            Defaults to 0.6.
        plot_grid (bool, optional): Indicates whether to plot the standard ECG
            grid. Defaults to True.
        grid_color (str, optional): Color of the ECG grid. Defaults to
            "#a88332".
        v_offset (float, optional): Vertical offset between leads in mV.
            Defaults to 1.5.
        xmin (float, optional): X axis minimum value in ms. Defaults to 0.0.
        xmax (float, optional): X axis maximum value in ms. This value may be
            adjusted automatically when maintaining aspect ratio. Defaults to
            10000.0.
        ymin (float, optional): Y axis minimum value in mV. Defaults to -1.5.
        ymax (float, optional): Y axis maximum value in mV. This value may be
            adjusted automatically when maintaining aspect ratio. Defaults to
            1.5.
        x_margin (float, optional): Margin on the X axis in ms. Defaults to
            280.
        for_gui (bool, optional): Indicates whether the plot is generated for
            a graphical user interface. If true, the figure will be closed
            before returning the object so a canvas will be needed to render
            it. Otherwise, the figure will be returned immediately. Defaults
            to True.

    Returns:
        figure.Figure: Plot of the aECG waveforms and its annotations
    """
    # Layout constants: each of the 4 columns shows a 2.5 s window
    h_offset = 2500  # ms of waveform per column
    column_padding = 50  # ms blank gap at the end of each column
    # X template of the 1 mV / 200 ms calibration pulse drawn per lead
    cal_pulse_x = np.array([40, 80, 80, 280, 280, 320])
    # Leads displayed per column, top row first
    col_leads = [["I", "II", "III"],
                 ["aVR", "aVL", "aVF"],
                 ["V1", "V2", "V3"],
                 ["V4", "V5", "V6"]]
    # Check if standard leads are present and, if not, populate with np.nan
    # NOTE(review): this adds columns to the caller's DataFrame in place
    # (original behavior, kept intentionally) — confirm callers expect it.
    for lead in ["I", "II", "III", "aVR", "aVL", "aVF",
                 "V1", "V2", "V3", "V4", "V5", "V6"]:
        if lead not in rhythm_data.columns:
            rhythm_data[lead] = np.nan
    # Slice the rhythm and the annotations into the four time windows
    beat_cols = []
    anns_cols = [None, None, None, None]
    for c in range(4):
        t0 = c * h_offset
        t1 = (c + 1) * h_offset - column_padding
        if c == 0:
            # First column keeps everything before its padding gap
            win = rhythm_data.TIME < t1
        else:
            win = (rhythm_data.TIME >= t0) & (rhythm_data.TIME < t1)
        beat_cols.append(rhythm_data[win][["TIME"] + col_leads[c]].copy())
    beat_plot = pd.concat(beat_cols)
    anns_matrix = None
    if anns_df is not None and anns_df.shape[0] > 0:
        for c in range(4):
            t0 = c * h_offset
            t1 = (c + 1) * h_offset - column_padding
            if c == 0:
                win = anns_df.TIME < t1
            else:
                win = (anns_df.TIME >= t0) & (anns_df.TIME < t1)
            anns_cols[c] = anns_df[win].copy()
        anns_matrix = pd.concat(anns_cols)
    # Compute maximum height range based on number of leads
    ecg_ymin = min(ymin, -4 * v_offset)
    ecg_ymax = max(v_offset, ymax)
    # Compute image size
    ecg_width = (xmax - xmin + x_margin) / 40.0  # mm (25 mm/s -> 1 mm x 0.04s)
    # mm ( 10 mm/mV -> 1 mm x 0.1 mV)
    ecg_height = (ecg_ymax - ecg_ymin) * 10.0
    ecg_w_in = ecg_width / 25.4  # inches
    ecg_h_in = ecg_height / 25.4  # inches
    # Figure size
    if fig is None:
        fig = plt.figure(dpi=dpi)
    else:
        fig.clear()
    fig.set_size_inches(ecg_w_in, ecg_h_in)
    fig.set_dpi(dpi)
    fig.set_facecolor('w')
    fig.set_edgecolor('k')
    ax1 = fig.add_axes([0, 0, 1, 1], frameon=False)
    # ecg grid: major every 200 ms / 0.5 mV, minor every 40 ms / 0.1 mV
    if plot_grid:
        for xc in np.arange(0, xmax + x_margin, 200):
            ax1.axvline(x=xc, color=grid_color, linewidth=0.5)
        for xc in np.arange(0, xmax + x_margin, 40):
            ax1.axvline(x=xc, color=grid_color, linewidth=0.2)
        for yc in np.arange(-4 * v_offset, v_offset, 0.5):
            ax1.axhline(y=yc, color=grid_color, linewidth=0.5)
        for yc in np.arange(-4 * v_offset, v_offset, 0.1):
            ax1.axhline(y=yc, color=grid_color, linewidth=0.2)
    # Vertical offset (in mV above the lead baseline) of each annotation label
    ecglibann_voffset = {"RPEAK": 1.0, "PON": 0.7,
                         "QON": 0.4, "QOFF": 0.7,
                         "TOFF": 0.4}

    def _cal_pulse(x_shift, baseline):
        # Draw the 1 mV calibration pulse shifted to the column's origin
        ax1.plot(cal_pulse_x + x_shift,
                 [baseline, baseline, baseline + 1,
                  baseline + 1, baseline, baseline],
                 color='black', linewidth=0.5)

    def _lead_annotations(anns, leadnam, baseline, start_time, end_time):
        # Draw blue marker line + label for each lead-specific annotation
        # that falls within the lead's plotted time span
        if anns is None:
            return
        for _, ann in anns[anns["LEADNAM"] == leadnam].iterrows():
            label = ann["ECGLIBANNTYPE"]
            offset = ecglibann_voffset.get(label, 1.0)
            ann_x = ann["TIME"] + start_time
            if ann_x <= end_time:
                ax1.plot([ann_x, ann_x],
                         [baseline - 1.0, baseline + 1.0],
                         color="blue", linewidth=0.5)
                ax1.text(ann_x, baseline + offset, label,
                         size=textsize - 1, color="blue")

    # 3x4 matrix of leads: calibration pulse, lead name, waveform, annotations
    for c, leads in enumerate(col_leads):
        for row, lead in enumerate(leads):
            lead_zero = - row * v_offset
            _cal_pulse(c * h_offset, lead_zero)
            ax1.text(c * h_offset + x_margin + 80, 0.55 + lead_zero,
                     lead, size=textsize)
            mask = beat_plot[lead].notna()
            # Guard against an absent / all-NaN lead (the original code
            # crashed on .values[0] for empty limb leads)
            if not mask.any():
                continue
            ax1.plot(beat_plot.TIME[mask] + x_margin,
                     beat_plot[lead][mask] + lead_zero,
                     color='black', linewidth=ecg_linewidth)
            lead_start_time = beat_plot.TIME[mask].values[0] + x_margin
            col_end = beat_plot.TIME[mask].values[-1] + x_margin
            _lead_annotations(anns_cols[c], lead, lead_zero,
                              lead_start_time, col_end)
    # Full-length lead II rhythm strip at the bottom
    lead_zero = - 0.5 - 3 * v_offset
    _cal_pulse(0, lead_zero)
    ax1.text(x_margin + 80, lead_zero + 0.55, 'II', size=textsize)
    strip_mask = rhythm_data.II.notna()
    if strip_mask.any():
        ax1.plot(rhythm_data.TIME + x_margin, rhythm_data.II + lead_zero,
                 color='black', linewidth=ecg_linewidth)
        strip_start = rhythm_data.TIME[strip_mask].values[0] + x_margin
        strip_end = rhythm_data.TIME[strip_mask].values[-1] + x_margin
        if anns_df is not None and anns_df.shape[0] > 0:
            _lead_annotations(anns_df, "II", lead_zero,
                              strip_start, strip_end)
    # Plot global (lead-independent) annotations in red
    if anns_matrix is not None and anns_matrix.shape[0] > 0:
        for _, ann in anns_matrix[
                anns_matrix["LEADNAM"] == "GLOBAL"].iterrows():
            label = ann["ECGLIBANNTYPE"]
            ann_voffset = ecglibann_voffset.get(label, 1.0)
            ann_x = ann["TIME"] + xmin + x_margin
            # Marker spanning the 3x4 matrix rows
            ax1.plot([ann_x, ann_x], [-0.5 - 2 * v_offset, ymax - 0.5],
                     color="red", linewidth=0.5, linestyle=":")
            # Marker on the lead II strip at the bottom
            ax1.plot([ann_x, ann_x],
                     [-1.0 - 3 * v_offset, 1.0 - 0.5 - 3 * v_offset],
                     color="red", linewidth=0.5, linestyle=":")
            ax1.text(ann_x, - 3 * v_offset - ann_voffset, label,
                     size=textsize - 1, color="red")
    # Turn off tick labels
    ax1.set_xticks([])
    ax1.set_yticks([])
    # Set figure width and height
    ax1.set_xlim(xmin, xmax + x_margin)
    ax1.set_ylim(ecg_ymin, ecg_ymax)
    if for_gui:
        # Close plt so only a GUI canvas renders the returned figure
        plt.close()
    return fig
def ratio_of_missing_samples(waveform_data: pd.DataFrame) -> float:
    """Returns the ratio of missing samples in a waveform

    Calculates the total number of samples as well as the number of samples
    reported as np.nan values (i.e., missing) and returns the ratio of
    missing over the total number of samples.

    Args:
        waveform_data (pd.DataFrame): Waveform data like the one returned by
            :any:`Aecg.rhythm_as_df`

    Returns:
        float: ratio of the number of missing over the total number of
        samples (0.0 for a waveform with no lead columns or no rows)
    """
    # Every column except TIME carries samples
    total_samples = (waveform_data.shape[1] - 1) * waveform_data.shape[0]
    if total_samples == 0:
        # No lead columns or no rows: avoid division by zero
        return 0.0
    # count() tallies non-NaN cells per lead column
    not_nans = waveform_data.drop(columns=["TIME"]).count().sum()
    num_nans = total_samples - not_nans
    return num_nans / total_samples
| 46.747014
| 79
| 0.534701
| 5,492
| 43,054
| 3.98343
| 0.058267
| 0.05924
| 0.031266
| 0.032911
| 0.885816
| 0.85583
| 0.819354
| 0.784203
| 0.770718
| 0.737441
| 0
| 0.041291
| 0.358736
| 43,054
| 920
| 80
| 46.797826
| 0.751096
| 0.192479
| 0
| 0.663636
| 0
| 0
| 0.039258
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006061
| false
| 0
| 0.009091
| 0
| 0.027273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d941d95cc530a79ef9a953e17e71fc87d1ed8b4f
| 201
|
py
|
Python
|
Code/odooerp/odoo-8.0/openerp/addons/account/tests/__init__.py
|
zhupangithub/WEBERP
|
714512082ec5c6db07cbf6af0238ceefe2d2c1a5
|
[
"MIT"
] | null | null | null |
Code/odooerp/odoo-8.0/openerp/addons/account/tests/__init__.py
|
zhupangithub/WEBERP
|
714512082ec5c6db07cbf6af0238ceefe2d2c1a5
|
[
"MIT"
] | null | null | null |
Code/odooerp/odoo-8.0/openerp/addons/account/tests/__init__.py
|
zhupangithub/WEBERP
|
714512082ec5c6db07cbf6af0238ceefe2d2c1a5
|
[
"MIT"
] | 3
|
2020-10-08T14:42:10.000Z
|
2022-01-28T14:12:29.000Z
|
from . import test_tax
from . import test_search
from . import test_reconciliation
from . import test_account_move_closed_period
from . import test_fiscal_position
from . import test_product_id_change
| 28.714286
| 45
| 0.850746
| 30
| 201
| 5.3
| 0.5
| 0.377358
| 0.528302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119403
| 201
| 6
| 46
| 33.5
| 0.898305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d94a49249ecf348e7e94c0cdb73f38ac2f6973a1
| 14,277
|
py
|
Python
|
test/hummingbot/connector/derivative/test_perpetual_budget_checker.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 2
|
2022-03-03T10:00:27.000Z
|
2022-03-08T13:57:56.000Z
|
test/hummingbot/connector/derivative/test_perpetual_budget_checker.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 6
|
2022-01-31T15:44:54.000Z
|
2022-03-06T04:27:12.000Z
|
test/hummingbot/connector/derivative/test_perpetual_budget_checker.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 1
|
2022-02-22T11:03:02.000Z
|
2022-02-22T11:03:02.000Z
|
import unittest
from decimal import Decimal
from test.mock.mock_perp_connector import MockPerpConnector
from hummingbot.connector.derivative.perpetual_budget_checker import PerpetualBudgetChecker
from hummingbot.connector.exchange.paper_trade.paper_trade_exchange import QuantizationParams
from hummingbot.connector.utils import combine_to_hb_trading_pair
from hummingbot.core.data_type.order_candidate import PerpetualOrderCandidate
from hummingbot.core.data_type.trade_fee import TradeFeeSchema
from hummingbot.core.event.events import OrderType, TradeType
class PerpetualBudgetCheckerTest(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.base_asset = "COINALPHA"
self.quote_asset = "HBOT"
self.trading_pair = f"{self.base_asset}-{self.quote_asset}"
trade_fee_schema = TradeFeeSchema(
maker_percent_fee_decimal=Decimal("0.01"), taker_percent_fee_decimal=Decimal("0.02")
)
self.exchange = MockPerpConnector(trade_fee_schema)
self.budget_checker = self.exchange.budget_checker
def test_populate_collateral_fields_buy_order(self):
order_candidate = PerpetualOrderCandidate(
trading_pair=self.trading_pair,
is_maker=True,
order_type=OrderType.LIMIT,
order_side=TradeType.BUY,
amount=Decimal("10"),
price=Decimal("2"),
)
populated_candidate = self.budget_checker.populate_collateral_entries(order_candidate)
self.assertEqual(self.quote_asset, populated_candidate.order_collateral.token)
self.assertEqual(Decimal("20"), populated_candidate.order_collateral.amount)
self.assertEqual(self.quote_asset, populated_candidate.percent_fee_collateral.token)
self.assertEqual(Decimal("0.2"), populated_candidate.percent_fee_collateral.amount)
self.assertEqual(self.quote_asset, populated_candidate.percent_fee_value.token)
self.assertEqual(Decimal("0.2"), populated_candidate.percent_fee_value.amount)
self.assertEqual(0, len(populated_candidate.fixed_fee_collaterals))
self.assertIsNone(populated_candidate.potential_returns) # order results in position open
def test_populate_collateral_fields_taker_buy_order(self):
order_candidate = PerpetualOrderCandidate(
trading_pair=self.trading_pair,
is_maker=False,
order_type=OrderType.LIMIT,
order_side=TradeType.BUY,
amount=Decimal("10"),
price=Decimal("2"),
)
populated_candidate = self.budget_checker.populate_collateral_entries(order_candidate)
self.assertEqual(self.quote_asset, populated_candidate.order_collateral.token)
self.assertEqual(Decimal("20"), populated_candidate.order_collateral.amount)
self.assertEqual(self.quote_asset, populated_candidate.percent_fee_collateral.token)
self.assertEqual(Decimal("0.4"), populated_candidate.percent_fee_collateral.amount)
self.assertEqual(self.quote_asset, populated_candidate.percent_fee_value.token)
self.assertEqual(Decimal("0.4"), populated_candidate.percent_fee_value.amount)
self.assertEqual(0, len(populated_candidate.fixed_fee_collaterals))
self.assertIsNone(populated_candidate.potential_returns) # order results in position open
def test_populate_collateral_fields_buy_order_with_leverage(self):
order_candidate = PerpetualOrderCandidate(
trading_pair=self.trading_pair,
is_maker=True,
order_type=OrderType.LIMIT,
order_side=TradeType.BUY,
amount=Decimal("10"),
price=Decimal("2"),
leverage=Decimal("2")
)
populated_candidate = self.budget_checker.populate_collateral_entries(order_candidate)
self.assertEqual(self.quote_asset, populated_candidate.order_collateral.token)
self.assertEqual(Decimal("10"), populated_candidate.order_collateral.amount)
self.assertEqual(self.quote_asset, populated_candidate.percent_fee_collateral.token)
self.assertEqual(Decimal("0.2"), populated_candidate.percent_fee_collateral.amount)
self.assertEqual(self.quote_asset, populated_candidate.percent_fee_value.token)
self.assertEqual(Decimal("0.2"), populated_candidate.percent_fee_value.amount)
self.assertEqual(0, len(populated_candidate.fixed_fee_collaterals))
self.assertIsNone(populated_candidate.potential_returns) # order results in position open
def test_populate_collateral_fields_sell_order(self):
order_candidate = PerpetualOrderCandidate(
trading_pair=self.trading_pair,
is_maker=True,
order_type=OrderType.LIMIT,
order_side=TradeType.SELL,
amount=Decimal("10"),
price=Decimal("2"),
)
populated_candidate = self.budget_checker.populate_collateral_entries(order_candidate)
self.assertEqual(self.quote_asset, populated_candidate.order_collateral.token)
self.assertEqual(Decimal("20"), populated_candidate.order_collateral.amount)
self.assertEqual(self.quote_asset, populated_candidate.percent_fee_collateral.token)
self.assertEqual(Decimal("0.2"), populated_candidate.percent_fee_collateral.amount)
self.assertEqual(self.quote_asset, populated_candidate.percent_fee_value.token)
self.assertEqual(Decimal("0.2"), populated_candidate.percent_fee_value.amount)
self.assertEqual(0, len(populated_candidate.fixed_fee_collaterals))
self.assertIsNone(populated_candidate.potential_returns) # order results in position open
def test_populate_collateral_fields_sell_order_with_leverage(self):
order_candidate = PerpetualOrderCandidate(
trading_pair=self.trading_pair,
is_maker=True,
order_type=OrderType.LIMIT,
order_side=TradeType.SELL,
amount=Decimal("10"),
price=Decimal("2"),
leverage=Decimal("2"),
)
populated_candidate = self.budget_checker.populate_collateral_entries(order_candidate)
self.assertEqual(self.quote_asset, populated_candidate.order_collateral.token)
self.assertEqual(Decimal("10"), populated_candidate.order_collateral.amount)
self.assertEqual(self.quote_asset, populated_candidate.percent_fee_collateral.token)
self.assertEqual(Decimal("0.2"), populated_candidate.percent_fee_collateral.amount)
self.assertEqual(self.quote_asset, populated_candidate.percent_fee_value.token)
self.assertEqual(Decimal("0.2"), populated_candidate.percent_fee_value.amount)
self.assertEqual(0, len(populated_candidate.fixed_fee_collaterals))
self.assertIsNone(populated_candidate.potential_returns) # order results in position open
def test_populate_collateral_fields_percent_fees_in_third_token(self):
pfc_token = "PFC"
trade_fee_schema = TradeFeeSchema(
percent_fee_token=pfc_token,
maker_percent_fee_decimal=Decimal("0.01"),
taker_percent_fee_decimal=Decimal("0.01"),
)
exchange = MockPerpConnector(trade_fee_schema)
pfc_quote_pair = combine_to_hb_trading_pair(self.quote_asset, pfc_token)
exchange.set_balanced_order_book( # the quote to pfc price will be 1:2
trading_pair=pfc_quote_pair,
mid_price=1.5,
min_price=1,
max_price=2,
price_step_size=1,
volume_step_size=1,
)
budget_checker: PerpetualBudgetChecker = exchange.budget_checker
order_candidate = PerpetualOrderCandidate(
trading_pair=self.trading_pair,
is_maker=True,
order_type=OrderType.LIMIT,
order_side=TradeType.BUY,
amount=Decimal("10"),
price=Decimal("2"),
leverage=Decimal("2"),
)
populated_candidate = budget_checker.populate_collateral_entries(order_candidate)
self.assertEqual(self.quote_asset, populated_candidate.order_collateral.token)
self.assertEqual(Decimal("10"), populated_candidate.order_collateral.amount)
self.assertEqual(pfc_token, populated_candidate.percent_fee_collateral.token)
self.assertEqual(Decimal("0.4"), populated_candidate.percent_fee_collateral.amount)
self.assertEqual(pfc_token, populated_candidate.percent_fee_value.token)
self.assertEqual(Decimal("0.4"), populated_candidate.percent_fee_value.amount)
self.assertEqual(0, len(populated_candidate.fixed_fee_collaterals))
self.assertIsNone(populated_candidate.potential_returns) # order results in position open
def test_populate_collateral_for_position_close(self):
order_candidate = PerpetualOrderCandidate(
trading_pair=self.trading_pair,
is_maker=True,
order_type=OrderType.LIMIT,
order_side=TradeType.SELL,
amount=Decimal("10"),
price=Decimal("2"),
leverage=Decimal("2"),
position_close=True,
)
populated_candidate = self.budget_checker.populate_collateral_entries(order_candidate)
self.assertIsNone(populated_candidate.order_collateral) # the collateral is the contract itself
self.assertIsNone(populated_candidate.percent_fee_collateral)
self.assertIsNone(populated_candidate.percent_fee_value)
self.assertEqual(0, len(populated_candidate.fixed_fee_collaterals))
self.assertEqual(self.quote_asset, populated_candidate.potential_returns.token)
self.assertEqual(Decimal("19.8"), populated_candidate.potential_returns.amount)
def test_adjust_candidate_sufficient_funds(self):
self.exchange.set_balance(self.quote_asset, Decimal("100"))
order_candidate = PerpetualOrderCandidate(
trading_pair=self.trading_pair,
is_maker=True,
order_type=OrderType.LIMIT,
order_side=TradeType.BUY,
amount=Decimal("10"),
price=Decimal("2"),
)
adjusted_candidate = self.budget_checker.adjust_candidate(order_candidate)
self.assertEqual(Decimal("10"), adjusted_candidate.amount)
self.assertEqual(self.quote_asset, adjusted_candidate.order_collateral.token)
self.assertEqual(Decimal("20"), adjusted_candidate.order_collateral.amount)
self.assertEqual(self.quote_asset, adjusted_candidate.percent_fee_collateral.token)
self.assertEqual(Decimal("0.2"), adjusted_candidate.percent_fee_collateral.amount)
self.assertEqual(self.quote_asset, adjusted_candidate.percent_fee_value.token)
self.assertEqual(Decimal("0.2"), adjusted_candidate.percent_fee_value.amount)
self.assertEqual(0, len(adjusted_candidate.fixed_fee_collaterals))
self.assertIsNone(adjusted_candidate.potential_returns) # order results in position open
def test_adjust_candidate_buy_insufficient_funds_partial_adjustment_allowed(self):
q_params = QuantizationParams(
trading_pair=self.trading_pair,
price_precision=8,
price_decimals=2,
order_size_precision=8,
order_size_decimals=2,
)
self.exchange.set_quantization_param(q_params)
self.exchange.set_balance(self.quote_asset, Decimal("10"))
order_candidate = PerpetualOrderCandidate(
trading_pair=self.trading_pair,
is_maker=True,
order_type=OrderType.LIMIT,
order_side=TradeType.BUY,
amount=Decimal("10"),
price=Decimal("2"),
)
adjusted_candidate = self.budget_checker.adjust_candidate(order_candidate, all_or_none=False)
self.assertEqual(Decimal("4.95"), adjusted_candidate.amount) # 5 * .99
self.assertEqual(self.quote_asset, adjusted_candidate.order_collateral.token)
self.assertEqual(Decimal("9.9"), adjusted_candidate.order_collateral.amount) # 4.95 * 2
self.assertEqual(self.quote_asset, adjusted_candidate.percent_fee_collateral.token)
self.assertEqual(Decimal("0.099"), adjusted_candidate.percent_fee_collateral.amount) # 9.9 * 0.01
self.assertEqual(self.quote_asset, adjusted_candidate.percent_fee_value.token)
self.assertEqual(Decimal("0.099"), adjusted_candidate.percent_fee_value.amount) # 9.9 * 0.01
self.assertEqual(0, len(adjusted_candidate.fixed_fee_collaterals))
self.assertIsNone(adjusted_candidate.potential_returns) # order results in position open
def test_adjust_candidate_sell_insufficient_funds_partial_adjustment_allowed(self):
q_params = QuantizationParams(
trading_pair=self.trading_pair,
price_precision=8,
price_decimals=2,
order_size_precision=8,
order_size_decimals=2,
)
self.exchange.set_quantization_param(q_params)
self.exchange.set_balance(self.quote_asset, Decimal("10"))
order_candidate = PerpetualOrderCandidate(
trading_pair=self.trading_pair,
is_maker=True,
order_type=OrderType.LIMIT,
order_side=TradeType.SELL,
amount=Decimal("10"),
price=Decimal("2"),
)
adjusted_candidate = self.budget_checker.adjust_candidate(order_candidate, all_or_none=False)
self.assertEqual(Decimal("4.95"), adjusted_candidate.amount) # 5 * .99
self.assertEqual(self.quote_asset, adjusted_candidate.order_collateral.token)
self.assertEqual(Decimal("9.9"), adjusted_candidate.order_collateral.amount) # 4.95 * 2
self.assertEqual(self.quote_asset, adjusted_candidate.percent_fee_collateral.token)
self.assertEqual(Decimal("0.099"), adjusted_candidate.percent_fee_collateral.amount) # 9.9 * 0.01
self.assertEqual(self.quote_asset, adjusted_candidate.percent_fee_value.token)
self.assertEqual(Decimal("0.099"), adjusted_candidate.percent_fee_value.amount) # 9.9 * 0.01
self.assertEqual(0, len(adjusted_candidate.fixed_fee_collaterals))
self.assertIsNone(adjusted_candidate.potential_returns) # order results in position open
| 52.488971
| 106
| 0.72193
| 1,614
| 14,277
| 6.070632
| 0.084263
| 0.105634
| 0.073689
| 0.077159
| 0.871709
| 0.851602
| 0.842417
| 0.834558
| 0.829353
| 0.826801
| 0
| 0.016104
| 0.191007
| 14,277
| 271
| 107
| 52.682657
| 0.832208
| 0.030048
| 0
| 0.694215
| 0
| 0
| 0.014825
| 0.002603
| 0
| 0
| 0
| 0
| 0.334711
| 1
| 0.045455
| false
| 0
| 0.03719
| 0
| 0.086777
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
794c140085a1b064a2e760575ac7ddd4aa5577d8
| 142
|
py
|
Python
|
cmc/python/__init__.py
|
hschwane/offline_production
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
[
"MIT"
] | 1
|
2020-12-24T22:00:01.000Z
|
2020-12-24T22:00:01.000Z
|
cmc/python/__init__.py
|
hschwane/offline_production
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
[
"MIT"
] | null | null | null |
cmc/python/__init__.py
|
hschwane/offline_production
|
e14a6493782f613b8bbe64217559765d5213dc1e
|
[
"MIT"
] | 3
|
2020-07-17T09:20:29.000Z
|
2021-03-30T16:44:18.000Z
|
# Package initializer: pull in the IceTray framework modules this package
# builds on, then load the compiled C++ pybindings into this namespace.
from icecube import icetray,dataclasses,sim_services
from icecube.load_pybindings import load_pybindings
# Exposes the C++ classes of this module as Python attributes of the package.
load_pybindings(__name__, __path__)
| 28.4
| 52
| 0.880282
| 18
| 142
| 6.277778
| 0.611111
| 0.371681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077465
| 142
| 4
| 53
| 35.5
| 0.862595
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
79a1e2628569761d819316ed465a5bdf7b788119
| 143
|
py
|
Python
|
oslo/__init__.py
|
Mehrad0711/oslo
|
873d771a68bc380903947010da0b66f58f60e496
|
[
"Apache-2.0"
] | null | null | null |
oslo/__init__.py
|
Mehrad0711/oslo
|
873d771a68bc380903947010da0b66f58f60e496
|
[
"Apache-2.0"
] | null | null | null |
oslo/__init__.py
|
Mehrad0711/oslo
|
873d771a68bc380903947010da0b66f58f60e496
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 TUNiB inc.
from oslo.pytorch import initialize
from oslo.pytorch.model_parallelism.utils.mappings import Column, Row, Update
| 28.6
| 77
| 0.825175
| 20
| 143
| 5.85
| 0.8
| 0.136752
| 0.25641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031496
| 0.111888
| 143
| 4
| 78
| 35.75
| 0.889764
| 0.174825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
79b8cd00464bc5302b926ee6622a6d08e27cf1c0
| 87
|
py
|
Python
|
example/articles/tests/perms/__init__.py
|
Formulka/django-fperms
|
88b8fa3dd87075a56d8bfeb2b9993c578c22694e
|
[
"MIT"
] | 3
|
2019-03-29T09:50:45.000Z
|
2021-05-01T21:11:33.000Z
|
example/articles/tests/perms/__init__.py
|
Formulka/django-perms
|
88b8fa3dd87075a56d8bfeb2b9993c578c22694e
|
[
"MIT"
] | 2
|
2018-04-12T00:54:05.000Z
|
2018-04-12T16:32:42.000Z
|
example/articles/tests/perms/__init__.py
|
Formulka/django-perms
|
88b8fa3dd87075a56d8bfeb2b9993c578c22694e
|
[
"MIT"
] | 1
|
2018-07-13T14:42:07.000Z
|
2018-07-13T14:42:07.000Z
|
from .generic import *
from .model import *
from .object import *
from .field import *
| 17.4
| 22
| 0.724138
| 12
| 87
| 5.25
| 0.5
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183908
| 87
| 4
| 23
| 21.75
| 0.887324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8dca6c5fe2c9cf1d6a6d311f3f9528b7eb81cfb5
| 19
|
py
|
Python
|
proc/__init__.py
|
nomadiq/maunakini
|
835cf11573129ce5e614382c4d12f3bc9be4c6c5
|
[
"MIT"
] | 1
|
2022-02-03T16:28:54.000Z
|
2022-02-03T16:28:54.000Z
|
proc/__init__.py
|
nomadiq/maunakini
|
835cf11573129ce5e614382c4d12f3bc9be4c6c5
|
[
"MIT"
] | null | null | null |
proc/__init__.py
|
nomadiq/maunakini
|
835cf11573129ce5e614382c4d12f3bc9be4c6c5
|
[
"MIT"
] | 1
|
2022-02-03T16:25:59.000Z
|
2022-02-03T16:25:59.000Z
|
from .proc import *
| 19
| 19
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 19
| 1
| 19
| 19
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8dda7cefccdac6f0a8f8ccf597df7a23e1982766
| 928
|
py
|
Python
|
10-Making a scatter plot.py
|
mnabavi84/dcamp-intro-dscience-python
|
de6dbdf7328e0cdfaab218c01589db269abb100c
|
[
"MIT"
] | null | null | null |
10-Making a scatter plot.py
|
mnabavi84/dcamp-intro-dscience-python
|
de6dbdf7328e0cdfaab218c01589db269abb100c
|
[
"MIT"
] | null | null | null |
10-Making a scatter plot.py
|
mnabavi84/dcamp-intro-dscience-python
|
de6dbdf7328e0cdfaab218c01589db269abb100c
|
[
"MIT"
] | null | null | null |
# Course exercise script building up a scatter plot step by step.
# NOTE(review): assumes `plt` (presumably matplotlib.pyplot) and the DataFrame
# `cellphone` (with columns x and y) are defined by the course environment --
# this snippet is not runnable standalone; confirm before reuse.

# Explore the data
print(cellphone.head())

# Create a scatter plot of the data from the DataFrame cellphone
plt.scatter(cellphone.x, cellphone.y)
# Add labels
plt.ylabel('Latitude')
plt.xlabel('Longitude')
# Display the plot
plt.show()

# Same plot, with the marker color changed to red
plt.scatter(cellphone.x, cellphone.y,
            color='red')
# Add labels
plt.ylabel('Latitude')
plt.xlabel('Longitude')
# Display the plot
plt.show()

# Same plot, with the marker shape changed to square
plt.scatter(cellphone.x, cellphone.y,
            color='red',
            marker='s')
# Add labels
plt.ylabel('Latitude')
plt.xlabel('Longitude')
# Display the plot
plt.show()

# Same plot, with the transparency changed to 0.1 to reveal point density
plt.scatter(cellphone.x, cellphone.y,
            color='red',
            marker='s',
            alpha=0.1)
# Add labels
plt.ylabel('Latitude')
plt.xlabel('Longitude')
# Display the plot
plt.show()
| 17.846154
| 65
| 0.632543
| 124
| 928
| 4.733871
| 0.290323
| 0.068143
| 0.129472
| 0.136286
| 0.778535
| 0.778535
| 0.727428
| 0.727428
| 0.662692
| 0.662692
| 0
| 0.005674
| 0.240302
| 928
| 51
| 66
| 18.196078
| 0.82695
| 0.309267
| 0
| 0.73913
| 0
| 0
| 0.137153
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5c05e1e780f57d02f7ad285145df43eafff29192
| 5,960
|
py
|
Python
|
finnish_media_scrapers/htmltotext.py
|
hsci-r/finnish-media-scraper
|
b63b54e3cdc2c55b426eeeeb9656da45a8a9ed4f
|
[
"MIT"
] | 6
|
2021-06-29T10:31:01.000Z
|
2022-03-16T16:02:31.000Z
|
finnish_media_scrapers/htmltotext.py
|
hsci-r/finnish-media-scraper
|
b63b54e3cdc2c55b426eeeeb9656da45a8a9ed4f
|
[
"MIT"
] | 6
|
2021-06-28T15:09:03.000Z
|
2022-01-10T12:10:05.000Z
|
finnish_media_scrapers/htmltotext.py
|
hsci-r/finnish-media-scraper
|
b63b54e3cdc2c55b426eeeeb9656da45a8a9ed4f
|
[
"MIT"
] | 1
|
2021-07-17T00:35:09.000Z
|
2021-07-17T00:35:09.000Z
|
"""Functions to extract article plain texts from the YLE/HS/IL/IS HTML articles
"""
import re
from typing import TextIO, Union
from bs4 import BeautifulSoup, NavigableString
def extract_text_from_svyle_html(html: Union[str, TextIO]) -> str:
    """Extract the plain article text from a Svenska YLE article HTML page.

    Args:
        html (Union[str,TextIO]): a string or a file-like object containing the article HTML

    Raises:
        ValueError: The layout of the article was not recognized, or the article parsed as empty

    Returns:
        str: article text
    """
    parsed = BeautifulSoup(html, 'lxml')
    article = parsed.select_one('article#main-content')
    if article is None:
        raise ValueError("Article layout not recognized")
    # Strip non-article chrome (tag list, comments, share buttons) before extraction.
    for selector in ('aside#id-article__tags', '#comments', '.ydd-share-buttons'):
        for unwanted in parsed.select(selector):
            unwanted.extract()
    # Terminate every block-level element with a blank line so the plain text
    # keeps paragraph boundaries.
    for tag_name in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'p', 'div'):
        for block in article.find_all(tag_name):
            block.insert_after(NavigableString('\n\n'))
    text = article.get_text().strip()
    if text == "":
        raise ValueError("Parsing results in an empty article")
    return text
def extract_text_from_yle_html(html: Union[str, TextIO]) -> str:
    """Extract the plain article text from a YLE article HTML page.

    Args:
        html (Union[str,TextIO]): a string or a file-like object containing the article HTML

    Raises:
        ValueError: The layout of the article was not recognized, or the article parsed as empty

    Returns:
        str: article text
    """
    parsed = BeautifulSoup(html, 'lxml')
    # Try the known YLE layouts in order; explicit `is None` checks are used
    # (not `or`) because an empty bs4 Tag is falsy and must still count as found.
    article = parsed.select_one('.yle__article')
    if article is None:
        article = parsed.select_one('#yle__section--article')
    if article is None:
        article = parsed.select_one('article.content')
    if article is None:
        raise ValueError("Article layout not recognized")
    # Blank line after each block-level element preserves paragraph boundaries.
    for tag_name in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'p', 'div'):
        for block in article.find_all(tag_name):
            block.insert_after(NavigableString('\n\n'))
    text = article.get_text().strip()
    if text == "":
        raise ValueError("Parsing results in an empty article")
    return text
def extract_text_from_is_html(html: Union[str, TextIO]) -> str:
    """Extract the plain article text from an Ilta-Sanomat article HTML page.

    Args:
        html (Union[str,TextIO]): a string or a file-like object containing the article HTML

    Raises:
        ValueError: The layout of the article was not recognized, or the article parsed as empty

    Returns:
        str: article text
    """
    parsed = BeautifulSoup(html, 'lxml')
    # One compound selector covers every known IS article layout variant.
    article = parsed.select_one(
        'article.single-article,article.article--m,article.article--l,article.article--xl-picture-top,article.article--xl-title-top')
    if article is None:
        raise ValueError("Article layout not recognized")
    # Blank line after each block-level element preserves paragraph boundaries.
    for tag_name in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'p', 'div'):
        for block in article.find_all(tag_name):
            block.insert_after(NavigableString('\n\n'))
    text = article.get_text().strip()
    if text == "":
        raise ValueError("Parsing results in an empty article")
    return text
def extract_text_from_il_html(html: Union[str, TextIO]) -> str:
    """Extract the plain article text from an Iltalehti article HTML page.

    Args:
        html (Union[str,TextIO]): a string or a file-like object containing the article HTML

    Raises:
        ValueError: The layout of the article was not recognized, or the article parsed as empty

    Returns:
        str: article text
    """
    parsed = BeautifulSoup(html, 'lxml')
    content = parsed.select_one('.article-content')
    if content is None:
        raise ValueError("Article layout not recognized")
    # Blank line after each block-level element preserves paragraph boundaries.
    for tag_name in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'p', 'div'):
        for block in content.find_all(tag_name):
            block.insert_after(NavigableString('\n\n'))
    text = content.get_text().strip()
    if text == "":
        raise ValueError("Parsing results in an empty article")
    return text
def extract_text_from_hs_html(html: Union[str, TextIO]) -> str:
    """Extract article text from Helsingin Sanomat article HTML

    Args:
        html (Union[str,TextIO]): a string or a file-like object containing the article HTML

    Raises:
        ValueError: The layout of the article was not recognized, or the article parsed as empty

    Returns:
        str: article text
    """
    soup = BeautifulSoup(html, 'lxml')
    # Try the known HS layouts in order of precedence, narrowing `soup` to the
    # matched article container at each step.
    elem = soup.select_one('#__nuxt,article.article--xxl')
    if elem is not None:
        soup = elem
    else:
        elem = soup.find('main')
        if elem is not None:
            soup = elem
        elem = soup.select_one('div#page-main-content + article')
        if elem is not None:
            soup = elem
        else:
            elem = soup.select_one('div#page-main-content,#paid-content')
            if elem is not None:
                soup = elem
            else:
                raise ValueError("Article layout not recognized")
    # Remove non-article chrome: asides, related articles, info and action widgets.
    for elem in soup.find_all('aside'):
        elem.extract()
    for elem in soup.select('section.article-body + div'):
        elem.extract()
    for elem_to_remove in soup.select('div.article-info'):
        elem_to_remove.extract()
    for elem_to_remove in soup.select('div.related-articles'):
        elem_to_remove.extract()
    for elem_to_remove in soup.select('div.article-actions'):
        elem_to_remove.extract()
    # Blank line after each block-level element keeps paragraph boundaries in
    # the plain-text output.
    for tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'p', 'div']:
        for block_elem in soup.find_all(tag):
            block_elem.insert_after(NavigableString('\n\n'))
    txt = soup.get_text()
    txt = txt.replace("\xad", "")  # drop soft hyphens
    txt = re.sub("\n\n+", "\n\n", txt)  # collapse runs of blank lines
    txt = txt.strip()
    if txt == "":
        raise ValueError("Parsing results in an empty article")
    return txt
| 35.058824
| 133
| 0.632886
| 827
| 5,960
| 4.455865
| 0.136638
| 0.043419
| 0.039077
| 0.048847
| 0.858073
| 0.852917
| 0.843148
| 0.831479
| 0.793216
| 0.773677
| 0
| 0.008
| 0.244966
| 5,960
| 169
| 134
| 35.266272
| 0.810889
| 0.247987
| 0
| 0.67
| 0
| 0.01
| 0.209988
| 0.058072
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.03
| 0
| 0.13
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a50ce6809db33721cc47fbb9073e3f4265f0b647
| 13,146
|
py
|
Python
|
solver/prolog_attribute_equation_evaluator.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 3
|
2017-06-02T19:26:27.000Z
|
2021-06-14T04:25:45.000Z
|
solver/prolog_attribute_equation_evaluator.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 8
|
2016-08-24T07:04:07.000Z
|
2017-05-26T16:22:47.000Z
|
solver/prolog_attribute_equation_evaluator.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 1
|
2019-10-31T06:00:23.000Z
|
2019-10-31T06:00:23.000Z
|
'''
Created on 2015-01-16
@author: levi
'''
from pyswip import Prolog
from attribute_equation_solver import AttributeEquationSolver
class PrologAttributeEquationEvaluator(AttributeEquationSolver):
"""
Simple constraint solver based on prolog for string equations in path conditions.
Requires pyswip, a bridge between python and prolog to be installed.
For information on how to install pyswip see: https://code.google.com/p/pyswip/
"""
# to generate fresh var ID names
varID = 0
# Keep the variable names in the prolog expression for all attributes of the same
# name connected to the same object. This is necessary because an object may have connected
# to it more than one attribute with the same name, which means the attribute has multiple
# constraints for that object.
varNameDatabase = {}
def __init__(self, verbosity):
self.verbosity = verbosity
def newVarID(self):
old_varID = self.varID
self.varID += 1
return "V" + str(old_varID)
# def build_equation_expression(self, node, pathCondition, variablesInExpression, concatsInExpression):
# """
# helper for building the attribute equations by recursively going through the operations associated
# to the left hand side and to the right hand side of an equation
# """
#
# # in case it's an attribute, return the object's ID
# if pathCondition.vs[node]['mm__'] == 'Attribute':
# variablesInExpression.append("X" + str(node))
# return "X" + str(node)
# # in case it's a constant, return its value as a list
# elif pathCondition.vs[node]['mm__'] == 'Constant':
# constant = pathCondition.vs[node]['name']
# print "------> " + constant
# constAsList = "["
# for c in range(0,len(constant)):
# constAsList += "'" + constant[c] + "'"
# if c < len(constant) - 1:
# constAsList += ","
# constAsList += "]"
# return constAsList
# # it's a concat operation
# else:
# # get the arguments of the concat operation
# arg1Edge = [i for i in pathCondition.neighbors(node,1) if pathCondition.vs[i]['mm__'] == 'hasArgs'][0]
# arg2Edge = [i for i in pathCondition.neighbors(node,1) if pathCondition.vs[i]['mm__'] == 'hasArgs'][0]
# arg1 = pathCondition.neighbors(arg1Edge,1)[0]
# arg2 = pathCondition.neighbors(arg2Edge,1)[0]
# newVar = self.newVarID()
#
# # add the concat operation to the set of append predicates in the body of the rule
# concatsInExpression.append("append(" + self.build_equation_expression(arg1, pathCondition, variablesInExpression, concatsInExpression) + "," + self.build_equation_expression(arg2, pathCondition, variablesInExpression, concatsInExpression) + "," + newVar + ")")
#
# # return the newly created variable
# return newVar
# def __call__(self, pathCondition):
# """
# Evaluates attribute equations by producing a Prolog predicate out of them and attempting to find a solution for that predicate.
# The predicate has as arguments the attributes of the path condition for which a solution needs to exist such that the path condition is possible.
# If a solution is found then the evaluator returns true, otherwise false.
# """
#
# clauseBody = ""
# variablesInExpression = []
# concatsInExpression = []
#
# # grab all the equation nodes in the path condition
# equationNodes = self._find_nodes_with_mm(pathCondition, "Equation")
# # now build all the equations
# if equationNodes != []:
# for equationNode in range(0,len(equationNodes)):
# # get the left and the right expressions of the equation
# leftExprEdge = [i for i in pathCondition.neighbors(equationNodes[equationNode],1) if pathCondition.vs[i]['mm__'] == 'leftExpr'][0]
# rightExprEdge = [i for i in pathCondition.neighbors(equationNodes[equationNode],1) if pathCondition.vs[i]['mm__'] == 'rightExpr'][0]
#
# leftExprNode = pathCondition.neighbors(leftExprEdge,1)[0]
# rightExprNode = pathCondition.neighbors(rightExprEdge,1)[0]
#
# leftExpr = self.build_equation_expression(leftExprNode, pathCondition, variablesInExpression, concatsInExpression)
# rightExpr = self.build_equation_expression(rightExprNode, pathCondition, variablesInExpression, concatsInExpression)
#
# if equationNode < len(equationNodes)-1:
# clauseBody += leftExpr + "=" + rightExpr + ","
# else:
# clauseBody += leftExpr + "=" + rightExpr
#
# if concatsInExpression != []:
# clauseBody += ","
# for concat in concatsInExpression:
# clauseBody += concat
#
# clauseHead = "solve("
# # variablesInExpression = list(set(variablesInExpression))
# for var in range(0,len(variablesInExpression)):
# if var < len(variablesInExpression)-1:
# clauseHead += variablesInExpression[var] + ","
# else:
# clauseHead += variablesInExpression[var]
# clauseHead += ")"
#
# prologInput = clauseHead + ":-" + clauseBody
#
# if self.verbosity >= 2 :
# print "\nChecking with Prolog:"
# print "----------------"
# print prologInput
# print "\n"
#
# p = Prolog()
# p.assertz(prologInput)
# # l = list(p.query(clauseHead))
#
# print "Clause head: " + clauseHead
# result = list(p.query(clauseHead))
# print "Prolog result:"
# print result
#
# if result == []:
# if self.verbosity >= 2 : print "Prolog check failed!"
# return False
# else:
# if self.verbosity >= 2 : print "Prolog check succeeded!"
# return True
def build_equation_expression(self, node, pathCondition, variablesInExpression, concatsInExpression, varParentObjects):
"""
helper for building the attribute equations by recursively going through the operations associated
to the left hand side and to the right hand side of an equation
"""
# in case it's an attribute, return the object's ID
if pathCondition.vs[node]['mm__'] == 'Attribute':
# get the parent object of the attribute
attrEdgeMatch = [i for i in pathCondition.neighbors(node,2) if pathCondition.vs[i]['mm__'] == 'hasAttribute_S']
attrEdgeApply = [i for i in pathCondition.neighbors(node,2) if pathCondition.vs[i]['mm__'] == 'hasAttribute_T']
if attrEdgeMatch != []:
parentObject = pathCondition.neighbors(attrEdgeMatch[0],2)[0]
else:
parentObject = pathCondition.neighbors(attrEdgeApply[0],2)[0]
# check if a variable for an attribute having the same name and belonging to the same object has already been created
# and in case it has just return it, otherwise create a new variable
attrName = pathCondition.vs[node]['name']
varDatabaseKey = str(parentObject) + attrName
if not varDatabaseKey in set(self.varNameDatabase.keys()):
self.varNameDatabase[varDatabaseKey] = "X" + str(node)
variablesInExpression.append(self.varNameDatabase[varDatabaseKey])
return self.varNameDatabase[varDatabaseKey]
else:
variablesInExpression.append(self.varNameDatabase[varDatabaseKey])
return self.varNameDatabase[varDatabaseKey]
# in case it's a constant, return its value as a list
elif pathCondition.vs[node]['mm__'] == 'Constant':
constant = pathCondition.vs[node]['name']
constAsList = "["
for c in range(0,len(constant)):
constAsList += "'" + constant[c] + "'"
if c < len(constant) - 1:
constAsList += ","
constAsList += "]"
return constAsList
# it's a concat operation
else:
# get the arguments of the concat operation
arg1Edge = [i for i in pathCondition.neighbors(node,1) if pathCondition.vs[i]['mm__'] == 'hasArgs'][0]
arg2Edge = [i for i in pathCondition.neighbors(node,1) if pathCondition.vs[i]['mm__'] == 'hasArgs'][1]
arg1 = pathCondition.neighbors(arg1Edge,1)[0]
arg2 = pathCondition.neighbors(arg2Edge,1)[0]
newVar = self.newVarID()
# add the concat operation to the set of append predicates in the body of the rule
concatsInExpression.append("append(" + self.build_equation_expression(arg1, pathCondition, variablesInExpression, concatsInExpression, varParentObjects) + "," + self.build_equation_expression(arg2, pathCondition, variablesInExpression, concatsInExpression, varParentObjects) + "," + newVar + ")")
# return the newly created variable
return newVar
def __call__(self, pathCondition):
"""
Evaluates attribute equations by producing a Prolog predicate out of them and attempting to find a solution for that predicate.
The predicate has as arguments the attributes of the path condition for which a solution needs to exist such that the path condition is possible.
If a solution is found then the evaluator returns true, otherwise false.
"""
clauseBody = ""
variablesInExpression = []
concatsInExpression = []
varParentObjects = []
# grab all the equation nodes in the path condition
equationNodes = self._find_nodes_with_mm(pathCondition, "Equation")
# now build all the equations
if equationNodes != []:
for equationNode in range(0,len(equationNodes)):
# get the left and the right expressions of the equation
leftExprEdge = [i for i in pathCondition.neighbors(equationNodes[equationNode],1) if pathCondition.vs[i]['mm__'] == 'leftExpr'][0]
rightExprEdge = [i for i in pathCondition.neighbors(equationNodes[equationNode],1) if pathCondition.vs[i]['mm__'] == 'rightExpr'][0]
leftExprNode = pathCondition.neighbors(leftExprEdge,1)[0]
rightExprNode = pathCondition.neighbors(rightExprEdge,1)[0]
leftExpr = self.build_equation_expression(leftExprNode, pathCondition, variablesInExpression, concatsInExpression, varParentObjects)
rightExpr = self.build_equation_expression(rightExprNode, pathCondition, variablesInExpression, concatsInExpression, varParentObjects)
if equationNode < len(equationNodes)-1:
clauseBody += leftExpr + "=" + rightExpr + ","
else:
clauseBody += leftExpr + "=" + rightExpr
for concat in concatsInExpression:
clauseBody += ","
clauseBody += concat
clauseHead = "solve("
# variablesInExpression = list(set(variablesInExpression))
for var in range(0,len(variablesInExpression)):
if var < len(variablesInExpression)-1:
clauseHead += variablesInExpression[var] + ","
else:
clauseHead += variablesInExpression[var]
clauseHead += ")"
prologInput = clauseHead + ":-" + clauseBody
if self.verbosity >= 2 :
print "\nChecking with Prolog:"
print "----------------"
print prologInput
print "\n"
p = Prolog()
p.assertz(prologInput)
# l = list(p.query(clauseHead))
if self.verbosity >= 2 :
print "Clause head: " + clauseHead
result = list(p.query(clauseHead))
print "Prolog result: " + str(result)
if result == []:
if self.verbosity >= 2 : print "Prolog check failed!"
return False
else:
if self.verbosity >= 2 : print "Prolog check succeeded!"
return True
def _find_nodes_with_mm(self, graph, mm_names):
"""
Find all objects of a given type in a rules having theur type name in the mm_names set.
TODO: move this method to the himesis_utils file, together with the one from PyRamify
"""
nodes = []
for node in graph.vs:
if node["mm__"] in mm_names:
nodes.append(node)
return nodes
| 45.804878
| 308
| 0.593412
| 1,304
| 13,146
| 5.917945
| 0.173313
| 0.057017
| 0.026435
| 0.009071
| 0.785798
| 0.774653
| 0.774653
| 0.774653
| 0.752883
| 0.678632
| 0
| 0.009385
| 0.311045
| 13,146
| 287
| 309
| 45.804878
| 0.842663
| 0.465389
| 0
| 0.123711
| 0
| 0
| 0.046652
| 0
| 0
| 0
| 0
| 0.003484
| 0.010309
| 0
| null | null | 0
| 0.020619
| null | null | 0.082474
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
eb628ff80c1703f8ca4d5c9fc6c7de5998d632a9
| 2,212
|
py
|
Python
|
tests/loss/test_nll.py
|
nlp-greyfoss/metagrad
|
0f32f177ced1478f0c75ad37bace9a9fc4044ba3
|
[
"MIT"
] | 7
|
2022-01-27T05:38:02.000Z
|
2022-03-30T01:48:00.000Z
|
tests/loss/test_nll.py
|
nlp-greyfoss/metagrad
|
0f32f177ced1478f0c75ad37bace9a9fc4044ba3
|
[
"MIT"
] | null | null | null |
tests/loss/test_nll.py
|
nlp-greyfoss/metagrad
|
0f32f177ced1478f0c75ad37bace9a9fc4044ba3
|
[
"MIT"
] | 2
|
2022-02-22T07:47:02.000Z
|
2022-03-22T08:31:59.000Z
|
import numpy as np
import torch
import metagrad.functions as F
from metagrad.loss import NLLLoss
from metagrad.tensor import Tensor
def test_simple_nll_loss():
    """NLLLoss on a tiny fixed batch must match torch in both value and gradient."""
    logits = np.array([[0, 1, 2, 3], [4, 0, 2, 1]], np.float32)
    targets = np.array([3, 0]).astype(np.int32)

    mx = Tensor(logits, requires_grad=True)
    mt = Tensor(np.eye(logits.shape[-1])[targets])  # targets as one-hot vectors
    tx = torch.tensor(logits, dtype=torch.float32, requires_grad=True)
    tt = torch.tensor(targets, dtype=torch.int64)

    my_loss = NLLLoss()
    torch_loss = torch.nn.NLLLoss()

    # Both losses take log-probabilities, so apply log_softmax first.
    ml = my_loss(F.log_softmax(mx), mt)
    tl = torch_loss(torch.log_softmax(tx, dim=-1, dtype=torch.float32), tt)
    assert np.allclose(ml.item(), tl.item())

    ml.backward()
    tl.backward()
    assert np.allclose(mx.grad.data, tx.grad.data)
def test_nll_loss():
    """NLLLoss on a larger random batch must match torch in both value and gradient."""
    N, CLS_NUM = 100, 10  # number of samples, number of classes
    logits = np.random.randn(N, CLS_NUM)
    targets = np.random.randint(0, CLS_NUM, (N,))

    mx = Tensor(logits, requires_grad=True)
    mt = Tensor(np.eye(logits.shape[-1])[targets])  # targets as one-hot vectors
    tx = torch.tensor(logits, dtype=torch.float32, requires_grad=True)
    tt = torch.tensor(targets, dtype=torch.int64)

    my_loss = NLLLoss()
    torch_loss = torch.nn.NLLLoss()

    # Both losses take log-probabilities, so apply log_softmax first.
    ml = my_loss(F.log_softmax(mx), mt)
    tl = torch_loss(torch.log_softmax(tx, dim=-1, dtype=torch.float32), tt)
    assert np.allclose(ml.item(), tl.item())

    ml.backward()
    tl.backward()
    assert np.allclose(mx.grad.data, tx.grad.data)
def test_simple_nll_loss_class_indices():
    """NLLLoss must also accept targets given as class indices (not one-hot).

    Same fixture as test_simple_nll_loss, but the metagrad target tensor holds
    the raw class indices. Leftover debug prints of the softmax outputs were
    removed: they only added noise to the test output.
    """
    x = np.array([[0, 1, 2, 3], [4, 0, 2, 1]], np.float32)
    t = np.array([3, 0])

    mx = Tensor(x, requires_grad=True)
    mt = Tensor(t)  # class indices
    tx = torch.tensor(x, dtype=torch.float32, requires_grad=True)
    tt = torch.tensor(t, dtype=torch.int64)

    my_loss = NLLLoss()
    torch_loss = torch.nn.NLLLoss()

    # Both losses take log-probabilities, so apply log_softmax first.
    ml = my_loss(F.log_softmax(mx), mt)
    tl = torch_loss(torch.log_softmax(tx, dim=-1, dtype=torch.float32), tt)
    assert np.allclose(ml.item(), tl.item())

    ml.backward()
    tl.backward()
    assert np.allclose(mx.grad.data, tx.grad.data)
| 27.308642
| 75
| 0.647378
| 353
| 2,212
| 3.94051
| 0.192635
| 0.064702
| 0.069015
| 0.037383
| 0.824587
| 0.800863
| 0.800863
| 0.800863
| 0.777139
| 0.777139
| 0
| 0.030812
| 0.193038
| 2,212
| 81
| 76
| 27.308642
| 0.748459
| 0.052893
| 0
| 0.698113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113208
| 1
| 0.056604
| false
| 0
| 0.09434
| 0
| 0.150943
| 0.037736
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
eb85937236d1432d7cf2aa3c8afa6a02e719d805
| 106
|
py
|
Python
|
test/torchaudio_unittest/sox_io_backend/common.py
|
adefossez/audio
|
19fc580da97baf179395bb257647c5c25b993e42
|
[
"BSD-2-Clause"
] | 1
|
2021-04-20T09:04:24.000Z
|
2021-04-20T09:04:24.000Z
|
test/torchaudio_unittest/sox_io_backend/common.py
|
adefossez/audio
|
19fc580da97baf179395bb257647c5c25b993e42
|
[
"BSD-2-Clause"
] | null | null | null |
test/torchaudio_unittest/sox_io_backend/common.py
|
adefossez/audio
|
19fc580da97baf179395bb257647c5c25b993e42
|
[
"BSD-2-Clause"
] | 1
|
2019-09-11T08:27:18.000Z
|
2019-09-11T08:27:18.000Z
|
def name_func(func, _, params):
    """Build a parameterized-test name: the function name followed by
    each argument in ``params.args``, joined with underscores."""
    suffix = "_".join(str(arg) for arg in params.args)
    return f"{func.__name__}_{suffix}"
| 35.333333
| 73
| 0.679245
| 17
| 106
| 3.764706
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 106
| 2
| 74
| 53
| 0.695652
| 0
| 0
| 0
| 0
| 0.5
| 0.556604
| 0.320755
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
cce57d7777ef8f9a1514c6ae571b1ae493ca0e34
| 26,257
|
py
|
Python
|
datamodels/tests/test_fluxconversion_models.py
|
mwregan2/MiriTE
|
6b65939454db60bf10619d50fcb5769d23598b76
|
[
"CNRI-Python"
] | null | null | null |
datamodels/tests/test_fluxconversion_models.py
|
mwregan2/MiriTE
|
6b65939454db60bf10619d50fcb5769d23598b76
|
[
"CNRI-Python"
] | null | null | null |
datamodels/tests/test_fluxconversion_models.py
|
mwregan2/MiriTE
|
6b65939454db60bf10619d50fcb5769d23598b76
|
[
"CNRI-Python"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Module test_fluxconversion_model - Contains the unit tests for the classes
in the datamodels.miri_fluxconversion_model module.
:History:
16 Jan 2013: Created.
21 Jan 2013: Warning messages controlled with Python warnings module.
05 Feb 2013: File closing problem solved by using "with" context manager.
08 Feb 2013: Replaced 'to_fits' with more generic 'save' method.
17 May 2013: Do not allow a blank table to be created.
22 Aug 2013: columnnames renamed to fieldnames. Check that the field
names declared in the class variable match the schema.
02 Sep 2013: Pass the responsibility for creating record arrays to jwst_lib
- a solution to the "Types in column 0 do not match" problem
suggested by Michael Droettboom at STScI.
Compare numpy record arrays in a way that it independent
of the byte ordering.
12 Sep 2013: Test that the data product can be copied successfully.
30 Oct 2013: LRS and MRS now use different flux conversion model classes.
09 Jul 2014: field_def changed to dq_def.
29 Aug 2014: Added test_referencefile.
07 Oct 2015: Made exception catching Python 3 compatible.
03 Dec 2015: Added MiriPowerlawColourCorrectionModel.
12 Jul 2017: Replaced "clobber" parameter with "overwrite".
24 Oct 2017: Set the pixel size when testing MiriMrsFluxConversionModel
15 Nov 2018: MRS schema switched to use JWST mirmrs_photom.schema.
3-D versions of the MRS data are no longer accepted.
30 Jan 2019: Test that the REFTYPE and DATAMODL metadata is not altered
when the data model is saved to a file.
07 Oct 2019: FIXME: dq_def removed from unit tests until data corruption
bug fixed (Bug 589).
@author: Steven Beard (UKATC)
"""
import os
import unittest
import warnings
import numpy as np
from miri.datamodels.miri_fluxconversion_models import \
MiriImagingFluxconversionModel, MiriImagingColourCorrectionModel, \
MiriPowerlawColourCorrectionModel, MiriLrsFluxconversionModel, \
MiriMrsFluxconversionModel
from miri.datamodels.tests.util import assert_recarray_equal, \
assert_products_equal
class TestMiriImagingFluxconversionModel(unittest.TestCase):
    """Unit tests for the MiriImagingFluxconversionModel data product."""

    def setUp(self):
        # A typical imaging flux conversion table: (filter, factor, uncertainty).
        self.flux = [('F560W', 1.0, 0.0),
                     ('F770W', 1.1, 0.0),
                     ('F1000W', 1.2, 0.01),
                     ('F1130W', 1.3, 0.0),
                     ('F1280W', 1.4, 0.0),
                     ('F1500W', 1.5, 0.02),
                     ('F1800W', 1.6, 0.0),
                     ('F2100W', 1.7, 0.03),
                     ('F2550W', 1.8, 0.0),
                     ]
        self.dataproduct = MiriImagingFluxconversionModel(
            flux_table=self.flux)
        self.testfile = "MiriImagingFluxconversion_test.fits"

    def tearDown(self):
        # Release the data product and its source table.
        del self.dataproduct
        del self.flux
        # Remove the temporary file; warn rather than fail if that is not possible.
        if os.path.isfile(self.testfile):
            try:
                os.remove(self.testfile)
            except Exception as e:
                strg = "Could not remove temporary file, " + self.testfile + \
                    "\n   " + str(e)
                warnings.warn(strg)

    def test_referencefile(self):
        # The data product must carry the standard reference file metadata.
        model_type = self.dataproduct.meta.model_type
        reftype = self.dataproduct.meta.reftype
        self.assertIsNotNone(model_type)
        self.assertIsNotNone(reftype)
        pedigree = self.dataproduct.meta.pedigree
        self.assertIsNotNone(pedigree)

    def test_creation(self):
        # The fieldnames declared on the class must agree with the schema.
        declared = list(MiriImagingFluxconversionModel.fieldnames)
        from_schema = list(self.dataproduct.get_field_names('flux_table'))
        self.assertEqual(declared, from_schema,
                         "'fieldnames' class variable does not match schema")
        # An empty data product can be created and populated later.
        # Doing so emits a warning, which is suppressed here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            nulldp = MiriImagingFluxconversionModel()
            descr1 = str(nulldp)
            self.assertIsNotNone(descr1)
            nulldp.flux_table = self.flux
            self.assertIsNotNone(nulldp.flux_table)
            descr2 = str(nulldp)
            self.assertIsNotNone(descr2)
            del nulldp, descr1, descr2

    def test_copy(self):
        # Copying the data product can emit warnings; suppress them.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            datacopy = self.dataproduct.copy()
            self.assertIsNotNone(datacopy.flux_table)
            self.assertEqual(len(self.dataproduct.flux_table),
                             len(datacopy.flux_table))
            original_table = np.asarray(self.dataproduct.flux_table)
            copied_table = np.asarray(datacopy.flux_table)
            assert_recarray_equal(original_table, copied_table)
            del datacopy

    def test_fitsio(self):
        # Suppress metadata warnings while exercising FITS round-trip I/O.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Writing to a FITS file and reading back must not change the data.
            self.dataproduct.save(self.testfile, overwrite=True)
            with MiriImagingFluxconversionModel(self.testfile) as readback:
                self.assertEqual(self.dataproduct.meta.reftype,
                                 readback.meta.reftype)
                self.assertEqual(self.dataproduct.meta.model_type,
                                 readback.meta.model_type)
                self.assertIsNotNone(readback.flux_table)
                self.assertEqual(len(self.dataproduct.flux_table),
                                 len(readback.flux_table))
                written = np.asarray(self.dataproduct.flux_table)
                restored = np.asarray(readback.flux_table)
                assert_recarray_equal(written, restored)
                del readback

    def test_description(self):
        # Each description function must run without error and
        # produce a non-null string (flux table included).
        for descr in (str(self.dataproduct),
                      repr(self.dataproduct),
                      str(self.dataproduct.flux_table)):
            self.assertIsNotNone(descr)
class TestMiriImagingColourCorrectionModel(unittest.TestCase):
    """Unit tests for the MiriImagingColourCorrectionModel data product."""

    def setUp(self):
        # A typical colour correction table: (temperature, filter, factor, uncertainty).
        self.flux = [(10.0, 'F560W', 1.0, 0.0),
                     (10.0, 'F770W', 1.1, 0.0),
                     (10.0, 'F1000W', 1.2, 0.01),
                     (10.0, 'F1130W', 1.3, 0.0),
                     (10.0, 'F1280W', 1.4, 0.0),
                     (10.0, 'F1500W', 1.5, 0.02),
                     (10.0, 'F1800W', 1.6, 0.0),
                     (10.0, 'F2100W', 1.7, 0.03),
                     (10.0, 'F2550W', 1.8, 0.0),
                     ]
        self.dataproduct = MiriImagingColourCorrectionModel(
            flux_table=self.flux)
        self.testfile = "MiriImagingColourCorrectionModel_test.fits"

    def tearDown(self):
        # Release the data product and its source table.
        del self.dataproduct
        del self.flux
        # Remove the temporary file; warn rather than fail if that is not possible.
        if os.path.isfile(self.testfile):
            try:
                os.remove(self.testfile)
            except Exception as e:
                strg = "Could not remove temporary file, " + self.testfile + \
                    "\n   " + str(e)
                warnings.warn(strg)

    def test_referencefile(self):
        # The data product must carry the standard reference file metadata.
        model_type = self.dataproduct.meta.model_type
        reftype = self.dataproduct.meta.reftype
        self.assertIsNotNone(model_type)
        self.assertIsNotNone(reftype)
        pedigree = self.dataproduct.meta.pedigree
        self.assertIsNotNone(pedigree)

    def test_creation(self):
        # The fieldnames declared on the class must agree with the schema.
        declared = list(MiriImagingColourCorrectionModel.fieldnames)
        from_schema = list(self.dataproduct.get_field_names('flux_table'))
        self.assertEqual(declared, from_schema,
                         "'fieldnames' class variable does not match schema")
        # An empty data product can be created and populated later.
        # Doing so emits a warning, which is suppressed here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            nulldp = MiriImagingColourCorrectionModel()
            descr1 = str(nulldp)
            self.assertIsNotNone(descr1)
            nulldp.flux_table = self.flux
            self.assertIsNotNone(nulldp.flux_table)
            descr2 = str(nulldp)
            self.assertIsNotNone(descr2)
            del nulldp, descr1, descr2

    def test_copy(self):
        # Copying the data product can emit warnings; suppress them.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            datacopy = self.dataproduct.copy()
            self.assertIsNotNone(datacopy.flux_table)
            self.assertEqual(len(self.dataproduct.flux_table),
                             len(datacopy.flux_table))
            original_table = np.asarray(self.dataproduct.flux_table)
            copied_table = np.asarray(datacopy.flux_table)
            assert_recarray_equal(original_table, copied_table)
            del datacopy

    def test_fitsio(self):
        # Suppress metadata warnings while exercising FITS round-trip I/O.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Writing to a FITS file and reading back must not change the data.
            self.dataproduct.save(self.testfile, overwrite=True)
            with MiriImagingColourCorrectionModel(self.testfile) as readback:
                self.assertEqual(self.dataproduct.meta.reftype,
                                 readback.meta.reftype)
                self.assertEqual(self.dataproduct.meta.model_type,
                                 readback.meta.model_type)
                self.assertIsNotNone(readback.flux_table)
                self.assertEqual(len(self.dataproduct.flux_table),
                                 len(readback.flux_table))
                written = np.asarray(self.dataproduct.flux_table)
                restored = np.asarray(readback.flux_table)
                assert_recarray_equal(written, restored)
                del readback

    def test_description(self):
        # Each description function must run without error and
        # produce a non-null string (flux table included).
        for descr in (str(self.dataproduct),
                      repr(self.dataproduct),
                      str(self.dataproduct.flux_table)):
            self.assertIsNotNone(descr)
class TestMiriPowerlawColourCorrectionModel(unittest.TestCase):
    """Unit tests for the MiriPowerlawColourCorrectionModel data product.

    NOTE: the original header comment named MiriImagingColourCorrectionModel,
    which was a copy-paste slip; this class tests the power-law variant.
    """

    def setUp(self):
        # A typical power-law colour correction table: (exponent, filter, factor, uncertainty).
        self.flux = [(1.1, 'F560W', 1.0, 0.0),
                     (1.1, 'F770W', 1.1, 0.0),
                     (1.2, 'F1000W', 1.2, 0.01),
                     (1.3, 'F1130W', 1.3, 0.0),
                     (1.4, 'F1280W', 1.4, 0.0),
                     (1.5, 'F1500W', 1.5, 0.02),
                     (1.6, 'F1800W', 1.6, 0.0),
                     (1.7, 'F2100W', 1.7, 0.03),
                     (1.8, 'F2550W', 1.8, 0.0),
                     ]
        self.dataproduct = MiriPowerlawColourCorrectionModel(
            flux_table=self.flux)
        self.testfile = "MiriPowerlawColourCorrectionModel_test.fits"

    def tearDown(self):
        # Release the data product and its source table.
        del self.dataproduct
        del self.flux
        # Remove the temporary file; warn rather than fail if that is not possible.
        if os.path.isfile(self.testfile):
            try:
                os.remove(self.testfile)
            except Exception as e:
                strg = "Could not remove temporary file, " + self.testfile + \
                    "\n   " + str(e)
                warnings.warn(strg)

    def test_referencefile(self):
        # The data product must carry the standard reference file metadata.
        model_type = self.dataproduct.meta.model_type
        reftype = self.dataproduct.meta.reftype
        self.assertIsNotNone(model_type)
        self.assertIsNotNone(reftype)
        pedigree = self.dataproduct.meta.pedigree
        self.assertIsNotNone(pedigree)

    def test_creation(self):
        # The fieldnames declared on the class must agree with the schema.
        declared = list(MiriPowerlawColourCorrectionModel.fieldnames)
        from_schema = list(self.dataproduct.get_field_names('flux_table'))
        self.assertEqual(declared, from_schema,
                         "'fieldnames' class variable does not match schema")
        # An empty data product can be created and populated later.
        # Doing so emits a warning, which is suppressed here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            nulldp = MiriPowerlawColourCorrectionModel()
            descr1 = str(nulldp)
            self.assertIsNotNone(descr1)
            nulldp.flux_table = self.flux
            self.assertIsNotNone(nulldp.flux_table)
            descr2 = str(nulldp)
            self.assertIsNotNone(descr2)
            del nulldp, descr1, descr2

    def test_copy(self):
        # Copying the data product can emit warnings; suppress them.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            datacopy = self.dataproduct.copy()
            self.assertIsNotNone(datacopy.flux_table)
            self.assertEqual(len(self.dataproduct.flux_table),
                             len(datacopy.flux_table))
            original_table = np.asarray(self.dataproduct.flux_table)
            copied_table = np.asarray(datacopy.flux_table)
            assert_recarray_equal(original_table, copied_table)
            del datacopy

    def test_fitsio(self):
        # Suppress metadata warnings while exercising FITS round-trip I/O.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Writing to a FITS file and reading back must not change the data.
            self.dataproduct.save(self.testfile, overwrite=True)
            with MiriPowerlawColourCorrectionModel(self.testfile) as readback:
                self.assertEqual(self.dataproduct.meta.reftype,
                                 readback.meta.reftype)
                self.assertEqual(self.dataproduct.meta.model_type,
                                 readback.meta.model_type)
                self.assertIsNotNone(readback.flux_table)
                self.assertEqual(len(self.dataproduct.flux_table),
                                 len(readback.flux_table))
                written = np.asarray(self.dataproduct.flux_table)
                restored = np.asarray(readback.flux_table)
                assert_recarray_equal(written, restored)
                del readback

    def test_description(self):
        # Each description function must run without error and
        # produce a non-null string (flux table included).
        for descr in (str(self.dataproduct),
                      repr(self.dataproduct),
                      str(self.dataproduct.flux_table)):
            self.assertIsNotNone(descr)
class TestMiriLrsFluxconversionModel(unittest.TestCase):
    """Unit tests for the MiriLrsFluxconversionModel data product."""

    def setUp(self):
        # A typical LRS flux conversion table: (wavelength, factor, uncertainty).
        self.flux = [( 2.0, 1.0, 0.0),
                     ( 4.0, 1.1, 0.0),
                     ( 6.0, 1.2, 0.01),
                     ( 8.0, 1.3, 0.0),
                     (10.0, 1.4, 0.0),
                     (12.0, 1.5, 0.02),
                     (14.0, 1.6, 0.0),
                     (16.0, 1.7, 0.03),
                     (18.0, 1.8, 0.0),
                     ]
        self.dataproduct = MiriLrsFluxconversionModel(
            flux_table=self.flux)
        self.testfile = "MiriLrsFluxconversion_test.fits"

    def tearDown(self):
        # Release the data product and its source table.
        del self.dataproduct
        del self.flux
        # Remove the temporary file; warn rather than fail if that is not possible.
        if os.path.isfile(self.testfile):
            try:
                os.remove(self.testfile)
            except Exception as e:
                strg = "Could not remove temporary file, " + self.testfile + \
                    "\n   " + str(e)
                warnings.warn(strg)

    def test_referencefile(self):
        # The data product must carry the standard reference file metadata.
        model_type = self.dataproduct.meta.model_type
        reftype = self.dataproduct.meta.reftype
        self.assertIsNotNone(model_type)
        self.assertIsNotNone(reftype)
        pedigree = self.dataproduct.meta.pedigree
        self.assertIsNotNone(pedigree)

    def test_creation(self):
        # The fieldnames declared on the class must agree with the schema.
        declared = list(MiriLrsFluxconversionModel.fieldnames)
        from_schema = list(self.dataproduct.get_field_names('flux_table'))
        self.assertEqual(declared, from_schema,
                         "'fieldnames' class variable does not match schema")
        # An empty data product can be created and populated later.
        # Doing so emits a warning, which is suppressed here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            nulldp = MiriLrsFluxconversionModel()
            descr1 = str(nulldp)
            self.assertIsNotNone(descr1)
            nulldp.flux_table = self.flux
            self.assertIsNotNone(nulldp.flux_table)
            descr2 = str(nulldp)
            self.assertIsNotNone(descr2)
            del nulldp, descr1, descr2

    def test_copy(self):
        # Copying the data product can emit warnings; suppress them.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            datacopy = self.dataproduct.copy()
            self.assertIsNotNone(datacopy.flux_table)
            self.assertEqual(len(self.dataproduct.flux_table),
                             len(datacopy.flux_table))
            original_table = np.asarray(self.dataproduct.flux_table)
            copied_table = np.asarray(datacopy.flux_table)
            assert_recarray_equal(original_table, copied_table)
            del datacopy

    def test_fitsio(self):
        # Suppress metadata warnings while exercising FITS round-trip I/O.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Writing to a FITS file and reading back must not change the data.
            self.dataproduct.save(self.testfile, overwrite=True)
            with MiriLrsFluxconversionModel(self.testfile) as readback:
                self.assertEqual(self.dataproduct.meta.reftype,
                                 readback.meta.reftype)
                self.assertEqual(self.dataproduct.meta.model_type,
                                 readback.meta.model_type)
                self.assertIsNotNone(readback.flux_table)
                self.assertEqual(len(self.dataproduct.flux_table),
                                 len(readback.flux_table))
                written = np.asarray(self.dataproduct.flux_table)
                restored = np.asarray(readback.flux_table)
                assert_recarray_equal(written, restored)
                del readback

    def test_description(self):
        # Each description function must run without error and
        # produce a non-null string (flux table included).
        for descr in (str(self.dataproduct),
                      repr(self.dataproduct),
                      str(self.dataproduct.flux_table)):
            self.assertIsNotNone(descr)
class TestMiriMrsFluxconversionModel(unittest.TestCase):
    """Unit tests for the MiriMrsFluxconversionModel data product.

    Unlike the table-based models above, the MRS product holds array
    attributes: data (flux), err, dq and pixsiz.
    """

    def setUp(self):
        # Create a typical MRS flux conversion product.
        self.flux = [(1.0, 1.1, 1.2),
                     (1.3, 1.4, 1.5),
                     (1.6, 1.7, 1.8)]
        self.err = [(0.0, 0.01, 0.0),
                    (0.02, 0.0, 0.03),
                    (0.01, 0.04, 0.0)]
        self.dq = [(1,0,0), (0,1,0), (1,0,1)]
        self.pixsiz = [(1.01, 1.02, 1.03),
                       (1.04, 1.05, 1.06),
                       (1.07, 1.08, 1.09)]
        self.dataproduct = MiriMrsFluxconversionModel(
            data=self.flux, err=self.err, dq=self.dq,
            pixsiz=self.pixsiz)
        self.testfile = "MiriMrsFluxconversion_test.fits"

    def tearDown(self):
        # Tidy up.
        # FIX: previously only self.flux was released here, leaving the
        # err/dq/pixsiz fixtures behind; delete everything created in setUp
        # for consistency with the other test classes in this module.
        del self.dataproduct
        del self.flux, self.err, self.dq, self.pixsiz
        # Remove temporary file, if able to.
        if os.path.isfile(self.testfile):
            try:
                os.remove(self.testfile)
            except Exception as e:
                strg = "Could not remove temporary file, " + self.testfile + \
                    "\n   " + str(e)
                warnings.warn(strg)

    def test_referencefile(self):
        # Check that the data product contains the standard
        # reference file metadata.
        type1 = self.dataproduct.meta.model_type
        type2 = self.dataproduct.meta.reftype
        self.assertIsNotNone(type1)
        self.assertIsNotNone(type2)
        pedigree = self.dataproduct.meta.pedigree
        self.assertIsNotNone(pedigree)

    def test_creation(self):
        # It must be possible to create an empty data product and fill
        # in its contents later. This will generate a warning.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            nulldp = MiriMrsFluxconversionModel()
            descr1 = str(nulldp)
            self.assertIsNotNone(descr1)
            # NOTE: pixsiz must be defined first to prevent a
            # "Wrong number of dimensions" exception.
            nulldp.pixsiz = self.pixsiz
            self.assertIsNotNone(nulldp.pixsiz)
            nulldp.data = self.flux
            self.assertIsNotNone(nulldp.data)
            descr2 = str(nulldp)
            self.assertIsNotNone(descr2)
            del nulldp, descr1, descr2

    def test_copy(self):
        # Test that a copy can be made of the data product.
        # This will generate a warning.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            datacopy = self.dataproduct.copy()
            self.assertIsNotNone(datacopy.data)
            self.assertEqual(len(self.dataproduct.data),
                             len(datacopy.data))
            flux1 = np.asarray(self.dataproduct.data)
            flux2 = np.asarray(datacopy.data)
            # nan_to_num lets the comparison tolerate NaN fill values.
            self.assertTrue(np.allclose(np.nan_to_num(flux1),
                                        np.nan_to_num(flux2)))
            del datacopy

    def test_fitsio(self):
        # Suppress metadata warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check that the data product can be written to a FITS
            # file and read back again without changing the data.
            self.dataproduct.save(self.testfile, overwrite=True)
            with MiriMrsFluxconversionModel(self.testfile) as readback:
                self.assertEqual(self.dataproduct.meta.reftype,
                                 readback.meta.reftype)
                self.assertEqual(self.dataproduct.meta.model_type,
                                 readback.meta.model_type)
                assert_products_equal(self, self.dataproduct, readback,
                                      arrays=['data', 'err', 'dq'])
                # FIXME: removed dq_def until data corruption bug fixed. Bug 589
                #                       tables='dq_def' )
                del readback

    def test_description(self):
        # Test that the querying and description functions work.
        # For the test to pass these need to run without error
        # and generate non-null strings.
        descr = str(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = repr(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        # Attempt to access the flux data through attributes.
        descr = str(self.dataproduct.data)
        self.assertIsNotNone(descr)
        del descr
# If being run as a main program, discover and run all the tests above.
if __name__ == '__main__':
    unittest.main()
| 41.090767
| 81
| 0.593556
| 2,895
| 26,257
| 5.316753
| 0.114335
| 0.080886
| 0.03086
| 0.031185
| 0.788137
| 0.767607
| 0.742009
| 0.726806
| 0.726806
| 0.726806
| 0
| 0.033363
| 0.325361
| 26,257
| 638
| 82
| 41.155172
| 0.835554
| 0.231024
| 0
| 0.751756
| 0
| 0
| 0.043372
| 0.009063
| 0
| 0
| 0
| 0.003135
| 0.210773
| 1
| 0.081967
| false
| 0
| 0.014052
| 0
| 0.107728
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ccfadcb2a649bb4accf6e5eacf8a6695479cb3b6
| 28
|
py
|
Python
|
acq4/devices/LEDLightSource/__init__.py
|
ablot/acq4
|
ba7cd340d9d0282640adb501d3788f8c0837e4c4
|
[
"MIT"
] | null | null | null |
acq4/devices/LEDLightSource/__init__.py
|
ablot/acq4
|
ba7cd340d9d0282640adb501d3788f8c0837e4c4
|
[
"MIT"
] | null | null | null |
acq4/devices/LEDLightSource/__init__.py
|
ablot/acq4
|
ba7cd340d9d0282640adb501d3788f8c0837e4c4
|
[
"MIT"
] | null | null | null |
from LEDLightSource import *
| 28
| 28
| 0.857143
| 3
| 28
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
15c4ace2748e518067bbf742ab10a237fc1ae080
| 44
|
py
|
Python
|
anthropos/anthropos/__init__.py
|
flaxandteal/arches-anthropos-poc
|
12ecd976070120c24699cff2e03b3ad7b0b19156
|
[
"MIT"
] | null | null | null |
anthropos/anthropos/__init__.py
|
flaxandteal/arches-anthropos-poc
|
12ecd976070120c24699cff2e03b3ad7b0b19156
|
[
"MIT"
] | null | null | null |
anthropos/anthropos/__init__.py
|
flaxandteal/arches-anthropos-poc
|
12ecd976070120c24699cff2e03b3ad7b0b19156
|
[
"MIT"
] | null | null | null |
from .celery_local import app as celery_app
| 22
| 43
| 0.840909
| 8
| 44
| 4.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 44
| 1
| 44
| 44
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
15cd85dfea3b837fb5768320e3c8caa598f67573
| 188
|
py
|
Python
|
kcfconvoy/__init__.py
|
maskot1977/kcfconvoy
|
846602843534ae1a97e16b1eff97d3be32c98119
|
[
"MIT"
] | null | null | null |
kcfconvoy/__init__.py
|
maskot1977/kcfconvoy
|
846602843534ae1a97e16b1eff97d3be32c98119
|
[
"MIT"
] | null | null | null |
kcfconvoy/__init__.py
|
maskot1977/kcfconvoy
|
846602843534ae1a97e16b1eff97d3be32c98119
|
[
"MIT"
] | 1
|
2017-12-22T02:21:52.000Z
|
2017-12-22T02:21:52.000Z
|
# coding: utf-8
from .Compound import Compound
from .KCFvec import KCFvec
from .KCFmat import KCFmat
from .Library import Library
from .util import similarity
from .util import Classifiers
| 26.857143
| 30
| 0.81383
| 27
| 188
| 5.666667
| 0.444444
| 0.104575
| 0.183007
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006173
| 0.138298
| 188
| 7
| 31
| 26.857143
| 0.938272
| 0.069149
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
15f329a893cf8a7852eb144c9092e643121f50df
| 37
|
py
|
Python
|
app/core/__init__.py
|
educaware/camera-server
|
80af9f763d0b7299acb2e3851a095f19aaa7e0e5
|
[
"MIT"
] | 80
|
2020-10-06T00:35:57.000Z
|
2022-03-31T19:56:24.000Z
|
app/core/__init__.py
|
educaware/camera-server
|
80af9f763d0b7299acb2e3851a095f19aaa7e0e5
|
[
"MIT"
] | 8
|
2022-02-28T19:11:51.000Z
|
2022-03-31T10:25:42.000Z
|
app/core/__init__.py
|
educaware/camera-server
|
80af9f763d0b7299acb2e3851a095f19aaa7e0e5
|
[
"MIT"
] | 24
|
2020-11-14T03:04:43.000Z
|
2022-03-11T15:44:44.000Z
|
from app.core.config import settings
| 18.5
| 36
| 0.837838
| 6
| 37
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
15f5ebc131fdc723e8179ca13b2321881bfb8315
| 162
|
py
|
Python
|
me/commands/_utils.py
|
johnbenjaminlewis/me
|
974d11ea2dd2a877785351f200e4c8546672c1c4
|
[
"MIT"
] | 1
|
2015-11-24T15:10:20.000Z
|
2015-11-24T15:10:20.000Z
|
me/commands/_utils.py
|
johnbenjaminlewis/me
|
974d11ea2dd2a877785351f200e4c8546672c1c4
|
[
"MIT"
] | null | null | null |
me/commands/_utils.py
|
johnbenjaminlewis/me
|
974d11ea2dd2a877785351f200e4c8546672c1c4
|
[
"MIT"
] | null | null | null |
import click
def write(msg):
    """Echo an informational message to stderr in cyan."""
    return click.secho(msg, err=True, fg='cyan')
def fail(msg):
    """Echo an error message to stderr in red, then exit with status 1."""
    click.secho(msg, err=True, fg='red')
    raise SystemExit(1)
| 14.727273
| 48
| 0.648148
| 26
| 162
| 4.038462
| 0.615385
| 0.190476
| 0.247619
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 0.191358
| 162
| 10
| 49
| 16.2
| 0.793893
| 0
| 0
| 0
| 0
| 0
| 0.04321
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
c63eff3c90e622ffa260f3d7ea337978f6323b96
| 99
|
py
|
Python
|
flankers/tagmeapi/secret/keys.py
|
Mec-iS/chronostriples-backup
|
79bdd902dc1d4862597469d4ac127f41bb5c1059
|
[
"Apache-2.0"
] | null | null | null |
flankers/tagmeapi/secret/keys.py
|
Mec-iS/chronostriples-backup
|
79bdd902dc1d4862597469d4ac127f41bb5c1059
|
[
"Apache-2.0"
] | null | null | null |
flankers/tagmeapi/secret/keys.py
|
Mec-iS/chronostriples-backup
|
79bdd902dc1d4862597469d4ac127f41bb5c1059
|
[
"Apache-2.0"
] | null | null | null |
__author__ = ['lorenzo@pramantha.net']
def return_api_key():
return '**********************'
| 16.5
| 38
| 0.525253
| 9
| 99
| 5.111111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131313
| 99
| 5
| 39
| 19.8
| 0.534884
| 0
| 0
| 0
| 0
| 0
| 0.434343
| 0.434343
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
d6aea201cc93e6410f9e00902f225cd98c631db0
| 42
|
py
|
Python
|
python/testData/refactoring/move/importForMovedElementWithPreferredQualifiedImportStyle/before/src/a.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/refactoring/move/importForMovedElementWithPreferredQualifiedImportStyle/before/src/a.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/refactoring/move/importForMovedElementWithPreferredQualifiedImportStyle/before/src/a.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def foo():
    """Delegate to bar(); returns nothing."""
    bar()


def bar():
    """No-op placeholder."""
| 7
| 10
| 0.452381
| 6
| 42
| 3.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.357143
| 42
| 6
| 11
| 7
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.25
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
ba52d9d26cd98a2419f3fed29c562594ecec07ce
| 20,701
|
py
|
Python
|
gym_collision_avoidance/experiments/src/master_config_deploy.py
|
meghdeepj/Social-Navigation-Simulator
|
806d304081bf5ff4fc7a0a58defb050627375865
|
[
"MIT"
] | null | null | null |
gym_collision_avoidance/experiments/src/master_config_deploy.py
|
meghdeepj/Social-Navigation-Simulator
|
806d304081bf5ff4fc7a0a58defb050627375865
|
[
"MIT"
] | null | null | null |
gym_collision_avoidance/experiments/src/master_config_deploy.py
|
meghdeepj/Social-Navigation-Simulator
|
806d304081bf5ff4fc7a0a58defb050627375865
|
[
"MIT"
] | null | null | null |
# Total number of agents simulated in the deployment experiments.
# NOTE(review): some settings below (e.g. POLICIES_TO_TEST, NUM_AGENTS_TO_TEST)
# hard-code 60 instead of using this constant - confirm which is intended.
number_of_agent = 100
import os
from master_scenario_generator import Scenario_Generator, Seeded_Scenario_Generator, Seeded_Population_Scenario_Generator, real_dataset_traj
class Master_Config(object):
    """Top-level experiment configuration, driven by environment variables.

    Reads global_timeout, global_experiment_number, global_dataset_name and
    global_population_density from os.environ, selects the scenario settings
    (agent-count/velocity statistics, arena bounds, plot limits) for the
    requested experiment, and sets the evaluation flags used by the simulator.
    """
    def __init__(self):
        # All launcher-supplied parameters arrive via environment variables;
        # missing ones raise KeyError, non-numeric ones raise ValueError.
        global_timeout = int(os.environ["global_timeout"])
        global_experiment_number = int(os.environ["global_experiment_number"])
        global_dataset_name = os.environ["global_dataset_name"]
        global_population_density = float(os.environ["global_population_density"])
        # Selected below according to the experiment number.
        self.exp_setting = None
        #####################################################################################################################################################
        # Per-dataset scenario statistics. Each list holds, in order:
        #              num mean        num std dev     vel mean        vel std dev      x_min          x_max       y_min     y_max    plot_size
        self.ETH   = [ 6.312138728    ,4.536521361    ,2.339926573    ,0.7502205478    ,-7.69          ,14.42      ,-3.17    ,13.21       , [[-10, 17], [-6, 16]]     ]
        self.HOTEL = [ 5.598098531    ,3.418910729    ,1.137002131    ,0.6487607538    ,-3.25          ,4.35       ,-10.31   ,4.31        , [[-7,7],[-13,7]]          ]
        self.UNIV  = [ 40.83024533    ,6.734736777    ,0.6817478507   ,0.2481828799    ,-0.4619709156  ,15.46918556  ,-0.3183721728  ,13.89190962  , [[-3,17],[-3,15]]  ]
        self.ZARA1 = [ 5.87224158     ,3.213275774    ,1.12739064     ,0.2946279183    ,-0.1395383677  ,15.48055067  ,-0.3746958856  ,12.38644361  , [[-3,17],[-3,15]]  ]
        self.ZARA2 = [ 9.314121037    ,3.926104465    ,1.096467485    ,0.3849301882    ,-0.3577906864  ,15.55842276  ,-0.2737427903  ,13.94274416  , [[-3,17],[-3,15]]  ]
        ########################################################################################################
        # Synthetic population-density scenario (same column layout as above).
        #                    num mean        num std dev     vel mean        vel std dev      x_min     x_max   y_min    y_max    plot_size
        #self.POPULATION = [ None           ,None           ,1              ,None            ,0         ,10     ,0       ,10      , [[-1,11],[-1,11]]   ]
        self.POPULATION  = [ None           ,None           ,1              ,None            ,0         ,5      ,0       ,5       , [[-1,6],[-1,6]]     ]
        # Generate random scenario here: pick the settings for the requested
        # experiment and expose the matplotlib plot limits (column index 8).
        if global_experiment_number == 1:  # Simulate algorithm using settings from datasets! (e.g. ETH)
            if global_dataset_name == "ETH"     : self.exp_setting = self.ETH
            elif global_dataset_name == "HOTEL" : self.exp_setting = self.HOTEL
            elif global_dataset_name == "UNIV"  : self.exp_setting = self.UNIV
            elif global_dataset_name == "ZARA1" : self.exp_setting = self.ZARA1
            elif global_dataset_name == "ZARA2" : self.exp_setting = self.ZARA2
            # NOTE(review): an unrecognized dataset name leaves exp_setting as
            # None and the next line raises TypeError - confirm the launcher
            # validates global_dataset_name upstream.
            self.PLT_LIMITS = self.exp_setting[8]
        elif global_experiment_number == 2:  # population density evaluation
            #####for high population density, reduce size, hence less agents required######
            #if global_population_density >= 0.5:
            #    self.POPULATION = [ None ,None ,1 ,None ,0 ,5 ,0 ,5 , [[-1,6],[-1,6]] ]
            self.exp_setting = self.POPULATION
            self.PLT_LIMITS = self.exp_setting[8]
        elif global_experiment_number == 3:  # touranment 1 vs n-1
            self.exp_setting = self.POPULATION
            self.PLT_LIMITS = self.exp_setting[8]
        elif global_experiment_number == 4:  # touranment 50% vs 50%
            self.exp_setting = self.POPULATION
            self.PLT_LIMITS = self.exp_setting[8]
        ############################################
        # EvaluateConfig level: always evaluate, never train.
        self.EVALUATE_MODE = True
        self.TRAIN_MODE = False
        self.DT = 0.1  # simulation timestep, seconds (was 0.1)
        self.MAX_TIME_RATIO = 3.  # episode time budget multiplier (was 8.)
        # Formations config level: plotting behaviour.
        self.SHOW_EPISODE_PLOTS = False  # plot while sim
        self.SAVE_EPISODE_PLOTS = self.ANIMATE_EPISODES = False  # output gif + mp4
        self.NEAR_GOAL_THRESHOLD = 0.2
        # Reference plot limits per scenario (for convenience):
        # ETH [[-10, 17], [-6, 16]], HOTEL [[-7,7],[-13,7]],
        # UNIV/ZARA1/ZARA2 [[-3,17],[-3,15]], Population [[-1,11],[-1,11]],
        # Motion prediction [[-10,10],[-10,10]]
        self.PLT_FIG_SIZE = (10,10)  # Actual hidden limit
        self.PLOT_CIRCLES_ALONG_TRAJ = False
        self.NUM_AGENTS_TO_TEST = [60]
        # Policy assignment: one policy name per agent.
        #self.POLICIES_TO_TEST = ['GA3C-CADRL-10']
        self.POLICIES_TO_TEST = ['GA3C-CADRL-10']*60  #['RVO']*number_of_agent#['STGCNN']*number_of_agent #['CADRL']*7#['NAVIGAN']*7#['RVO']*7#['GA3C-CADRL-10']*7
        self.NUM_TEST_CASES = 2  # correspond to how many letters are there
        self.MAX_NUM_OTHER_AGENTS_OBSERVED = number_of_agent * 3
        self.MAX_NUM_AGENTS_IN_ENVIRONMENT = self.MAX_NUM_OTHER_AGENTS_OBSERVED + 1
        self.agent_time_out = global_timeout  # 180 seconds normal, 30 for motion prediction
class Scenario_Config(object):
    """Build the list of simulation scenarios for one experiment run.

    Depending on ``experiment_number`` the constructor selects a parameter
    preset (real-dataset statistics or the synthetic population preset) and
    fills ``self.scenario`` with one generated scenario per iteration.

    Experiment numbers:
        1 -- replay real trajectories from a pedestrian dataset (ETH/HOTEL/UNIV/ZARA1/ZARA2)
        2 -- population-density evaluation, all agents run one algorithm
        3 -- tournament: 1 agent of algorithm A vs n-1 agents of algorithm B
        4 -- tournament: alternating 50% A / 50% B
        5 -- homogeneous population sized from the density parameter

    Args:
        experiment_number: selects one of the branches above.
        algorithm_name: a policy name, or (experiments 3/4) a string of the
            form "[A,B]" that is parsed back into a list below.
        experiment_iteration_num: number of seeded scenarios to generate.
        dataset_name: only used for experiment 1.
        population_density: agents per square metre; used for experiments 2-5.
    """
    def __init__(self, experiment_number, algorithm_name, experiment_iteration_num, dataset_name=None, population_density=None):
        self.exp_setting = None
        #####################################################################################################################################################
        # Dataset presets; index layout:
        #   [0] num mean   [1] num std dev   [2] vel mean   [3] vel std dev
        #   [4] x_min      [5] x_max         [6] y_min      [7] y_max        [8] plot_size
        self.ETH = [ 6.312138728 ,4.536521361 ,2.339926573 ,0.7502205478 ,-7.69 ,14.42 ,-3.17 ,13.21 , [[-10, 17], [-6, 16]] ]
        self.HOTEL = [ 5.598098531 ,3.418910729 ,1.137002131 ,0.6487607538 ,-3.25 ,4.35 ,-10.31 ,4.31 , [[-7,7],[-13,7]] ]
        self.UNIV = [ 40.83024533 ,6.734736777 ,0.6817478507 ,0.2481828799 ,-0.4619709156 ,15.46918556 ,-0.3183721728 ,13.89190962 , [[-3,17],[-3,15]] ]
        self.ZARA1 = [ 5.87224158 ,3.213275774 ,1.12739064 ,0.2946279183 ,-0.1395383677 ,15.48055067 ,-0.3746958856 ,12.38644361 , [[-3,17],[-3,15]] ]
        self.ZARA2 = [ 9.314121037 ,3.926104465 ,1.096467485 ,0.3849301882 ,-0.3577906864 ,15.55842276 ,-0.2737427903 ,13.94274416 , [[-3,17],[-3,15]] ]
        ########################################################################################################
        # Synthetic population preset (same index layout as the dataset rows):
        # a fixed 5x5 area with unit preferred speed; agent count is derived
        # from population_density in the branches below.
        #self.POPULATION = [ None ,None ,1 ,None ,0 ,10 ,0 ,10 , [[-1,11],[-1,11]] ]
        self.POPULATION = [ None ,None ,1 ,None ,0 ,5 ,0 ,5 , [[-1,6],[-1,6]] ]
        #generate random scenario here, write a function to generate and pass to self.scenario
        if experiment_number == 1: #Simulate algorithm using settings from datasets! (e.g. ETH)
            #print(dataset_name)
            # Pick the preset matching the requested dataset.
            # NOTE(review): an unknown dataset_name leaves self.exp_setting as
            # None and the loop below would raise TypeError on indexing —
            # confirm callers always pass one of the five known names.
            if dataset_name == "ETH" : self.exp_setting = self.ETH
            elif dataset_name == "HOTEL" : self.exp_setting = self.HOTEL
            elif dataset_name == "UNIV" : self.exp_setting = self.UNIV
            elif dataset_name == "ZARA1" : self.exp_setting = self.ZARA1
            elif dataset_name == "ZARA2" : self.exp_setting = self.ZARA2
            self.scenario=[]
            for i in range(experiment_iteration_num): #set radius from 0.2 to 0.05 to show slstm do better in low radius situation
                #old approach to gen similar dataset based on speed, num of agents of certain dataset
                #self.scenario.append( Seeded_Scenario_Generator( self.exp_setting[0], algorithm_name, self.exp_setting[4],self.exp_setting[5], self.exp_setting[6], self.exp_setting[7] , self.exp_setting[2], 0.2 , 0, num_agents_stddev=self.exp_setting[1], pref_speed_stddev=self.exp_setting[3], random_seed=i ).random_square_edge() )
                #just use the dataset's real traj
                self.scenario.append( real_dataset_traj( dataset_name=dataset_name ).pick_start( None, algorithm_name, self.exp_setting[4],self.exp_setting[5], self.exp_setting[6], self.exp_setting[7] , self.exp_setting[2],
                    0.2 , 0, random_seed=i, num_agents_override= round(self.exp_setting[0]) ) )
        elif experiment_number == 2: #population density evaluation
            #####for high population density, reduce size, hence less agents required######
            #if population_density >= 0.5:
            #    self.POPULATION = [ None ,None ,1 ,None ,0 ,5 ,0 ,5 , [[-1,6],[-1,6]] ]
            self.exp_setting = self.POPULATION
            self.scenario=[]
            for i in range(experiment_iteration_num):
                self.scenario.append( Seeded_Population_Scenario_Generator( population_density, algorithm_name, self.exp_setting[4],self.exp_setting[5], self.exp_setting[6], self.exp_setting[7], self.exp_setting[2], 0.2, 0, random_seed=i ).population_random_square_edge() )
        elif experiment_number == 3: #touranment 1 vs n-1
            self.exp_setting = self.POPULATION
            self.scenario=[]
            print(algorithm_name)
            algorithm_name = algorithm_name.strip('][').split(',') #make sure it is transformed back to list
            # Agent count = population density * scenario area (x-range * y-range).
            number_of_agents = int(round(population_density * ( ( self.POPULATION[5] - self.POPULATION[4] ) * ( self.POPULATION[7] - self.POPULATION[6] ) )))
            for i in range(experiment_iteration_num):
                temp_name = []
                # Agent 0 runs the first algorithm, every other agent the second.
                for j in range(number_of_agents):
                    if j==0:
                        temp_name.append( algorithm_name[0] )
                    else:
                        temp_name.append( algorithm_name[1] )
                # NOTE(review): rebinding algorithm_name inside the iteration loop
                # means later iterations read indices [0]/[1] of the expanded
                # per-agent list; with the construction above those happen to
                # still hold the original two names, but this is fragile.
                algorithm_name = temp_name
                self.scenario.append( Seeded_Population_Scenario_Generator( population_density, algorithm_name, self.exp_setting[4],self.exp_setting[5], self.exp_setting[6], self.exp_setting[7], self.exp_setting[2], 0.2, 0, random_seed=i ).population_random_square_edge() )
        elif experiment_number == 4: #touranment 50% vs 50%
            self.exp_setting = self.POPULATION
            self.scenario=[]
            print(algorithm_name)
            algorithm_name = algorithm_name.strip('][').split(',') #make sure it is transformed back to list
            # Agent count = population density * scenario area (x-range * y-range).
            number_of_agents = int(round(population_density * ( ( self.POPULATION[5] - self.POPULATION[4] ) * ( self.POPULATION[7] - self.POPULATION[6] ) )))
            for i in range(experiment_iteration_num):
                temp_name = []
                #number of agents from population density, retrieved from master scenario generator
                # Alternate the two algorithms: even indices get the first,
                # odd indices the second.
                for j in range(number_of_agents):
                    if (j%2)==0:
                        temp_name.append( algorithm_name[0] )
                    else:
                        temp_name.append( algorithm_name[1] )
                # NOTE(review): same fragile rebinding as in experiment 3 above.
                algorithm_name = temp_name
                print(algorithm_name)
                self.scenario.append( Seeded_Population_Scenario_Generator( population_density, algorithm_name, self.exp_setting[4],self.exp_setting[5], self.exp_setting[6], self.exp_setting[7], self.exp_setting[2], 0.2, 0, random_seed=i ).population_random_square_edge() )
        elif experiment_number == 5: #mixture of agents, # agents decided by human dataset
            self.exp_setting = self.POPULATION
            # number_of_agents = get_number_of_agents(observation)
            #num agents = population density * area of scenario
            number_of_agents = int(round(population_density * ( ( self.POPULATION[5] - self.POPULATION[4] ) * ( self.POPULATION[7] - self.POPULATION[6] ) )))
            self.scenario=[]
            for i in range(experiment_iteration_num):
                temp_name = []
                # Every agent runs the same (first) algorithm.
                for j in range(number_of_agents):
                    temp_name.append(algorithm_name[0])
                # temp_name = get_algorithms(algorithm_name, number_of_agents) #sample from pool of algorithms
                algorithm_name = temp_name
                print(algorithm_name)
                self.scenario.append( Seeded_Population_Scenario_Generator( population_density, algorithm_name, self.exp_setting[4],self.exp_setting[5], self.exp_setting[6], self.exp_setting[7], self.exp_setting[2], 0.2, 0, random_seed=i ).population_random_square_edge() )
        # The triple-quoted block below is retired scenario-generation code kept
        # for reference (evaluated as an unused string literal at runtime).
        '''
        self.scenario=[]
        for i in range(experiment_iteration_num): #100
        #######
        #random seed
        #
        #(ETH) GA3C-CADRL
        #self.scenario.append( Scenario_Generator( 6.312138728, "GA3C-CADRL-10", -7.69, 14.42, -3.17, 13.21 , 2.339926573, 0.05 , 0, num_agents_stddev=4.536521361, pref_speed_stddev=0.7502205478 ).random_square_edge() )
        ###################################FULL traj output 100 0.05 radius ###########################
        #fixed seed
        #(ETH) GA3C-CADRL fixed seed
        #self.scenario.append( Seeded_Scenario_Generator( 6.312138728, "GA3C-CADRL-10", -7.69, 14.42, -3.17, 13.21 , 2.339926573, 0.05 , 0, num_agents_stddev=4.536521361, pref_speed_stddev=0.7502205478, random_seed=i ).random_square_edge() )
        #(HOTEL) GA3C-CADRL fixed seed
        #self.scenario.append( Seeded_Scenario_Generator( 5.598098531, "GA3C-CADRL-10", -3.25, 4.35, -10.31, 4.31 , 1.137002131, 0.05 , 0, num_agents_stddev=3.418910729, pref_speed_stddev=0.6487607538, random_seed=i ).random_square_edge() )
        #(UNIV) GA3C-CADRL fixed seed
        #self.scenario.append( Seeded_Scenario_Generator( 40.83024533, "CADRL", -0.4619709156, 15.46918556, -0.3183721728, 13.89190962 , 0.6817478507, 0.05 , 0, num_agents_stddev=6.734736777, pref_speed_stddev=0.2481828799, random_seed=i ).random_square_edge() )
        #(ZARA1) GA3C-CADRL fixed seed
        #self.scenario.append( Seeded_Scenario_Generator( 5.87224158 , "RVO", -0.1395383677, 15.48055067, -0.3746958856, 12.38644361 , 1.12739064, 0.05 , 0, num_agents_stddev=3.213275774, pref_speed_stddev=0.2946279183, random_seed=i ).random_square_edge() )
        #(ZARA2) GA3C-CADRL fixed seed
        #self.scenario.append( Seeded_Scenario_Generator( 9.314121037, "CADRL", -0.3577906864, 15.55842276,-0.2737427903, 13.94274416 , 1.096467485, 0.05 , 0, num_agents_stddev=3.926104465, pref_speed_stddev=0.3849301882, random_seed=i ).random_square_edge() )
        ###################################FULL traj output 20 0.2 radius ###########################
        #fixed seed
        #(ETH) GA3C-CADRL fixed seed
        #self.scenario.append( Seeded_Scenario_Generator( 6.312138728, "RVO", -7.69, 14.42, -3.17, 13.21 , 2.339926573, 0.2 , 0, num_agents_stddev=4.536521361, pref_speed_stddev=0.7502205478, random_seed=i ).random_square_edge() )
        #(HOTEL) GA3C-CADRL fixed seed
        #self.scenario.append( Seeded_Scenario_Generator( 5.598098531, "GA3C-CADRL-10", -3.25, 4.35, -10.31, 4.31 , 1.137002131, 0.2 , 0, num_agents_stddev=3.418910729, pref_speed_stddev=0.6487607538, random_seed=i ).random_square_edge() )
        #(UNIV) GA3C-CADRL fixed seed
        #self.scenario.append( Seeded_Scenario_Generator( 40.83024533, "RVO", -0.4619709156, 15.46918556, -0.3183721728, 13.89190962 , 0.6817478507, 0.2 , 0, num_agents_stddev=6.734736777, pref_speed_stddev=0.2481828799, random_seed=i ).random_square_edge() )
        #(ZARA1) GA3C-CADRL fixed seed
        #self.scenario.append( Seeded_Scenario_Generator( 5.87224158 , "RVO", -0.1395383677, 15.48055067, -0.3746958856, 12.38644361 , 1.12739064, 0.2 , 0, num_agents_stddev=3.213275774, pref_speed_stddev=0.2946279183, random_seed=i ).random_square_edge() )
        #(ZARA2) GA3C-CADRL fixed seed
        #self.scenario.append( Seeded_Scenario_Generator( 9.314121037, "RVO", -0.3577906864, 15.55842276,-0.2737427903, 13.94274416 , 1.096467485, 0.2 , 0, num_agents_stddev=3.926104465, pref_speed_stddev=0.3849301882, random_seed=i ).random_square_edge() )
        #(ZARA2) testing with SPEC / STGCNN
        #self.scenario.append( Seeded_Scenario_Generator( 15, "SLSTM", -5, 5,-5, 5 , 1.096467485, 0.2 , 0, num_agents_stddev=0.001, pref_speed_stddev=0.3849301882, random_seed=i ).random_square_edge() )
        #self.scenario.append( Seeded_Scenario_Generator( 15, "SOCIALGAN", -5, 5,-5, 5 , 1.096467485, 0.2 , 0, num_agents_stddev=0.001, pref_speed_stddev=0.3849301882, random_seed=i ).random_square_edge() )
        #self.scenario.append( Seeded_Scenario_Generator( 30, "SLSTM", -6, 6,-6, 6 , 1.096467485, 0.2 , 0, num_agents_stddev=0.001, pref_speed_stddev=0.3849301882, random_seed=i ).random_square_edge() )
        #self.scenario.append( Seeded_Scenario_Generator( 5, "SOCIALGAN", -3, 3,-3, 3 , 1.096467485, 0.2 , 0, num_agents_stddev=0.001, pref_speed_stddev=0.3849301882, random_seed=i ).random_square_edge() )
        self.scenario.append( Seeded_Scenario_Generator( 30, "SPEC", -6, 6,-6, 6 , 1.096467485, 0.2 , 0, num_agents_stddev=0.001, pref_speed_stddev=0.3849301882, random_seed=i ).random_square_edge() )
        #self.scenario.append( Seeded_Scenario_Generator( 20, "SPEC", -10, 10,-10, 10 , 1, 0.2 , 0, num_agents_stddev=0.01, pref_speed_stddev=0, random_seed=i ).random_square_edge() )
        ################Population density fixed seed fixed speed (1m/s), 0.2m radius gradually increase density, 10x10m #############################
        #0.1
        #self.scenario.append( Seeded_Population_Scenario_Generator( 0.1, "RVO", 0, 10, 0, 10, 1, 0.2, 0, random_seed=i ).population_random_square_edge() )
        #0.15
        #self.scenario.append( Seeded_Population_Scenario_Generator( 0.15, "GA3C-CADRL-10", 0, 10, 0, 10, 1, 0.2, 0, random_seed=i ).population_random_square_edge() )
        #0.2
        #self.scenario.append( Seeded_Population_Scenario_Generator( 0.2, "GA3C-CADRL-10", 0, 10, 0, 10, 1, 0.2, 0, random_seed=i ).population_random_square_edge() )
        #0.25
        #self.scenario.append( Seeded_Population_Scenario_Generator( 0.25, "GA3C-CADRL-10", 0, 10, 0, 10, 1, 0.2, 0, random_seed=i ).population_random_square_edge() )
        #0.3
        #self.scenario.append( Seeded_Population_Scenario_Generator( 0.3, "RVO", 0, 10, 0, 10, 1, 0.2, 0, random_seed=i ).population_random_square_edge() )
        #0.35
        #self.scenario.append( Seeded_Population_Scenario_Generator( 0.35, "RVO", 0, 10, 0, 10, 1, 0.2, 0, random_seed=i ).population_random_square_edge() )
        #0.4
        #self.scenario.append( Seeded_Population_Scenario_Generator( 0.4, "RVO", 0, 10, 0, 10, 1, 0.2, 0, random_seed=i ).population_random_square_edge() )
        #0.45
        #self.scenario.append( Seeded_Population_Scenario_Generator( 0.45, "RVO", 0, 10, 0, 10, 1, 0.2, 0, random_seed=i ).population_random_square_edge() )
        #0.5
        #self.scenario.append( Seeded_Population_Scenario_Generator( 0.5, "RVO", 0, 10, 0, 10, 1, 0.2, 0, random_seed=i ).population_random_square_edge() )
        #0.55
        #self.scenario.append( Seeded_Population_Scenario_Generator( 0.55, "GA3C-CADRL-10", 0, 10, 0, 10, 1, 0.2, 0, random_seed=i ).population_random_square_edge() )
        '''
| 65.71746
| 333
| 0.5706
| 2,572
| 20,701
| 4.378305
| 0.104199
| 0.035432
| 0.070864
| 0.066069
| 0.815647
| 0.798686
| 0.777551
| 0.763343
| 0.717077
| 0.664861
| 0
| 0.150284
| 0.278054
| 20,701
| 314
| 334
| 65.926752
| 0.603212
| 0.17574
| 0
| 0.566372
| 0
| 0
| 0.015432
| 0.005215
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017699
| false
| 0
| 0.017699
| 0
| 0.053097
| 0.035398
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bab24de2b96a3c5336050dbf200a0de2ba05b53d
| 23
|
py
|
Python
|
startin/__init__.py
|
hugoledoux/testmaturin
|
1309e65a22ace2288300d7f8db3500234c71d542
|
[
"MIT"
] | null | null | null |
startin/__init__.py
|
hugoledoux/testmaturin
|
1309e65a22ace2288300d7f8db3500234c71d542
|
[
"MIT"
] | null | null | null |
startin/__init__.py
|
hugoledoux/testmaturin
|
1309e65a22ace2288300d7f8db3500234c71d542
|
[
"MIT"
] | null | null | null |
from .startin import *
| 11.5
| 22
| 0.73913
| 3
| 23
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bab7241eb856d660e790dab61dbfd365f8f5e179
| 293
|
py
|
Python
|
great_expectations/rule_based_profiler/domain_builder/__init__.py
|
vikramaditya91/great_expectations
|
4ebcdc0414bec3cf336b43cc54ca63bddb05bac3
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/rule_based_profiler/domain_builder/__init__.py
|
vikramaditya91/great_expectations
|
4ebcdc0414bec3cf336b43cc54ca63bddb05bac3
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/rule_based_profiler/domain_builder/__init__.py
|
vikramaditya91/great_expectations
|
4ebcdc0414bec3cf336b43cc54ca63bddb05bac3
|
[
"Apache-2.0"
] | null | null | null |
from .column_domain_builder import ColumnDomainBuilder
from .inferred_semantic_domain_type import InferredSemanticDomainType
from .simple_column_suffix_domain_builder import SimpleColumnSuffixDomainBuilder
from .simple_semantic_type_domain_builder import SimpleSemanticTypeColumnDomainBuilder
| 58.6
| 86
| 0.931741
| 29
| 293
| 8.965517
| 0.482759
| 0.15
| 0.219231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054608
| 293
| 4
| 87
| 73.25
| 0.938628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bafa2dce51f9f59ab4952241dee1fbfa4b8569a9
| 1,904
|
py
|
Python
|
benchml/models/mod_dscribe.py
|
rudolfspetrovs/benchml
|
896673f387a6bb9b185664ddd54f569a1ba54e51
|
[
"Apache-2.0"
] | 3
|
2021-08-12T13:25:31.000Z
|
2022-03-21T21:30:22.000Z
|
benchml/models/mod_dscribe.py
|
rudolfspetrovs/benchml
|
896673f387a6bb9b185664ddd54f569a1ba54e51
|
[
"Apache-2.0"
] | 5
|
2020-12-08T08:59:41.000Z
|
2022-01-22T06:46:09.000Z
|
benchml/models/mod_dscribe.py
|
rudolfspetrovs/benchml
|
896673f387a6bb9b185664ddd54f569a1ba54e51
|
[
"Apache-2.0"
] | 1
|
2021-06-25T11:07:32.000Z
|
2021-06-25T11:07:32.000Z
|
import numpy as np
import benchml.transforms as btf
from benchml.hyper import GridHyper, Hyper
def compile_dscribe(**kwargs):
    """Build one ridge-regression benchmark module per non-periodic dscribe descriptor.

    Each module wires an extended-XYZ input through the descriptor, a matrix
    reduction, and a ridge predictor, with a log-spaced alpha grid for
    hyper-parameter search.  ``**kwargs`` is accepted for registry-call
    compatibility and is not used here.

    Returns:
        list[btf.Module]: one configured module per descriptor class.
    """
    descriptor_classes = [btf.DscribeCM, btf.DscribeACSF, btf.DscribeMBTR, btf.DscribeLMBTR]
    modules = []
    for descriptor_cls in descriptor_classes:
        pipeline = [
            btf.ExtXyzInput(tag="input"),
            descriptor_cls(tag="descriptor", inputs={"configs": "input.configs"}),
            btf.ReduceMatrix(tag="reduce", inputs={"X": "descriptor.X"}),
            btf.Ridge(tag="predictor", inputs={"X": "reduce.X", "y": "input.y"}),
        ]
        # Sweep the ridge regularisation strength over 7 log-spaced points.
        alpha_grid = GridHyper(Hyper({"predictor.alpha": np.logspace(-5, +5, 7)}))
        modules.append(
            btf.Module(
                tag=descriptor_cls.__name__ + "_ridge",
                transforms=pipeline,
                hyper=alpha_grid,
                broadcast={"meta": "input.meta"},
                outputs={"y": "predictor.y"},
            )
        )
    return modules
def compile_dscribe_periodic(**kwargs):
    """Build ridge-regression benchmark modules for the periodic dscribe descriptors.

    Mirrors :func:`compile_dscribe` but over the periodic descriptor set
    (currently only the sine matrix).  ``**kwargs`` is accepted for
    registry-call compatibility and is not used here.

    Returns:
        list[btf.Module]: one configured module per periodic descriptor class.
    """
    periodic_descriptors = [btf.DscribeSineMatrix]
    return [
        btf.Module(
            tag=descriptor.__name__ + "_ridge",
            transforms=[
                btf.ExtXyzInput(tag="input"),
                descriptor(tag="descriptor", inputs={"configs": "input.configs"}),
                btf.ReduceMatrix(tag="reduce", inputs={"X": "descriptor.X"}),
                btf.Ridge(tag="predictor", inputs={"X": "reduce.X", "y": "input.y"}),
            ],
            # Sweep the ridge regularisation strength over 7 log-spaced points.
            hyper=GridHyper(Hyper({"predictor.alpha": np.logspace(-5, +5, 7)})),
            broadcast={"meta": "input.meta"},
            outputs={"y": "predictor.y"},
        )
        for descriptor in periodic_descriptors
    ]
def register_all():
    """Return the registry mapping collection names to their compile functions."""
    registry = {}
    registry["dscribe"] = compile_dscribe
    registry["dscribe_periodic"] = compile_dscribe_periodic
    return registry
| 31.733333
| 98
| 0.491597
| 164
| 1,904
| 5.597561
| 0.286585
| 0.061002
| 0.037037
| 0.045752
| 0.708061
| 0.708061
| 0.708061
| 0.708061
| 0.708061
| 0.708061
| 0
| 0.004951
| 0.363445
| 1,904
| 59
| 99
| 32.271186
| 0.752475
| 0
| 0
| 0.576923
| 0
| 0
| 0.145483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| true
| 0
| 0.057692
| 0.057692
| 0.173077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
244237b3e9b442b3781e371aceb7ae3d2ea0533d
| 33
|
py
|
Python
|
src/models/__init__.py
|
jwoos/web_sticky-note
|
72c6b7b4b7bf7b7d528ce714dd091e9581b10042
|
[
"MIT"
] | null | null | null |
src/models/__init__.py
|
jwoos/web_sticky-note
|
72c6b7b4b7bf7b7d528ce714dd091e9581b10042
|
[
"MIT"
] | 3
|
2017-12-29T04:47:05.000Z
|
2017-12-29T04:59:22.000Z
|
src/models/__init__.py
|
jwoos/web_ephemeral-notes
|
72c6b7b4b7bf7b7d528ce714dd091e9581b10042
|
[
"MIT"
] | null | null | null |
from src.models.note import Note
| 16.5
| 32
| 0.818182
| 6
| 33
| 4.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0364b9fd430536b7bc95e4af8b51fcfd04f625db
| 18
|
py
|
Python
|
Zoocmd/new_core/version.py
|
helicontech/zoo
|
a33ba547f553bcce415f7a54bd89c444f82e48ee
|
[
"Apache-2.0"
] | 2
|
2017-05-01T07:35:24.000Z
|
2018-04-12T13:36:03.000Z
|
Zoocmd/new_core/version.py
|
helicontech/zoo
|
a33ba547f553bcce415f7a54bd89c444f82e48ee
|
[
"Apache-2.0"
] | 2
|
2017-03-23T17:28:37.000Z
|
2018-06-07T06:38:08.000Z
|
Zoocmd/new_core/version.py
|
helicontech/zoo
|
a33ba547f553bcce415f7a54bd89c444f82e48ee
|
[
"Apache-2.0"
] | 3
|
2016-06-22T11:11:16.000Z
|
2019-10-25T15:09:46.000Z
|
VERSION="1.0.0.0"
| 9
| 17
| 0.611111
| 5
| 18
| 2.2
| 0.6
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 0.055556
| 18
| 1
| 18
| 18
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
300c64dad9a2edf3f721b3b96a0c044a8213f7ff
| 48
|
py
|
Python
|
client/py_client/utils/datasources/rest/__init__.py
|
thefstock/FirstockPy
|
09b4dcf3470f83de991b43213958d2c6783f997b
|
[
"MIT"
] | 1
|
2022-03-29T06:56:06.000Z
|
2022-03-29T06:56:06.000Z
|
client/py_client/utils/datasources/rest/__init__.py
|
thefstock/FirstockPy
|
09b4dcf3470f83de991b43213958d2c6783f997b
|
[
"MIT"
] | 3
|
2022-01-17T09:31:21.000Z
|
2022-03-11T12:12:08.000Z
|
client/py_client/utils/datasources/rest/__init__.py
|
thefstock/FirstockPy
|
09b4dcf3470f83de991b43213958d2c6783f997b
|
[
"MIT"
] | null | null | null |
from .datasource import *
from .context import *
| 24
| 25
| 0.770833
| 6
| 48
| 6.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 26
| 24
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
30187c78d73285d630770d46ffccbd1723da950a
| 146
|
py
|
Python
|
djstripe/middleware.py
|
ExtraE113/dj-stripe
|
1b50be13fc99b624388a005b8aa1e26c57392203
|
[
"MIT"
] | 937
|
2017-06-04T18:44:20.000Z
|
2022-03-27T07:28:32.000Z
|
djstripe/middleware.py
|
ExtraE113/dj-stripe
|
1b50be13fc99b624388a005b8aa1e26c57392203
|
[
"MIT"
] | 969
|
2017-06-05T01:57:20.000Z
|
2022-03-31T23:42:54.000Z
|
djstripe/middleware.py
|
ExtraE113/dj-stripe
|
1b50be13fc99b624388a005b8aa1e26c57392203
|
[
"MIT"
] | 309
|
2017-06-12T03:18:10.000Z
|
2022-03-29T17:05:18.000Z
|
"""dj-stripe middleware
"""
from django.utils.deprecation import MiddlewareMixin
class SubscriptionPaymentMiddleware(MiddlewareMixin):
    """No-op middleware: the class body is empty, so requests and responses
    pass through unchanged.

    Presumably retained so existing ``MIDDLEWARE`` settings entries that
    reference it keep importing — TODO confirm against the project changelog.
    """
    pass
| 18.25
| 53
| 0.808219
| 13
| 146
| 9.076923
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 146
| 7
| 54
| 20.857143
| 0.907692
| 0.136986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
062f41794cd6f4e55101288d94256a3eb2c71d8f
| 43
|
py
|
Python
|
scripts/dataset_scripts/__init__.py
|
tedmaksym/leadingtophytoplankton
|
165a4ccf5b79ef7858be3b13551728fe7873a659
|
[
"BSD-3-Clause"
] | 1
|
2021-01-09T17:06:40.000Z
|
2021-01-09T17:06:40.000Z
|
scripts/dataset_scripts/__init__.py
|
tedmaksym/leadingtophytoplankton
|
165a4ccf5b79ef7858be3b13551728fe7873a659
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/dataset_scripts/__init__.py
|
tedmaksym/leadingtophytoplankton
|
165a4ccf5b79ef7858be3b13551728fe7873a659
|
[
"BSD-3-Clause"
] | 2
|
2020-06-30T07:50:30.000Z
|
2021-02-15T06:24:17.000Z
|
from .dataset import *
from .atl03 import *
| 21.5
| 22
| 0.744186
| 6
| 43
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.162791
| 43
| 2
| 23
| 21.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0632a3278ccc3c2914e3089c25b6ae0f281cbc8d
| 157
|
py
|
Python
|
terrascript/powerdns/r.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/powerdns/r.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/powerdns/r.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/powerdns/r.py
import terrascript
class powerdns_zone(terrascript.Resource):
    """Marker class for the ``powerdns_zone`` Terraform resource; all
    behavior comes from ``terrascript.Resource`` (the body is empty)."""
    pass
class powerdns_record(terrascript.Resource):
    """Marker class for the ``powerdns_record`` Terraform resource; all
    behavior comes from ``terrascript.Resource`` (the body is empty)."""
    pass
| 14.272727
| 44
| 0.783439
| 18
| 157
| 6.722222
| 0.555556
| 0.214876
| 0.380165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140127
| 157
| 10
| 45
| 15.7
| 0.896296
| 0.159236
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
067ea90b8bd8566019892dc493c8981cad41e2e9
| 16,085
|
py
|
Python
|
IRM_data/Fig5/Fig5b/Fig5b.py
|
wangqf1997/Human-injury-based-safety-decision-of-automated-vehicles
|
b104fdeb3d85e867f6b04c5ae7b5a197e705aeba
|
[
"CC-BY-4.0"
] | null | null | null |
IRM_data/Fig5/Fig5b/Fig5b.py
|
wangqf1997/Human-injury-based-safety-decision-of-automated-vehicles
|
b104fdeb3d85e867f6b04c5ae7b5a197e705aeba
|
[
"CC-BY-4.0"
] | null | null | null |
IRM_data/Fig5/Fig5b/Fig5b.py
|
wangqf1997/Human-injury-based-safety-decision-of-automated-vehicles
|
b104fdeb3d85e867f6b04c5ae7b5a197e705aeba
|
[
"CC-BY-4.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: Qingfan Wang, Qing Zhou, Miao Lin, Bingbing Nie
Corresponding author: Bingbing Nie (nbb@tsinghua.edu.cn)
-------------------------------------------------------------------------------------------------
'''
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
def resize_rotate(image, angle, l_, w_):
    """Vertically rescale *image* to the ``w_ / l_`` aspect and rotate it by *angle*.

    The output canvas is enlarged so no corner is clipped after rotation, and
    the exposed background is filled with white (255, 255, 255).

    Args:
        image: input image array (as produced by mpimg/cv2; H x W x C).
        angle: rotation angle passed to ``cv2.getRotationMatrix2D``.
        l_, w_: vehicle length and width used to set the aspect ratio.

    Returns:
        The resized-and-rotated image array.
    """
    # Rescale the height relative to the fixed 3370/8651 reference ratio.
    target_height = int(image.shape[0] / (3370 / 8651) * (w_ / l_))
    image = cv2.resize(image, (image.shape[1], target_height))

    height, width = image.shape[:2]
    center_x, center_y = width // 2, height // 2

    # Rotation matrix about the image centre, no scaling.
    rotation = cv2.getRotationMatrix2D((center_x, center_y), angle, 1.0)
    abs_cos = np.abs(rotation[0, 0])
    abs_sin = np.abs(rotation[0, 1])

    # Bounding box that fully contains the rotated image.
    new_width = int((height * abs_sin) + (width * abs_cos))
    new_height = int((height * abs_cos) + (width * abs_sin))

    # Shift the transform so the content ends up centred on the new canvas.
    rotation[0, 2] += (new_width / 2) - center_x
    rotation[1, 2] += (new_height / 2) - center_y

    return cv2.warpAffine(image, rotation, (new_width, new_height), borderValue=(255, 255, 255))
def _add_vehicle(ax, base_img, data, prefix, veh, veh_l, veh_w, zoom_scale):
    ''' Place the final-frame pose of vehicle *veh* (1 or 2) on *ax*.

    Reads arrays '{prefix}_t{veh}', '{prefix}_x{veh}', '{prefix}_y{veh}'
    from *data* and anchors the rotated icon at the last sample.
    '''
    theta = data['%s_t%d' % (prefix, veh)][-1]
    x = data['%s_x%d' % (prefix, veh)][-1]
    y = data['%s_y%d' % (prefix, veh)][-1]
    img = resize_rotate(base_img, np.rad2deg(theta), veh_l, veh_w)
    icon = OffsetImage(img, zoom=zoom_scale * veh_l, alpha=1)
    box = AnnotationBbox(icon, xy=(x, y), xycoords='data', pad=0, frameon=False)
    ax.add_artist(box)


def _plot_traj_pair(data, prefix, c):
    ''' Plot the dashed x-y trajectories of both vehicles for one strategy. '''
    for veh in (1, 2):
        plt.plot(data['%s_x%d' % (prefix, veh)], data['%s_y%d' % (prefix, veh)],
                 color=c, linestyle='--', linewidth=1.3, alpha=0.5)


def _setup_zoom_axes():
    ''' Create the shared zoomed-in axes used by panels Fig5b_3/4/5. '''
    fig, ax = plt.subplots(figsize=(3.5 * 1.1, 3.5 / 9 * 6 * 1.1))
    plt.axis('equal')
    plt.xlim((5.5, 5.5 + 9))
    plt.ylim((-3.8, -3.8 + 6))
    plt.xticks(np.arange(5.5, 5.5 + 10, 3), np.arange(0, 10, 3), family='Arial', fontsize=14)
    plt.yticks(np.arange(-3.8, -3.8 + 8, 3), np.arange(0, 8, 3), family='Arial', fontsize=14)
    plt.subplots_adjust(wspace=0.25, hspace=0.25, left=0.11, bottom=0.11, top=0.96, right=0.96)
    return fig, ax


def main():
    ''' Plot Fig5b (all seven panels). '''
    # Load vehicle icons: gray variants for the baseline, colored ones for
    # the EB / S1 / S2 strategies.
    img_ini_00 = mpimg.imread('../../image/gray__.png')
    img_ini_0 = mpimg.imread('../../image/gray.png')
    img_ini_1 = mpimg.imread('../../image/blue.png')
    img_ini_2 = mpimg.imread('../../image/green.png')
    img_ini_3 = mpimg.imread('../../image/orange.png')
    # Vehicle lengths/widths [m] and the shared strategy color palette.
    veh_l_1, veh_l_2, veh_w_1, veh_w_2 = 3.995, 4.07, 1.615, 1.615
    color = ['gray', '#3B89F0', '#41B571', '#FFB70A', '#FF5050']

    ''' Plot Fig5b_1. '''
    # OISS reduction vs. activation time: one dashed marker-curve per strategy.
    fig, ax = plt.subplots(figsize=(4.5, 4.5))
    font1 = {'family': 'Arial', 'size': 15}
    plt.xlabel("Activation time before the collision [ms]", font1)
    plt.ylabel('Reduction of OISS [%]', font1, labelpad=-3.5)
    plt.xticks(np.arange(0, 101, 20), np.arange(0, 101, 20) * 10 - 1000, family='Arial', fontsize=15)
    plt.yticks(family='Arial', fontsize=15)
    plt.subplots_adjust(left=0.15, wspace=0.25, hspace=0.25, bottom=0.13, top=0.97, right=0.97)
    data = np.load('data/Fig5b_1.npz')
    curve_specs = [
        ('Inj_EB', '#3B89F0', 'o', {}),
        ('Inj_S1', '#41B571', 'v', {}),
        ('Inj_S2', '#FFB70A', '^', {}),
        ('Inj_S3', '#FF5050', 's', {'clip_on': False}),  # last curve may touch the frame
    ]
    for key, c, marker, extra in curve_specs:
        plt.plot(np.arange(0, 101, 10), data[key], color=c, marker=marker,
                 linestyle='dashed', linewidth=1, markersize=6, **extra)
    plt.show()
    # plt.savefig('Fig5b_1.png', dpi=600)
    plt.close()

    ''' Plot Fig5b_2. '''
    # Bird's-eye overview of the scenario with both vehicles' trajectories.
    fig, ax = plt.subplots(figsize=(2, 2 / 28 * 24))
    plt.axis('equal')
    plt.xlim((-4, 24))
    plt.ylim((-11 + 0.5, 13 + 0.5))
    plt.xticks([], family='Arial', fontsize=15)
    plt.yticks([], family='Arial', fontsize=15)
    plt.subplots_adjust(left=0.02, wspace=0.25, hspace=0.25, bottom=0.02, top=0.98, right=0.98)
    data = np.load('data/Fig5b_2.npz')
    # Road geometry: gray solid strokes are edges, gray (10, 8)-dashed strokes
    # are lane dividers, orange strokes are center lines. Offsets were
    # hand-tuned for this figure.
    x = data['road_x']
    y = data['road_y']
    plt.plot(x + 3.5, y - 1, color='gray', linestyle='-', linewidth=1.3, alpha=0.7)
    plt.plot(x[:35] + 3.5, y[:35] - 8.6, color='orange', linestyle='-', linewidth=0.7, alpha=0.5)
    # NOTE(review): the next call duplicates the previous line exactly (it only
    # deepens the 50%-alpha stroke); confirm whether an offset second line
    # (as drawn at y-8.7 / y-8.9 below) was intended instead.
    plt.plot(x[:35] + 3.5, y[:35] - 8.6, color='orange', linestyle='-', linewidth=0.7, alpha=0.5)
    plt.plot(x[0:36][::-1] + 3.5, y[:36][::-1] - 4.75, color='gray', linestyle=(0, (10, 8)), linewidth=1, alpha=0.35)
    plt.plot(x[0:33][::-1] + 3.5, y[:33][::-1] - 12.25, color='gray', linestyle=(0, (10, 8)), linewidth=1, alpha=0.35)
    plt.plot(x[:-65] + 3.5, y[:-65] - 16, color='gray', linestyle='-', linewidth=1.3, alpha=0.7)
    plt.plot(x[:-65] + 3.5, -y[:-65] + 10.508, color='gray', linestyle='-', linewidth=1.3, alpha=0.7)
    plt.plot(x[:-68][::-1] - 4, -y[:-68][::-1] + 8.508, color='orange', linestyle=(0, (10, 8)), linewidth=1, alpha=0.5)
    plt.plot(x[:-10] - 0.5, -y[:-10] + 3.508, color='gray', linestyle='-', linewidth=1.3, alpha=0.7)
    plt.plot([x[34] + 3.5, x[30] - 0.3], [y[34] - 8.5, y[30] - 2], color='gray', linestyle='-', linewidth=1.3,
             alpha=0.5)
    plt.plot(x[70:] + 3.5, y[70:] - 8.7, color='orange', linestyle='-', linewidth=0.7, alpha=0.5)
    plt.plot(x[70:] + 3.5, y[70:] - 8.9, color='orange', linestyle='-', linewidth=0.7, alpha=0.5)
    plt.plot(x[70:] + 3.6, y[70:] - 4.95, color='gray', linestyle=(0, (10, 8)), linewidth=1, alpha=0.35)
    plt.plot([x[70] + 3.5, x[70] + 3.6], [y[70] - 8.8, y[70] - 17], color='gray', linestyle='-', linewidth=1.3,
             alpha=0.5)
    plt.plot(x[70:] + 3.5, y[70:] - 12.95, color='gray', linestyle=(0, (10, 8)), linewidth=1, alpha=0.35)
    # Final vehicle poses and their past trajectories.
    _add_vehicle(ax, img_ini_00, data, 'traj', 1, veh_l_1, veh_w_1, 0.003)
    _add_vehicle(ax, img_ini_00, data, 'traj', 2, veh_l_2, veh_w_2, 0.003)
    _plot_traj_pair(data, 'traj', color[0])
    plt.show()
    # plt.savefig('Fig5b_2.png', dpi=600)
    plt.close()

    ''' Plot Fig5b_3. '''
    # Zoomed-in comparison: baseline ('Re', gray) vs. emergency braking ('EB', blue).
    fig, ax = _setup_zoom_axes()
    data = np.load('data/Fig5b_3.npz')
    _add_vehicle(ax, img_ini_0, data, 'traj_Re', 1, veh_l_1, veh_w_1, 0.0155)
    _add_vehicle(ax, img_ini_0, data, 'traj_Re', 2, veh_l_2, veh_w_2, 0.0155)
    _add_vehicle(ax, img_ini_1, data, 'traj_EB', 1, veh_l_1, veh_w_1, 0.0155)
    _add_vehicle(ax, img_ini_1, data, 'traj_EB', 2, veh_l_2, veh_w_2, 0.0155)
    _plot_traj_pair(data, 'traj_Re', color[0])
    _plot_traj_pair(data, 'traj_EB', color[1])
    plt.show()
    # plt.savefig('Fig5b_3.png', dpi=600)
    plt.close()

    ''' Plot Fig5b_4. '''
    # Zoomed-in comparison: baseline (gray) vs. strategy S1 (green).
    fig, ax = _setup_zoom_axes()
    data = np.load('data/Fig5b_4.npz')
    _add_vehicle(ax, img_ini_0, data, 'traj_Re', 1, veh_l_1, veh_w_1, 0.0155)
    _add_vehicle(ax, img_ini_0, data, 'traj_Re', 2, veh_l_2, veh_w_2, 0.0155)
    _add_vehicle(ax, img_ini_2, data, 'traj_S1', 1, veh_l_1, veh_w_1, 0.0155)
    _add_vehicle(ax, img_ini_2, data, 'traj_S1', 2, veh_l_2, veh_w_2, 0.0155)
    _plot_traj_pair(data, 'traj_Re', color[0])
    _plot_traj_pair(data, 'traj_S1', color[2])
    plt.show()
    # plt.savefig('Fig5b_4.png', dpi=600)
    plt.close()

    ''' Plot Fig5b_5. '''
    # Zoomed-in comparison: baseline (gray) vs. strategy S2 (orange).
    fig, ax = _setup_zoom_axes()
    data = np.load('data/Fig5b_5.npz')
    _add_vehicle(ax, img_ini_0, data, 'traj_Re', 1, veh_l_1, veh_w_1, 0.0155)
    _add_vehicle(ax, img_ini_0, data, 'traj_Re', 2, veh_l_2, veh_w_2, 0.0155)
    _add_vehicle(ax, img_ini_3, data, 'traj_S2', 1, veh_l_1, veh_w_1, 0.0155)
    _add_vehicle(ax, img_ini_3, data, 'traj_S2', 2, veh_l_2, veh_w_2, 0.0155)
    _plot_traj_pair(data, 'traj_Re', color[0])
    _plot_traj_pair(data, 'traj_S2', color[3])
    plt.show()
    # plt.savefig('Fig5b_5.png', dpi=600)
    plt.close()

    # Strategy tags/colors shared by the two time-history panels below.
    strategies = [('Re', 'lightgray'), ('EB', '#3B89F0'), ('S1', '#41B571'), ('S2', '#FFB70A')]

    ''' Plot Fig5b_6. '''
    # Velocity histories: vehicle 1 dashed, vehicle 2 dash-dotted; zorder
    # stacks the baseline on top.
    fig, ax = plt.subplots(figsize=(5.15, 1.8))
    font1 = {'family': 'Arial', 'size': 14}
    plt.xlabel("Time [ms]", font1, labelpad=-0.6)
    plt.ylabel('Velocity [m/s]', font1, labelpad=3)
    plt.xticks(np.arange(0, 126, 20), np.arange(0, 126, 20) * 10, family='Arial', fontsize=14)
    plt.yticks(family='Arial', fontsize=14)
    plt.xlim([-5, 125])
    plt.subplots_adjust(left=0.11, wspace=0.25, hspace=0.25, bottom=0.25, top=0.96, right=0.99)
    data = np.load('data/Fig5b_6.npz')
    for z, (tag, c) in zip((10, 9, 8, 7), strategies):
        plt.plot(data['traj_%s_V1' % tag], color=c, linestyle='dashed', linewidth=2, zorder=z)
    for z, (tag, c) in zip((5, 4, 3, 2), strategies):
        plt.plot(data['traj_%s_V2' % tag], color=c, linestyle='-.', linewidth=2, zorder=z)
    plt.show()
    # plt.savefig('Fig5b_6.png', dpi=600)
    plt.close()

    ''' Plot Fig5b_7. '''
    # Yaw-rate histories, converted from rad/s (stored) to deg/s (displayed).
    fig, ax = plt.subplots(figsize=(5.15, 1.8))
    font1 = {'family': 'Arial', 'size': 14}
    plt.xlabel("Time [ms]", font1, labelpad=-0.6)
    plt.ylabel('Yaw rate [deg/s]', font1, labelpad=1)
    plt.xticks(np.arange(0, 126, 20), np.arange(0, 126, 20) * 10, family='Arial', fontsize=14)
    plt.yticks(family='Arial', fontsize=14)
    plt.xlim([-5, 125])
    plt.subplots_adjust(left=0.13, wspace=0.25, hspace=0.25, bottom=0.25, top=0.96, right=0.99)
    data = np.load('data/Fig5b_7.npz')
    for z, (tag, c) in zip((10, 9, 8, 7), strategies):
        plt.plot(np.rad2deg(data['traj_%s_W1' % tag]), color=c, linestyle='dashed', linewidth=2, zorder=z)
    for z, (tag, c) in zip((5, 4, 3, 2), strategies):
        plt.plot(np.rad2deg(data['traj_%s_W2' % tag]), color=c, linestyle='-.', linewidth=2, zorder=z)
    plt.show()
    # plt.savefig('Fig5b_7.png', dpi=600)
    plt.close()
# Script entry point: draw every Fig5b panel when executed directly.
if __name__ == "__main__":
    main()
| 48.303303
| 119
| 0.612558
| 2,715
| 16,085
| 3.492449
| 0.093186
| 0.072559
| 0.035857
| 0.039443
| 0.81217
| 0.798144
| 0.779582
| 0.727589
| 0.714828
| 0.682135
| 0
| 0.09522
| 0.159714
| 16,085
| 332
| 120
| 48.448795
| 0.606318
| 0.096798
| 0
| 0.466981
| 0
| 0
| 0.123795
| 0.004541
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009434
| false
| 0
| 0.023585
| 0
| 0.037736
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2306467681ba171ea2e8f3766865ee55f20697d4
| 151
|
py
|
Python
|
Lib/__np__/__init__.py
|
Redex-Developers/Nuitka-Python
|
7c1fc1dd6dfeab4cdafeec1709e6e9c4c8c84227
|
[
"0BSD"
] | null | null | null |
Lib/__np__/__init__.py
|
Redex-Developers/Nuitka-Python
|
7c1fc1dd6dfeab4cdafeec1709e6e9c4c8c84227
|
[
"0BSD"
] | null | null | null |
Lib/__np__/__init__.py
|
Redex-Developers/Nuitka-Python
|
7c1fc1dd6dfeab4cdafeec1709e6e9c4c8c84227
|
[
"0BSD"
] | null | null | null |
import platform

# Re-export the platform-specific implementation of the __np__ package.
# The check is hoisted so platform.system() runs only once; platforms other
# than Windows/Linux (e.g. macOS) intentionally get no symbols here.
_system = platform.system()
if _system == "Windows":
    from __np__.windows import *
elif _system == "Linux":
    from __np__.linux import *
| 21.571429
| 34
| 0.688742
| 18
| 151
| 5.333333
| 0.5
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178808
| 151
| 6
| 35
| 25.166667
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0.07947
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
233afa6f82ecef2bc7b57dd78d29b1c3eff98c41
| 21,744
|
py
|
Python
|
src/backend/marsha/markdown/tests/test_api.py
|
insad-video/marsha
|
1e6a708c74527f50c4aa24d811049492e75f47a0
|
[
"MIT"
] | null | null | null |
src/backend/marsha/markdown/tests/test_api.py
|
insad-video/marsha
|
1e6a708c74527f50c4aa24d811049492e75f47a0
|
[
"MIT"
] | null | null | null |
src/backend/marsha/markdown/tests/test_api.py
|
insad-video/marsha
|
1e6a708c74527f50c4aa24d811049492e75f47a0
|
[
"MIT"
] | null | null | null |
"""Tests for the Markdown application API."""
import json
import random
from django.test import TestCase, override_settings
from rest_framework_simplejwt.tokens import AccessToken
from marsha.core import factories as core_factories
from ..factories import MarkdownDocumentFactory
from ..models import MarkdownDocument
# We don't enforce arguments documentation in tests
# pylint: disable=unused-argument
@override_settings(MARKDOWN_ENABLED=True)
class MarkdownAPITest(TestCase):
"""Test for the Markdown document API."""
maxDiff = None
def test_api_document_fetch_anonymous(self):
"""Anonymous users should not be able to fetch a Markdown document."""
markdown_document = MarkdownDocumentFactory()
response = self.client.get(f"/api/markdown-documents/{markdown_document.pk}/")
self.assertEqual(response.status_code, 401)
content = json.loads(response.content)
self.assertEqual(
content, {"detail": "Authentication credentials were not provided."}
)
def test_api_document_fetch_student(self):
"""A student should not be allowed to fetch a Markdown document."""
markdown_document = MarkdownDocumentFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = ["student"]
jwt_token.payload["permissions"] = {"can_update": True}
response = self.client.get(
f"/api/markdown-documents/{markdown_document.pk}/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(response.status_code, 403)
content = json.loads(response.content)
self.assertEqual(
content, {"detail": "You do not have permission to perform this action."}
)
def test_api_document_fetch_instructor(self):
"""An instructor should be able to fetch a Markdown document."""
markdown_document = MarkdownDocumentFactory(
pk="4c51f469-f91e-4998-b438-e31ee3bd3ea6",
playlist__pk="6a716ff3-1bfb-4870-906e-fda50293f0ac",
playlist__title="foo",
playlist__lti_id="course-v1:ufr+mathematics+00001",
translations__title="Amazing title",
translations__content="# Heading1\nSome content",
translations__rendered_content="<h1>Heading1</h1>\n<p>Some content</p>",
)
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
response = self.client.get(
f"/api/markdown-documents/{markdown_document.pk}/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(
content,
{
"id": "4c51f469-f91e-4998-b438-e31ee3bd3ea6",
"is_draft": True,
"rendering_options": {},
"translations": [
{
"language_code": "en",
"title": "Amazing title",
"content": "# Heading1\nSome content",
"rendered_content": "<h1>Heading1</h1>\n<p>Some content</p>",
}
],
"playlist": {
"id": "6a716ff3-1bfb-4870-906e-fda50293f0ac",
"title": "foo",
"lti_id": "course-v1:ufr+mathematics+00001",
},
"position": 0,
},
)
def test_api_document_fetch_instructor_read_only(self):
"""An instructor should not be able to fetch a Markdown document in read_only."""
markdown_document = MarkdownDocumentFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": False}
response = self.client.get(
f"/api/markdown-documents/{markdown_document.pk}/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(response.status_code, 403)
content = json.loads(response.content)
self.assertEqual(
content, {"detail": "You do not have permission to perform this action."}
)
def test_api_document_fetch_list_anonymous(self):
"""An anonymous should not be able to fetch a list of Markdown document."""
response = self.client.get("/api/markdown-documents/")
self.assertEqual(response.status_code, 401)
def test_api_document_fetch_list_student(self):
"""A student should not be able to fetch a list of Markdown document."""
markdown_document = MarkdownDocumentFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = ["student"]
jwt_token.payload["permissions"] = {"can_update": True}
response = self.client.get(
"/api/markdown-documents/", HTTP_AUTHORIZATION=f"Bearer {jwt_token}"
)
self.assertEqual(response.status_code, 403)
def test_api_fetch_list_instructor(self):
"""An instrustor should not be able to fetch a Markdown document list."""
markdown_document = MarkdownDocumentFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
response = self.client.get(
"/api/markdown-documents/", HTTP_AUTHORIZATION=f"Bearer {jwt_token}"
)
self.assertEqual(response.status_code, 403)
def test_api_document_create_anonymous(self):
"""An anonymous should not be able to create a Markdown document."""
response = self.client.post("/api/markdown-documents/")
self.assertEqual(response.status_code, 401)
def test_api_document_create_student(self):
"""A student should not be able to create a Markdown document."""
markdown_document = MarkdownDocumentFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = ["student"]
jwt_token.payload["permissions"] = {"can_update": True}
response = self.client.post(
"/api/markdown-documents/", HTTP_AUTHORIZATION=f"Bearer {jwt_token}"
)
self.assertEqual(response.status_code, 403)
def test_api_document_create_student_with_playlist_token(self):
"""A student with a playlist token should not be able to create a Markdown document."""
playlist = core_factories.PlaylistFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = "None"
jwt_token.payload["roles"] = ["student"]
jwt_token.payload["permissions"] = {"can_update": True}
jwt_token.payload["playlist_id"] = str(playlist.id)
response = self.client.post(
"/api/markdown-documents/", HTTP_AUTHORIZATION=f"Bearer {jwt_token}"
)
self.assertEqual(response.status_code, 403)
def test_api_document_create_instructor(self):
"""An instrustor should not be able to create a Markdown document."""
markdown_document = MarkdownDocumentFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
response = self.client.get(
"/api/markdown-documents/", HTTP_AUTHORIZATION=f"Bearer {jwt_token}"
)
self.assertEqual(response.status_code, 403)
def test_api_document_create_instructor_with_playlist_token(self):
"""
Create document with playlist token.
Used in the context of a lti select request (deep linking).
"""
playlist = core_factories.PlaylistFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = "None"
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
jwt_token.payload["playlist_id"] = str(playlist.id)
self.assertEqual(MarkdownDocument.objects.count(), 0)
response = self.client.post(
"/api/markdown-documents/",
{
"lti_id": "document_one",
"playlist": str(playlist.id),
"title": "Some document",
},
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(MarkdownDocument.objects.count(), 1)
self.assertEqual(response.status_code, 201)
document = MarkdownDocument.objects.first()
self.assertEqual(
response.json(),
{
"id": str(document.id),
"is_draft": True,
"playlist": {
"id": str(playlist.id),
"lti_id": playlist.lti_id,
"title": playlist.title,
},
"position": 0,
"rendering_options": {},
"translations": [
{
"content": "",
"language_code": "en",
"rendered_content": "",
"title": "Some document",
}
],
},
)
def test_api_document_delete_anonymous(self):
"""An anonymous should not be able to delete a Markdown document."""
markdown_document = MarkdownDocumentFactory()
response = self.client.delete(
f"/api/markdown-documents/{markdown_document.pk}/",
)
self.assertEqual(response.status_code, 401)
def test_api_document_delete_student(self):
"""A student should not be able to delete a Markdown document."""
markdown_document = MarkdownDocumentFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = ["student"]
jwt_token.payload["permissions"] = {"can_update": True}
response = self.client.delete(
f"/api/markdown-documents/{markdown_document.pk}/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(response.status_code, 403)
def test_api_document_delete_instructor(self):
"""An instructor should not be able to create a Markdown document."""
markdown_document = MarkdownDocumentFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
response = self.client.delete(
f"/api/markdown-documents/{markdown_document.pk}/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(response.status_code, 405)
def test_api_document_update_anonymous(self):
"""An anonymous should not be able to update a Markdown document."""
markdown_document = MarkdownDocumentFactory()
response = self.client.put(f"/api/markdown-documents/{markdown_document.pk}/")
self.assertEqual(response.status_code, 401)
response = self.client.patch(
f"/api/markdown-documents/{markdown_document.pk}/save-translations/",
content_type="application/json",
)
self.assertEqual(response.status_code, 401)
response = self.client.post(
f"/api/markdown-documents/{markdown_document.pk}/latex-rendering/",
content_type="application/json",
)
self.assertEqual(response.status_code, 401)
def test_api_document_update_student(self):
"""A student user should not be able to update a Markdown document."""
markdown_document = MarkdownDocumentFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = ["student"]
jwt_token.payload["permissions"] = {"can_update": True}
data = {"title": "new title"}
response = self.client.put(
f"/api/markdown-documents/{markdown_document.pk}/",
data,
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
content_type="application/json",
)
self.assertEqual(response.status_code, 403)
response = self.client.patch(
f"/api/markdown-documents/{markdown_document.pk}/save-translations/",
data, # Not important here, wrong data raises 400
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
content_type="application/json",
)
self.assertEqual(response.status_code, 403)
response = self.client.post(
f"/api/markdown-documents/{markdown_document.pk}/latex-rendering/",
data, # Not important here, wrong data raises 400
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
content_type="application/json",
)
self.assertEqual(response.status_code, 403)
def test_api_document_update_instructor_read_only(self):
"""An instructor should not be able to update a Markdown document in read_only."""
markdown_document = MarkdownDocumentFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": False}
data = {"title": "new title"}
response = self.client.put(
f"/api/markdown-documents/{markdown_document.pk}/",
data,
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
content_type="application/json",
)
self.assertEqual(response.status_code, 403)
response = self.client.patch(
f"/api/markdown-documents/{markdown_document.pk}/save-translations/",
data, # Not important here, wrong data raises 400
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
content_type="application/json",
)
self.assertEqual(response.status_code, 403)
response = self.client.post(
f"/api/markdown-documents/{markdown_document.pk}/latex-rendering/",
data, # Not important here, wrong data raises 400
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
content_type="application/json",
)
self.assertEqual(response.status_code, 403)
def test_api_document_update_instructor(self):
"""An instructor should be able to update a Markdown document."""
markdown_document = MarkdownDocumentFactory(is_draft=True)
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
data = {"is_draft": False}
response = self.client.put(
f"/api/markdown-documents/{markdown_document.pk}/",
data,
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
content_type="application/json",
)
self.assertEqual(response.status_code, 200)
markdown_document.refresh_from_db()
self.assertEqual(markdown_document.is_draft, False)
def test_api_document_translation_update_instructor(self):
"""An instructor should be able to update a Markdown document translated content."""
markdown_document = MarkdownDocumentFactory(is_draft=True)
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
data = {
"language_code": "en",
"title": "A very specific title",
"content": "Some interesting content for sure",
"rendered_content": "<p>Some interesting content for sure</p>",
}
response = self.client.patch(
f"/api/markdown-documents/{markdown_document.pk}/save-translations/",
data,
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
content_type="application/json",
)
self.assertEqual(response.status_code, 200)
markdown_document.refresh_from_db()
markdown_document.set_current_language("en")
self.assertEqual(markdown_document.title, "A very specific title")
self.assertEqual(markdown_document.content, "Some interesting content for sure")
self.assertEqual(
markdown_document.rendered_content,
"<p>Some interesting content for sure</p>",
)
def test_api_document_render_latex_instructor(self):
"""An instructor should be able to render LaTeX content content."""
markdown_document = MarkdownDocumentFactory(is_draft=True)
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = str(markdown_document.pk)
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
response = self.client.post(
f"/api/markdown-documents/{markdown_document.pk}/latex-rendering/",
{"text": r"I = \int \rho R^{2} dV"},
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
content_type="application/json",
)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
# Content is already tested elsewhere
self.assertIn(
"<svg version='1.1' xmlns='http://www.w3.org/2000/svg'",
content["latex_image"],
)
def test_api_select_instructor_no_document(self):
"""An instructor should be able to fetch a Markdown document lti select."""
playlist = core_factories.PlaylistFactory()
jwt_token = AccessToken()
jwt_token.payload["resource_id"] = "None"
jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
jwt_token.payload["permissions"] = {"can_update": True}
jwt_token.payload["playlist_id"] = str(playlist.id)
response = self.client.get(
"/api/markdown-documents/lti-select/",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(response.status_code, 200)
self.assertDictEqual(
{
"new_url": "http://testserver/lti/markdown_documents/",
"markdown_documents": [],
},
response.json(),
)
def test_api_select_instructor(self):
    """An instructor should be able to fetch a Markdown document lti select."""
    # One document with a single English translation.
    markdown_document = MarkdownDocumentFactory(
        translations__title="Amazing title",
        translations__content="# Heading1\nSome content",
        translations__rendered_content="<h1>Heading1</h1>\n<p>Some content</p>",
    )
    jwt_token = AccessToken()
    # NOTE(review): resource_id is the literal string "None" — presumably an
    # LTI select launch with no resource attached yet; confirm against the view.
    jwt_token.payload["resource_id"] = "None"
    jwt_token.payload["roles"] = [random.choice(["instructor", "administrator"])]
    jwt_token.payload["permissions"] = {"can_update": True}
    jwt_token.payload["playlist_id"] = str(markdown_document.playlist_id)
    response = self.client.get(
        "/api/markdown-documents/lti-select/",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 200)
    # The complete serialized document (translations + playlist) is expected.
    self.assertDictEqual(
        {
            "new_url": "http://testserver/lti/markdown_documents/",
            "markdown_documents": [
                {
                    "id": str(markdown_document.id),
                    "is_draft": markdown_document.is_draft,
                    "lti_id": str(markdown_document.lti_id),
                    "lti_url": (
                        f"http://testserver/lti/markdown_documents/{str(markdown_document.id)}"
                    ),
                    "rendering_options": {},
                    "translations": [
                        {
                            "language_code": "en",
                            "title": "Amazing title",
                            "content": "# Heading1\nSome content",
                            "rendered_content": "<h1>Heading1</h1>\n<p>Some content</p>",
                        }
                    ],
                    "playlist": {
                        "id": str(markdown_document.playlist_id),
                        "title": markdown_document.playlist.title,
                        "lti_id": markdown_document.playlist.lti_id,
                    },
                    "position": markdown_document.position,
                },
            ],
        },
        response.json(),
    )
| 41.026415
| 99
| 0.608168
| 2,231
| 21,744
| 5.724787
| 0.093232
| 0.061384
| 0.068118
| 0.065847
| 0.865722
| 0.826652
| 0.817178
| 0.79964
| 0.789305
| 0.751331
| 0
| 0.01377
| 0.275248
| 21,744
| 529
| 100
| 41.10397
| 0.796688
| 0.088714
| 0
| 0.644928
| 0
| 0.002415
| 0.231164
| 0.081447
| 0
| 0
| 0
| 0
| 0.103865
| 1
| 0.055556
| false
| 0
| 0.016908
| 0
| 0.077295
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
234f04098db7a81d894b871aada9b45c054ed479
| 88
|
py
|
Python
|
py_aco/__init__.py
|
Joanguitar/ACO
|
3a52ddbdb1bd8c5826b8d0fcfca02f8c4e37be74
|
[
"MIT"
] | null | null | null |
py_aco/__init__.py
|
Joanguitar/ACO
|
3a52ddbdb1bd8c5826b8d0fcfca02f8c4e37be74
|
[
"MIT"
] | null | null | null |
py_aco/__init__.py
|
Joanguitar/ACO
|
3a52ddbdb1bd8c5826b8d0fcfca02f8c4e37be74
|
[
"MIT"
] | null | null | null |
from . import core
from . import codebook
from . import simulation
from . import method
| 17.6
| 24
| 0.772727
| 12
| 88
| 5.666667
| 0.5
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 88
| 4
| 25
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
88ef395f52a7cf650440c1df94822b63594358cc
| 107
|
py
|
Python
|
testpkg/math.py
|
HurricanKai/PythonTest
|
d46c9367279c5e94d7d40e96db87d4016c5d4549
|
[
"MIT"
] | null | null | null |
testpkg/math.py
|
HurricanKai/PythonTest
|
d46c9367279c5e94d7d40e96db87d4016c5d4549
|
[
"MIT"
] | null | null | null |
testpkg/math.py
|
HurricanKai/PythonTest
|
d46c9367279c5e94d7d40e96db87d4016c5d4549
|
[
"MIT"
] | null | null | null |
def inc(i):
    """Return *i* incremented by one."""
    return 1 + i
def add(a, b):
    """Return the sum of *a* and *b*."""
    total = a + b
    return total
def sub(a, b):
    """Return *a* minus *b*."""
    difference = a - b
    return difference
| 9.727273
| 17
| 0.439252
| 20
| 107
| 2.35
| 0.45
| 0.170213
| 0.340426
| 0.382979
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.429907
| 107
| 10
| 18
| 10.7
| 0.754098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
002f57658be8b54004ad549e2327b22502df5f2a
| 123
|
py
|
Python
|
zeus/metrics/mindspore/__init__.py
|
TianQi-777/xingtian
|
9b1678ad6ff12f00c2826a7ec7f42d5350b83b31
|
[
"MIT"
] | 240
|
2020-08-15T15:11:49.000Z
|
2022-03-28T07:26:23.000Z
|
zeus/metrics/mindspore/__init__.py
|
TianQi-777/xingtian
|
9b1678ad6ff12f00c2826a7ec7f42d5350b83b31
|
[
"MIT"
] | 20
|
2020-08-29T06:18:21.000Z
|
2022-03-21T04:35:57.000Z
|
zeus/metrics/mindspore/__init__.py
|
TianQi-777/xingtian
|
9b1678ad6ff12f00c2826a7ec7f42d5350b83b31
|
[
"MIT"
] | 69
|
2020-08-15T15:41:53.000Z
|
2022-03-16T08:27:47.000Z
|
from .metrics import *
from .classifier_metric import accuracy
from .sr_metric import *
from .segmentation_metric import *
| 24.6
| 39
| 0.813008
| 16
| 123
| 6.0625
| 0.5
| 0.371134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130081
| 123
| 4
| 40
| 30.75
| 0.906542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ccad162f2d74f62ef8175f26952c6737701a92ea
| 74
|
py
|
Python
|
cs229/assignment/problem_set_1/__init__.py
|
Syhen/stanford-learn
|
4c707ec736c83eb968fc0b3d747c94280f298fa6
|
[
"MIT"
] | null | null | null |
cs229/assignment/problem_set_1/__init__.py
|
Syhen/stanford-learn
|
4c707ec736c83eb968fc0b3d747c94280f298fa6
|
[
"MIT"
] | null | null | null |
cs229/assignment/problem_set_1/__init__.py
|
Syhen/stanford-learn
|
4c707ec736c83eb968fc0b3d747c94280f298fa6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on 2021-01-29 20:19
author @66492
"""
| 10.571429
| 26
| 0.554054
| 12
| 74
| 3.416667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.3
| 0.189189
| 74
| 6
| 27
| 12.333333
| 0.383333
| 0.851351
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
aeede831aba40d89aebbe546b3a6603fb1cc78a9
| 215
|
py
|
Python
|
src/isecurity_webserver/data_model/domains.py
|
cybercamp18isecurity/iSecurity
|
016e1bb7d73864654323e2aac00024483741f8ed
|
[
"MIT"
] | 4
|
2018-11-30T22:49:52.000Z
|
2019-06-20T22:36:23.000Z
|
src/isecurity_webserver/data_model/domains.py
|
cybercamp18isecurity/iSecurity
|
016e1bb7d73864654323e2aac00024483741f8ed
|
[
"MIT"
] | 3
|
2018-11-30T12:06:21.000Z
|
2018-12-11T21:09:07.000Z
|
src/isecurity_webserver/data_model/domains.py
|
cybercamp18isecurity/iSecurity
|
016e1bb7d73864654323e2aac00024483741f8ed
|
[
"MIT"
] | 4
|
2018-12-01T01:19:36.000Z
|
2019-10-22T05:54:48.000Z
|
from .abstract_model import AbstractModel
class Domains(AbstractModel):
    """Data model for the "domains" document type.

    Thin specialization of AbstractModel that fixes the data type to
    "domains" and forwards the Elasticsearch client to the base class.
    """

    def __init__(self, elasticsearch):
        """Bind this model to *elasticsearch* with the "domains" data type.

        :param elasticsearch: Elasticsearch client/handle passed through to
            AbstractModel unchanged.
        """
        self.data_type = "domains"
        # Idiomatic cooperative call instead of naming the base class
        # explicitly (same behavior under single inheritance).
        super().__init__(elasticsearch, self.data_type)
| 30.714286
| 67
| 0.748837
| 23
| 215
| 6.521739
| 0.565217
| 0.266667
| 0.28
| 0.333333
| 0.44
| 0.44
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167442
| 215
| 6
| 68
| 35.833333
| 0.837989
| 0
| 0
| 0
| 0
| 0
| 0.032558
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
4e21db78ad0c095ab4531a725238c82feef73814
| 36,491
|
py
|
Python
|
figure_3.py
|
lullimat/arXiv-2009.12522
|
b9c2c813983eedfc29a59a95ab441570bc7a5ba7
|
[
"MIT"
] | null | null | null |
figure_3.py
|
lullimat/arXiv-2009.12522
|
b9c2c813983eedfc29a59a95ab441570bc7a5ba7
|
[
"MIT"
] | null | null | null |
figure_3.py
|
lullimat/arXiv-2009.12522
|
b9c2c813983eedfc29a59a95ab441570bc7a5ba7
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("../../")
device_str, lang, _dpi = sys.argv[1], sys.argv[2], int(sys.argv[3])
from sympy import exp as sp_exp
from sympy import symbols as sp_symbols
from sympy import Rational as sp_Rational
from collections import defaultdict
import numpy as np
from idpy.Utils.ManageData import ManageData
from idpy.LBM.LBM import XIStencils
from idpy.LBM.SCFStencils import SCFStencils, BasisVectors
from idpy.LBM.SCThermo import ShanChanEquilibriumCache
from pathlib import Path
reproduced_results = Path("reproduced-results")
##########################################################################
n = sp_symbols('n')
psis = [sp_exp(-1/n), 1 - sp_exp(-n)]
psi_codes = {psis[0]: 'exp((NType)(-1./ln))',
psis[1]: '1. - exp(-(NType)ln)',}
Gs = {psis[0]: [-2.6, -3.1, -3.6],
psis[1]: [-1.4, -1.6, -1.75]}
Ls = [127, 159, 191, 223, 255, 287, 319, 351]
E6_P2F6_sym = sp_symbols("\\boldsymbol{E}^{(6)}_{P2\,F6}")
E6_P4F6_sym = sp_symbols("\\boldsymbol{E}^{(6)}_{P4\,F6}")
E8_P2F8_sym = sp_symbols("\\boldsymbol{E}^{(8)}_{P2\,F8}")
E8_P4F6_sym = sp_symbols("\\boldsymbol{E}^{(8)}_{P4\,F6}")
E10_P2F10_sym = sp_symbols("\\boldsymbol{E}^{(10)}_{P2\,F10}")
E10_P4F6_sym = sp_symbols("\\boldsymbol{E}^{(10)}_{P4\,F6}")
E12_P2F12_sym = sp_symbols("\\boldsymbol{E}^{(12)}_{P2\,F12}")
E12_P4F6_sym = sp_symbols("\\boldsymbol{E}^{(12)}_{P4\,F6}")
'''
Getting usual weights
'''
S5_E6_P2F6 = SCFStencils(E = BasisVectors(x_max = 2),
len_2s = [1, 2, 4])
S5_E6_P2F6_W = S5_E6_P2F6.FindWeights()
S5_E8_P2F8 = SCFStencils(E = BasisVectors(x_max = 2),
len_2s = [1, 2, 4, 5, 8])
S5_E8_P2F8_W = S5_E8_P2F8.FindWeights()
S5_E10_P2F10 = SCFStencils(E = BasisVectors(x_max = 3),
len_2s = [1, 2, 4, 5, 8, 9, 10])
S5_E10_P2F10_W = S5_E10_P2F10.FindWeights()
S5_E12_P2F12 = SCFStencils(E = BasisVectors(x_max = 4),
len_2s = [1, 2, 4, 5, 8, 9, 10, 13, 16, 17])
S5_E12_P2F12_W = S5_E12_P2F12.FindWeights()
'''
File Names
'''
stencil_string = {E6_P2F6_sym: 'E6_P2F6',
E6_P4F6_sym: 'E6_P4F6',
E8_P2F8_sym: 'E8_P2F8',
E8_P4F6_sym: 'E8_P4F6',
E10_P2F10_sym: 'E10_P2F10',
E10_P4F6_sym: 'E10_P4F6',
E12_P2F12_sym: 'E12_P2F12',
E12_P4F6_sym: 'E12_P4F6'}
stencil_dict = {E6_P2F6_sym: S5_E6_P2F6,
E8_P2F8_sym: S5_E8_P2F8,
E10_P2F10_sym: S5_E10_P2F10,
E12_P2F12_sym: S5_E12_P2F12}
stencil_sym_list = [E6_P2F6_sym, E6_P4F6_sym,
E8_P2F8_sym, E8_P4F6_sym,
E10_P2F10_sym, E10_P4F6_sym,
E12_P2F12_sym, E12_P4F6_sym]
def FlatFileName(stencil_sym, psi):
    """Build the dump-file name for a flat-profile run of (stencil, psi)."""
    # Single-pass equivalent of the chained str.replace calls:
    # '/', '-', ' ' become '_'; parentheses are dropped entirely.
    sanitise = str.maketrans({"/": "_", "-": "_", " ": "_", "(": None, ")": None})
    psi_str = str(psi).translate(sanitise)
    lang_str = f"{lang}_{device_str}"
    return f"{lang_str}{stencil_string[stencil_sym]}_{psi_str}_flat_profile"
def LaplaceFileName(stencil_sym, psi):
    """Build the dump-file name for a Laplace-law run of (stencil, psi)."""
    # Single-pass equivalent of the chained str.replace calls:
    # '/', '-', ' ' become '_'; parentheses are dropped entirely.
    sanitise = str.maketrans({"/": "_", "-": "_", " ": "_", "(": None, ")": None})
    psi_str = str(psi).translate(sanitise)
    lang_str = f"{lang}_{device_str}"
    return f"{lang_str}{stencil_string[stencil_sym]}_{psi_str}_laplace"
def StencilPsiKey(stencil_sym, psi):
    """Return the lookup key joining a stencil symbol and a pseudo-potential."""
    return f"{stencil_sym}_{psi}"
laplace_files = {}
for key in stencil_string:
for _psi in psis:
laplace_files[StencilPsiKey(key, _psi)] = \
reproduced_results / LaplaceFileName(key, _psi)
rho_fields = {}
E_sym_dict = {E6_P2F6_sym: 'E6P2', E6_P4F6_sym: 'E6P4'}
gibbs_rad = defaultdict( # G
lambda: defaultdict( # 'B2F6'
lambda: defaultdict( # 'P4Iso=' + YN
lambda: defaultdict(dict) # 'droplet'
)
)
)
delta_p = defaultdict( # G
lambda: defaultdict( # 'B2F6'
lambda: defaultdict( # 'P4Iso=' + YN
lambda: defaultdict(dict) # 'droplet'
)
)
)
E_sym_YN = {E6_P2F6_sym: 'No', E6_P4F6_sym: 'Yes',
E8_P2F8_sym: 'No', E8_P4F6_sym: 'Yes',
E10_P2F10_sym: 'No', E10_P4F6_sym: 'Yes',
E12_P2F12_sym: 'No', E12_P4F6_sym: 'Yes'}
for _psi in psis:
for _stencil in [E6_P2F6_sym, E6_P4F6_sym,
E8_P2F8_sym, E8_P4F6_sym,
E10_P2F10_sym, E10_P4F6_sym,
E12_P2F12_sym, E12_P4F6_sym]:
_data_swap = ManageData(dump_file = laplace_files[StencilPsiKey(_stencil, _psi)])
_is_file_there = _data_swap.Read()
if not _is_file_there:
raise Exception("Could not find file!",
laplace_files[StencilPsiKey(_stencil, _psi)])
for G in Gs[_psi]:
_swap_gibbs_rad, _swap_delta_p = [], []
for L in Ls:
_data_key = str(G) + "_" + str(L)
_swap_gibbs_rad.append(_data_swap.PullData(_data_key)['R_Gibbs'])
_swap_delta_p.append(_data_swap.PullData(_data_key)['delta_p'])
gibbs_rad['G=' + str(G)][stencil_string[_stencil]]['P4Iso=' + E_sym_YN[_stencil]]['droplet'] = \
np.array(_swap_gibbs_rad)
delta_p['G=' + str(G)][stencil_string[_stencil]]['P4Iso=' + E_sym_YN[_stencil]]['droplet'] = \
np.array(_swap_delta_p)
sigma_f = defaultdict( # G
lambda: defaultdict( # 'B2F6'
lambda: defaultdict( # 'P4Iso=' + YN
lambda: defaultdict(dict) # 'droplet'
)
)
)
for _psi in psis:
for _stencil in [E6_P2F6_sym, E8_P2F8_sym, E10_P2F10_sym, E12_P2F12_sym]:
for G in Gs[_psi]:
_sc_eq_cache = ShanChanEquilibriumCache(stencil = stencil_dict[_stencil],
psi_f = _psi, G = G,
c2 = XIStencils['D2Q9']['c2'])
sigma_f['G=' + str(G)][stencil_string[_stencil]]['P4Iso=' + E_sym_YN[_stencil]]['droplet'] = \
_sc_eq_cache.GetFromCache()['sigma_f']
print("Surface tension (G = ", str(G), ": ",
_sc_eq_cache.GetFromCache()['sigma_f'], "), psi = ", _psi)
##################################################
############# END OF DATA PREPARATION ############
##################################################
# https://stackoverflow.com/questions/14737681/fill-the-right-column-of-a-matplotlib-legend-first
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid.inset_locator import (inset_axes, InsetPosition,
mark_inset)
import matplotlib.ticker as ticker
##################################################
#################### FIGURE 1 ####################
##################################################
from matplotlib import rc, rcParams
##rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('font',**{'family':'STIXGeneral'})
rc('mathtext', **{'fontset': 'stix'})
rc('text', usetex=True)
## To align latex text and symbols!!!
## https://stackoverflow.com/questions/40424249/vertical-alignment-of-matplotlib-legend-labels-with-latex-math
rcParams['text.latex.preview'] = True
rcParams['text.latex.preamble']=[r"\usepackage{amsmath, sourcesanspro}"]
x_lim = 0.042
rm1_axis = np.linspace(0, x_lim, 2**7)
_panel_label_pos = (0.02, 0.89)
_panel_label_pos = (0.89, 1.05)
a = 0.9
b_height = 0.8
legend_size = 10
dashed = {}
dashed[-2.6] = '-'
dashed[-3.1] = '--'
dashed[-3.6] = '-.'
dashed[-1.4] = '-'
dashed[-1.6] = '--'
dashed[-1.75] = '-.'
f_s = 14
#################### SIZES ####################
fig = plt.figure(figsize=(5.2, 10))
###############################################
#################### PANEL (a) ####################
mark_s = 9
ax1 = plt.subplot2grid((4,2), (0,0), colspan=1, rowspan=1)
black_lines = []
black_labels = []
G = -2.6
red_p, = ax1.plot(1./gibbs_rad['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet'],
delta_p['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
markersize = mark_s, label = r'$\boldsymbol{E}^{(6)}_{P4,F6}$')
blue_p, = ax1.plot(1./gibbs_rad['G=' + str(G)]['E6_P2F6']['P4Iso=' + 'No']['droplet'],
delta_p['G=' + str(G)]['E6_P2F6']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
markersize = mark_s, label = r'$\boldsymbol{E}^{(6)}_{P2,F6}$')
line_swap, = ax1.plot(rm1_axis,
rm1_axis * sigma_f['G=' + str(G)]['E6_P2F6']['P4Iso=' + 'No']['droplet'],
label = '$Gc_s^2=' + str(G) + '$', color = 'black')
black_lines.append(line_swap)
min_y, max_y = 0, 0
for G in [-3.1, -3.6]:
if min_y == 0:
min_y = np.amin(delta_p['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet'])
else:
min_y = min(min_y, np.amin(delta_p['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet']))
if max_y == 0:
max_y = np.amax(delta_p['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet'])
else:
max_y = max(max_y, np.amax(delta_p['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet']))
ax1.plot(1./gibbs_rad['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet'],
delta_p['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
markersize = mark_s)
ax1.plot(1./gibbs_rad['G=' + str(G)]['E6_P2F6']['P4Iso=' + 'No']['droplet'],
delta_p['G=' + str(G)]['E6_P2F6']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
markersize = mark_s)
line_swap, = ax1.plot(rm1_axis,
rm1_axis * sigma_f['G=' + str(G)]['E6_P2F6']['P4Iso=' + 'No']['droplet'],
dashed[G], color = 'black', label = '$Gc_s^2=' + str(G) + '$')
black_lines.append(line_swap)
#(lines, labels) = plt.gca().get_legend_handles_labels()
#lines.insert(2, plt.Line2D(rm1_axis, rm1_axis, linestyle='none'))
#labels.insert(2,'')
# _{\\mbox{\\tiny{Gibbs}}}
ax1.ticklabel_format(axis='y', style = 'sci', scilimits=(0,0))
ax1.set_xlabel('$R^{-1}$', fontsize=f_s)
ax1.set_ylabel('$\\Delta p$', fontsize=f_s)
ax1.set_xlim([0,x_lim])
### points legend
legend_size = 10
if False:
lgnd_points = plt.legend(handles = [red_p, blue_p], ncol = 2,
handletextpad = 0., columnspacing = 0.75,
bbox_to_anchor=(1.55, 1.27), frameon=False)
lgnd_points = plt.legend(handles = [red_p, blue_p], loc = 'upper left', frameon=False)
lgnd_points.get_texts()[0].set_color("red")
lgnd_points.get_texts()[1].set_color("blue")
lgnd_points.get_texts()[0].set_size("large")
lgnd_points.get_texts()[1].set_size("large")
lgnd_points.legendHandles[0]._legmarker.set_markersize(6)
lgnd_points.legendHandles[1]._legmarker.set_markersize(6)
### lines legends
black_lines.insert(0, plt.Line2D(rm1_axis, rm1_axis, linestyle='none', label = '$\psi = \exp(-1/n)$'))
lgnd_lines = plt.legend(handles = black_lines, handlelength = 2,
labelspacing=0.2,
bbox_to_anchor=(0.925, 1.75),
frameon=True)
lgnd_lines.get_texts()[0].set_x(-20)
lgnd_lines.get_texts()[0].set_size("large")
lgnd_lines.get_texts()[1].set_size("large")
lgnd_lines.get_texts()[2].set_size("large")
lgnd_lines.get_texts()[3].set_size("large")
#ml = [method_name for method_name in dir(lgnd_lines.get_texts()[0]) if callable(getattr(lgnd_lines.get_texts()[0], method_name))]
#print(ml)
### adding legents to the plot
ax1.add_artist(lgnd_points)
ax1.add_artist(lgnd_lines)
#lgnd1 = ax1.legend(lines,labels,numpoints=1, loc=4,ncol=2)
#lgnd1 = ax1.legend(lines, labels, loc='upper center', ncol=2, fancybox=True,
# bbox_to_anchor=(0.4, 1.9), frameon=False, handleheight=1.,
# prop={'size': legend_size}, borderpad=0.8, labelspacing=0.5)
# Shrink current axis by 20%
b_height = 1
box = ax1.get_position()
ax1.set_position([box.x0, box.y0, box.width, box.height * b_height])
ax1.text(_panel_label_pos[0], _panel_label_pos[1], '$(a)$',
transform = ax1.transAxes, fontsize=f_s)
#################### PANEL (b) ####################
mark_s = 9
ax2 = plt.subplot2grid((4,2), (1,0), colspan=1, rowspan=1)
black_lines = []
black_labels = []
G = -2.6
red_p, = ax2.plot(1./gibbs_rad['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'],
delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
markersize = mark_s, label = r'$\boldsymbol{E}^{(8)}_{P4,F6}$')
blue_p, = ax2.plot(1./gibbs_rad['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'],
delta_p['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
markersize = mark_s, label = r'$\boldsymbol{E}^{(8)}_{P2,F8}$')
line_swap, = ax2.plot(rm1_axis,
rm1_axis * sigma_f['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'],
label = '$Gc_s^2=' + str(G) + '$', color = 'black')
black_lines.append(line_swap)
min_y, max_y = 0, 0
for G in [-3.1, -3.6]:
if min_y == 0:
min_y = np.amin(delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'])
else:
min_y = min(min_y, np.amin(delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet']))
if max_y == 0:
max_y = np.amax(delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'])
else:
max_y = max(max_y, np.amax(delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet']))
ax2.plot(1./gibbs_rad['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'],
delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
markersize = mark_s)
ax2.plot(1./gibbs_rad['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'],
delta_p['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
markersize = mark_s)
line_swap, = ax2.plot(rm1_axis,
rm1_axis * sigma_f['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'],
dashed[G], color = 'black', label = '$Gc_s^2=' + str(G) + '$')
black_lines.append(line_swap)
#(lines, labels) = plt.gca().get_legend_handles_labels()
#lines.insert(2, plt.Line2D(rm1_axis, rm1_axis, linestyle='none'))
#labels.insert(2,'')
# _{\\mbox{\\tiny{Gibbs}}}
ax2.ticklabel_format(axis='y', style = 'sci', scilimits=(0,0))
ax2.set_xlabel('$R^{-1}$', fontsize=f_s)
ax2.set_ylabel('$\\Delta p$', fontsize=f_s)
ax2.set_xlim([0,x_lim])
### points legend
legend_size = 10
lgnd_points = plt.legend(handles = [red_p, blue_p], loc = 'upper left', frameon=False)
lgnd_points.get_texts()[0].set_color("red")
lgnd_points.get_texts()[1].set_color("blue")
lgnd_points.get_texts()[0].set_size("large")
lgnd_points.get_texts()[1].set_size("large")
lgnd_points.legendHandles[0]._legmarker.set_markersize(6)
lgnd_points.legendHandles[1]._legmarker.set_markersize(6)
#ml = [method_name for method_name in dir(lgnd_lines.get_texts()[0]) if callable(getattr(lgnd_lines.get_texts()[0], method_name))]
#print(ml)
### adding legents to the plot
ax2.add_artist(lgnd_points)
#lgnd1 = ax2.legend(lines,labels,numpoints=1, loc=4,ncol=2)
#lgnd1 = ax2.legend(lines, labels, loc='upper center', ncol=2, fancybox=True,
# bbox_to_anchor=(0.4, 1.9), frameon=False, handleheight=1.,
# prop={'size': legend_size}, borderpad=0.8, labelspacing=0.5)
# Shrink current axis by 20%
b_height = 1
box = ax2.get_position()
ax2.set_position([box.x0, box.y0, box.width, box.height * b_height])
ax2.text(_panel_label_pos[0], _panel_label_pos[1], '$(b)$',
transform = ax2.transAxes, fontsize=f_s)
#################### PANEL (c) ####################
mark_s = 9
ax3 = plt.subplot2grid((4,2), (2,0), colspan=1, rowspan=1)
black_lines = []
black_labels = []
G = -2.6
red_p, = ax3.plot(1./gibbs_rad['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'],
delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
markersize = mark_s, label = r'$\boldsymbol{E}^{(10)}_{P4,F6}$')
blue_p, = ax3.plot(1./gibbs_rad['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'],
delta_p['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
markersize = mark_s, label = r'$\boldsymbol{E}^{(10)}_{P2,F10}$')
line_swap, = ax3.plot(rm1_axis,
rm1_axis * sigma_f['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'],
label = '$Gc_s^2=' + str(G) + '$', color = 'black')
black_lines.append(line_swap)
min_y, max_y = 0, 0
for G in [-3.1, -3.6]:
if min_y == 0:
min_y = np.amin(delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'])
else:
min_y = min(min_y, np.amin(delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet']))
if max_y == 0:
max_y = np.amax(delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'])
else:
max_y = max(max_y, np.amax(delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet']))
ax3.plot(1./gibbs_rad['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'],
delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
markersize = mark_s)
ax3.plot(1./gibbs_rad['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'],
delta_p['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
markersize = mark_s)
line_swap, = ax3.plot(rm1_axis,
rm1_axis * sigma_f['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'],
dashed[G], color = 'black', label = '$Gc_s^2=' + str(G) + '$')
black_lines.append(line_swap)
#(lines, labels) = plt.gca().get_legend_handles_labels()
#lines.insert(2, plt.Line2D(rm1_axis, rm1_axis, linestyle='none'))
#labels.insert(2,'')
# _{\\mbox{\\tiny{Gibbs}}}
ax3.ticklabel_format(axis='y', style = 'sci', scilimits=(0,0))
ax3.set_xlabel('$R^{-1}$', fontsize=f_s)
ax3.set_ylabel('$\\Delta p$', fontsize=f_s)
ax3.set_xlim([0,x_lim])
### points legend
legend_size = 10
lgnd_points = plt.legend(handles = [red_p, blue_p], loc = 'upper left', frameon=False)
lgnd_points.get_texts()[0].set_color("red")
lgnd_points.get_texts()[1].set_color("blue")
lgnd_points.get_texts()[0].set_size("large")
lgnd_points.get_texts()[1].set_size("large")
lgnd_points.legendHandles[0]._legmarker.set_markersize(6)
lgnd_points.legendHandles[1]._legmarker.set_markersize(6)
#ml = [method_name for method_name in dir(lgnd_lines.get_texts()[0]) if callable(getattr(lgnd_lines.get_texts()[0], method_name))]
#print(ml)
### adding legents to the plot
ax3.add_artist(lgnd_points)
#lgnd1 = ax3.legend(lines,labels,numpoints=1, loc=4,ncol=2)
#lgnd1 = ax3.legend(lines, labels, loc='upper center', ncol=2, fancybox=True,
# bbox_to_anchor=(0.4, 1.9), frameon=False, handleheight=1.,
# prop={'size': legend_size}, borderpad=0.8, labelspacing=0.5)
# Shrink current axis by 20%
b_height = 1
box = ax3.get_position()
ax3.set_position([box.x0, box.y0, box.width, box.height * b_height])
ax3.text(_panel_label_pos[0], _panel_label_pos[1], '$(c)$',
transform = ax3.transAxes, fontsize=f_s)
#################### PANEL (d) ####################
mark_s = 9
ax4 = plt.subplot2grid((4,2), (3,0), colspan=1, rowspan=1)
black_lines = []
black_labels = []
G = -2.6
red_p, = ax4.plot(1./gibbs_rad['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet'],
delta_p['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
markersize = mark_s, label = r'$\boldsymbol{E}^{(12)}_{P4,F6}$')
blue_p, = ax4.plot(1./gibbs_rad['G=' + str(G)]['E12_P2F12']['P4Iso=' + 'No']['droplet'],
delta_p['G=' + str(G)]['E12_P2F12']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
markersize = mark_s, label = r'$\boldsymbol{E}^{(12)}_{P2,F12}$')
line_swap, = ax4.plot(rm1_axis,
rm1_axis * sigma_f['G=' + str(G)]['E12_P2F12']['P4Iso=' + 'No']['droplet'],
label = '$Gc_s^2=' + str(G) + '$', color = 'black')
black_lines.append(line_swap)
min_y, max_y = 0, 0
for G in [-3.1, -3.6]:
if min_y == 0:
min_y = np.amin(delta_p['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet'])
else:
min_y = min(min_y, np.amin(delta_p['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet']))
if max_y == 0:
max_y = np.amax(delta_p['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet'])
else:
max_y = max(max_y, np.amax(delta_p['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet']))
ax4.plot(1./gibbs_rad['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet'],
delta_p['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
markersize = mark_s)
ax4.plot(1./gibbs_rad['G=' + str(G)]['E12_P2F12']['P4Iso=' + 'No']['droplet'],
delta_p['G=' + str(G)]['E12_P2F12']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
markersize = mark_s)
line_swap, = ax4.plot(rm1_axis,
rm1_axis * sigma_f['G=' + str(G)]['E12_P2F12']['P4Iso=' + 'No']['droplet'],
dashed[G], color = 'black', label = '$Gc_s^2=' + str(G) + '$')
black_lines.append(line_swap)
#(lines, labels) = plt.gca().get_legend_handles_labels()
#lines.insert(2, plt.Line2D(rm1_axis, rm1_axis, linestyle='none'))
#labels.insert(2,'')
# _{\\mbox{\\tiny{Gibbs}}}
ax4.ticklabel_format(axis='y', style = 'sci', scilimits=(0,0))
ax4.set_xlabel('$R^{-1}$', fontsize=f_s)
ax4.set_ylabel('$\\Delta p$', fontsize=f_s)
ax4.set_xlim([0,x_lim])
### points legend
legend_size = 10
lgnd_points = plt.legend(handles = [red_p, blue_p], loc = 'upper left', frameon=False)
lgnd_points.get_texts()[0].set_color("red")
lgnd_points.get_texts()[1].set_color("blue")
lgnd_points.get_texts()[0].set_size("large")
lgnd_points.get_texts()[1].set_size("large")
lgnd_points.legendHandles[0]._legmarker.set_markersize(6)
lgnd_points.legendHandles[1]._legmarker.set_markersize(6)
#ml = [method_name for method_name in dir(lgnd_lines.get_texts()[0]) if callable(getattr(lgnd_lines.get_texts()[0], method_name))]
#print(ml)
### adding legents to the plot
ax4.add_artist(lgnd_points)
#lgnd1 = ax4.legend(lines,labels,numpoints=1, loc=4,ncol=2)
#lgnd1 = ax4.legend(lines, labels, loc='upper center', ncol=2, fancybox=True,
# bbox_to_anchor=(0.4, 1.9), frameon=False, handleheight=1.,
# prop={'size': legend_size}, borderpad=0.8, labelspacing=0.5)
# Shrink current axis by 20%
b_height = 1
box = ax4.get_position()
ax4.set_position([box.x0, box.y0, box.width, box.height * b_height])
ax4.text(_panel_label_pos[0], _panel_label_pos[1], '$(d)$',
transform = ax4.transAxes, fontsize=f_s)
###################################################
################## SECOND COLUMN ##################
#################### PANEL (e) ####################
ax5 = plt.subplot2grid((4,2), (0,1), colspan=1, rowspan=1)
black_lines = []
G = -1.4
red_p, = ax5.plot(1./gibbs_rad['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet'],
delta_p['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
markersize = mark_s, label = r'$\boldsymbol{E}^{(6)}_{P4,F6}$')
blue_p, = ax5.plot(1./gibbs_rad['G=' + str(G)]['E6_P2F6']['P4Iso=' + 'No']['droplet'],
delta_p['G=' + str(G)]['E6_P2F6']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
markersize = mark_s, label = r'$\boldsymbol{E}^{(6)}_{P2,F6}$')
line_swap, = ax5.plot(rm1_axis,
rm1_axis * sigma_f['G=' + str(G)]['E6_P2F6']['P4Iso=' + 'No']['droplet'],
label = '$Gc_s^2=' + str(G) + '$', color = 'black')
black_lines.append(line_swap)
min_y, max_y = 0, 0
for G in [-1.6, -1.75]:
if min_y == 0:
min_y = np.amin(delta_p['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet'])
else:
min_y = min(min_y, np.amin(delta_p['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet']))
if max_y == 0:
max_y = np.amax(delta_p['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet'])
else:
max_y = max(max_y, np.amax(delta_p['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet']))
ax5.plot(1./gibbs_rad['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet'],
delta_p['G=' + str(G)]['E6_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
markersize = mark_s)
ax5.plot(1./gibbs_rad['G=' + str(G)]['E6_P2F6']['P4Iso=' + 'No']['droplet'],
delta_p['G=' + str(G)]['E6_P2F6']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
markersize = mark_s)
line_swap, = ax5.plot(rm1_axis,
rm1_axis * sigma_f['G=' + str(G)]['E6_P2F6']['P4Iso=' + 'No']['droplet'],
dashed[G], color = 'black', label = '$Gc_s^2=' + str(G) + '$')
black_lines.append(line_swap)
ax5.set_xlabel('$R^{-1}$', fontsize=f_s)
#ax5.set_title('$\psi = 1 - \\exp(-n)$')
ax5.ticklabel_format(axis='y', style = 'sci', scilimits=(0,0))
ax5.set_xlim([0,x_lim])
lgnd_points2 = plt.legend(handles = [red_p, blue_p], loc = 'upper left', frameon=False)
lgnd_points2.get_texts()[0].set_color("red")
lgnd_points2.get_texts()[1].set_color("blue")
lgnd_points2.get_texts()[0].set_size("large")
lgnd_points2.get_texts()[1].set_size("large")
lgnd_points2.legendHandles[0]._legmarker.set_markersize(6)
lgnd_points2.legendHandles[1]._legmarker.set_markersize(6)
### lines legends
black_lines.insert(0, plt.Line2D(rm1_axis, rm1_axis, linestyle='none', label = '$\psi = 1 - \exp(-n)$'))
lgnd_lines2 = plt.legend(handles = black_lines, handlelength = 2.,
labelspacing=0.2,
bbox_to_anchor=(0.9625, 1.75),
frameon=True)
lgnd_lines2.get_texts()[0].set_x(-20)
lgnd_lines2.get_texts()[0].set_size("large")
lgnd_lines2.get_texts()[1].set_size("large")
lgnd_lines2.get_texts()[2].set_size("large")
lgnd_lines2.get_texts()[3].set_size("large")
### adding legents to the plot
ax5.add_artist(lgnd_points2)
ax5.add_artist(lgnd_lines2)
# Shrink current axis by 20%
box = ax5.get_position()
ax5.set_position([box.x0, box.y0, box.width, box.height * b_height])
ax5.text(_panel_label_pos[0], _panel_label_pos[1], '$(e)$',
transform = ax5.transAxes, fontsize=f_s)
#################### PANEL (f) ####################
# Panel (f): pressure jump delta_p vs inverse droplet radius R^-1 for the
# 8th-order stencils, comparing E8_P4F6 (P4Iso=Yes, red 'x') against
# E8_P2F8 (P4Iso=No, blue '+') at three coupling strengths G.
ax6 = plt.subplot2grid((4,2), (1,1), colspan=1, rowspan=1)
black_lines = []  # one black reference line handle per coupling G
G = -1.4
# First coupling: these two handles also carry the marker-legend labels.
red_p, = ax6.plot(1./gibbs_rad['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'],
                  delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
                  markersize = mark_s, label = r'$\boldsymbol{E}^{(8)}_{P4,F6}$')
blue_p, = ax6.plot(1./gibbs_rad['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'],
                   delta_p['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
                   markersize = mark_s, label = r'$\boldsymbol{E}^{(8)}_{P2,F8}$')
# Straight reference line through the origin with slope sigma_f -- presumably
# a Laplace-law fit delta_p = sigma * R^-1; confirm against the paper.
line_swap, = ax6.plot(rm1_axis,
                      rm1_axis * sigma_f['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'],
                      label = '$Gc_s^2=' + str(G) + '$', color = 'black')
black_lines.append(line_swap)
# NOTE(review): min_y/max_y are accumulated below but never read in this
# panel -- looks vestigial; confirm before removing.
min_y, max_y = 0, 0
for G in [-1.6, -1.75]:
    if min_y == 0:
        min_y = np.amin(delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'])
    else:
        min_y = min(min_y, np.amin(delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet']))
    if max_y == 0:
        max_y = np.amax(delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'])
    else:
        max_y = max(max_y, np.amax(delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet']))
    # Remaining couplings: same markers, no legend labels (set once above).
    ax6.plot(1./gibbs_rad['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'],
             delta_p['G=' + str(G)]['E8_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
             markersize = mark_s)
    ax6.plot(1./gibbs_rad['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'],
             delta_p['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
             markersize = mark_s)
    # dashed[G] selects a distinct dash style per coupling.
    line_swap, = ax6.plot(rm1_axis,
                          rm1_axis * sigma_f['G=' + str(G)]['E8_P2F8']['P4Iso=' + 'No']['droplet'],
                          dashed[G], color = 'black', label = '$Gc_s^2=' + str(G) + '$')
    black_lines.append(line_swap)
ax6.set_xlabel('$R^{-1}$', fontsize=f_s)
#ax6.set_title('$\psi = 1 - \\exp(-n)$')
ax6.ticklabel_format(axis='y', style = 'sci', scilimits=(0,0))
ax6.set_xlim([0,x_lim])
# Marker-only legend; text colors matched to the marker colors.
lgnd_points = plt.legend(handles = [red_p, blue_p], loc = 'upper left', frameon=False)
lgnd_points.get_texts()[0].set_color("red")
lgnd_points.get_texts()[1].set_color("blue")
lgnd_points.get_texts()[0].set_size("large")
lgnd_points.get_texts()[1].set_size("large")
# NOTE(review): Legend.legendHandles is deprecated (renamed legend_handles in
# Matplotlib >= 3.7) and _legmarker is a private attribute -- confirm the
# pinned Matplotlib version before upgrading.
lgnd_points.legendHandles[0]._legmarker.set_markersize(6)
lgnd_points.legendHandles[1]._legmarker.set_markersize(6)
#ax6.legend(loc='upper center', ncol=1, fancybox=True,
#           bbox_to_anchor=(0.5, 1.7), frameon=False, prop={'size': legend_size})
# Shrink current axis by 20%
box = ax6.get_position()
ax6.set_position([box.x0, box.y0, box.width, box.height * b_height])
ax6.text(_panel_label_pos[0], _panel_label_pos[1], '$(f)$',
         transform = ax6.transAxes, fontsize=f_s)
#################### PANEL (g) ####################
# Panel (g): same layout as panel (f) above, but for the 10th-order stencils
# E10_P4F6 (red 'x') vs E10_P2F10 (blue '+').
ax7 = plt.subplot2grid((4,2), (2,1), colspan=1, rowspan=1)
black_lines = []  # one black reference line handle per coupling G
G = -1.4
# First coupling: these two handles also carry the marker-legend labels.
red_p, = ax7.plot(1./gibbs_rad['G=' + str(G)]['E10_P4F6']['P4Iso=' + 'Yes']['droplet'],
                  delta_p['G=' + str(G)]['E10_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
                  markersize = mark_s, label = r'$\boldsymbol{E}^{(10)}_{P4,F6}$')
blue_p, = ax7.plot(1./gibbs_rad['G=' + str(G)]['E10_P2F10']['P4Iso=' + 'No']['droplet'],
                   delta_p['G=' + str(G)]['E10_P2F10']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
                   markersize = mark_s, label = r'$\boldsymbol{E}^{(10)}_{P2,F10}$')
# Reference line with slope sigma_f (Laplace-law fit, presumably).
line_swap, = ax7.plot(rm1_axis,
                      rm1_axis * sigma_f['G=' + str(G)]['E10_P2F10']['P4Iso=' + 'No']['droplet'],
                      label = '$Gc_s^2=' + str(G) + '$', color = 'black')
black_lines.append(line_swap)
# NOTE(review): min_y/max_y are accumulated but never read in this panel.
min_y, max_y = 0, 0
for G in [-1.6, -1.75]:
    if min_y == 0:
        min_y = np.amin(delta_p['G=' + str(G)]['E10_P4F6']['P4Iso=' + 'Yes']['droplet'])
    else:
        min_y = min(min_y, np.amin(delta_p['G=' + str(G)]['E10_P4F6']['P4Iso=' + 'Yes']['droplet']))
    if max_y == 0:
        max_y = np.amax(delta_p['G=' + str(G)]['E10_P4F6']['P4Iso=' + 'Yes']['droplet'])
    else:
        max_y = max(max_y, np.amax(delta_p['G=' + str(G)]['E10_P4F6']['P4Iso=' + 'Yes']['droplet']))
    # Remaining couplings: same markers, no legend labels (set once above).
    ax7.plot(1./gibbs_rad['G=' + str(G)]['E10_P4F6']['P4Iso=' + 'Yes']['droplet'],
             delta_p['G=' + str(G)]['E10_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
             markersize = mark_s)
    ax7.plot(1./gibbs_rad['G=' + str(G)]['E10_P2F10']['P4Iso=' + 'No']['droplet'],
             delta_p['G=' + str(G)]['E10_P2F10']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
             markersize = mark_s)
    # dashed[G] selects a distinct dash style per coupling.
    line_swap, = ax7.plot(rm1_axis,
                          rm1_axis * sigma_f['G=' + str(G)]['E10_P2F10']['P4Iso=' + 'No']['droplet'],
                          dashed[G], color = 'black', label = '$Gc_s^2=' + str(G) + '$')
    black_lines.append(line_swap)
ax7.set_xlabel('$R^{-1}$', fontsize=f_s)
#ax7.set_title('$\psi = 1 - \\exp(-n)$')
ax7.ticklabel_format(axis='y', style = 'sci', scilimits=(0,0))
ax7.set_xlim([0,x_lim])
# Marker-only legend; text colors matched to the marker colors.
lgnd_points = plt.legend(handles = [red_p, blue_p], loc = 'upper left', frameon=False)
lgnd_points.get_texts()[0].set_color("red")
lgnd_points.get_texts()[1].set_color("blue")
lgnd_points.get_texts()[0].set_size("large")
lgnd_points.get_texts()[1].set_size("large")
# NOTE(review): legendHandles/_legmarker are deprecated/private in newer
# Matplotlib (>= 3.7 uses legend_handles); confirm the pinned version.
lgnd_points.legendHandles[0]._legmarker.set_markersize(6)
lgnd_points.legendHandles[1]._legmarker.set_markersize(6)
#ax7.legend(loc='upper center', ncol=1, fancybox=True,
#           bbox_to_anchor=(0.5, 1.7), frameon=False, prop={'size': legend_size})
# Shrink current axis by 20%
box = ax7.get_position()
ax7.set_position([box.x0, box.y0, box.width, box.height * b_height])
ax7.text(_panel_label_pos[0], _panel_label_pos[1], '$(g)$',
         transform = ax7.transAxes, fontsize=f_s)
#################### PANEL (h) ####################
# Panel (h): same layout as panel (f) above, but for the 12th-order stencils
# E12_P4F6 (red 'x') vs E12_P2F12 (blue '+').
ax8 = plt.subplot2grid((4,2), (3,1), colspan=1, rowspan=1)
black_lines = []  # one black reference line handle per coupling G
G = -1.4
# First coupling: these two handles also carry the marker-legend labels.
red_p, = ax8.plot(1./gibbs_rad['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet'],
                  delta_p['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
                  markersize = mark_s, label = r'$\boldsymbol{E}^{(12)}_{P4,F6}$')
blue_p, = ax8.plot(1./gibbs_rad['G=' + str(G)]['E12_P2F12']['P4Iso=' + 'No']['droplet'],
                   delta_p['G=' + str(G)]['E12_P2F12']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
                   markersize = mark_s, label = r'$\boldsymbol{E}^{(12)}_{P2,F12}$')
# Reference line with slope sigma_f (Laplace-law fit, presumably).
line_swap, = ax8.plot(rm1_axis,
                      rm1_axis * sigma_f['G=' + str(G)]['E12_P2F12']['P4Iso=' + 'No']['droplet'],
                      label = '$Gc_s^2=' + str(G) + '$', color = 'black')
black_lines.append(line_swap)
# NOTE(review): min_y/max_y are accumulated but never read in this panel.
min_y, max_y = 0, 0
for G in [-1.6, -1.75]:
    if min_y == 0:
        min_y = np.amin(delta_p['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet'])
    else:
        min_y = min(min_y, np.amin(delta_p['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet']))
    if max_y == 0:
        max_y = np.amax(delta_p['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet'])
    else:
        max_y = max(max_y, np.amax(delta_p['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet']))
    # Remaining couplings: same markers, no legend labels (set once above).
    ax8.plot(1./gibbs_rad['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet'],
             delta_p['G=' + str(G)]['E12_P4F6']['P4Iso=' + 'Yes']['droplet'], 'x', color = 'red',
             markersize = mark_s)
    ax8.plot(1./gibbs_rad['G=' + str(G)]['E12_P2F12']['P4Iso=' + 'No']['droplet'],
             delta_p['G=' + str(G)]['E12_P2F12']['P4Iso=' + 'No']['droplet'], '+', color = 'blue',
             markersize = mark_s)
    # dashed[G] selects a distinct dash style per coupling.
    line_swap, = ax8.plot(rm1_axis,
                          rm1_axis * sigma_f['G=' + str(G)]['E12_P2F12']['P4Iso=' + 'No']['droplet'],
                          dashed[G], color = 'black', label = '$Gc_s^2=' + str(G) + '$')
    black_lines.append(line_swap)
ax8.set_xlabel('$R^{-1}$', fontsize=f_s)
#ax8.set_title('$\psi = 1 - \\exp(-n)$')
ax8.ticklabel_format(axis='y', style = 'sci', scilimits=(0,0))
ax8.set_xlim([0,x_lim])
# Marker-only legend; text colors matched to the marker colors.
lgnd_points = plt.legend(handles = [red_p, blue_p], loc = 'upper left', frameon=False)
lgnd_points.get_texts()[0].set_color("red")
lgnd_points.get_texts()[1].set_color("blue")
lgnd_points.get_texts()[0].set_size("large")
lgnd_points.get_texts()[1].set_size("large")
# NOTE(review): legendHandles/_legmarker are deprecated/private in newer
# Matplotlib (>= 3.7 uses legend_handles); confirm the pinned version.
lgnd_points.legendHandles[0]._legmarker.set_markersize(6)
lgnd_points.legendHandles[1]._legmarker.set_markersize(6)
#ax8.legend(loc='upper center', ncol=1, fancybox=True,
#           bbox_to_anchor=(0.5, 1.7), frameon=False, prop={'size': legend_size})
# Shrink current axis by 20%
box = ax8.get_position()
ax8.set_position([box.x0, box.y0, box.width, box.height * b_height])
ax8.text(_panel_label_pos[0], _panel_label_pos[1], '$(h)$',
         transform = ax8.transAxes, fontsize=f_s)
#################### SAVING ####################
# Finalize the layout and write the figure to reproduced-figures/figure_3.png.
fig.tight_layout()
from pathlib import Path
reproduced_figures = Path("reproduced-figures")
# mkdir(parents=True, exist_ok=True) is idempotent and race-free, replacing
# the previous is_dir()-then-mkdir() pair (which could fail if the directory
# appeared between the check and the creation).
reproduced_figures.mkdir(parents=True, exist_ok=True)
plt.savefig(reproduced_figures / 'figure_3.png',
            bbox_inches = 'tight', dpi = _dpi)
plt.close()
| 39.492424
| 130
| 0.564468
| 5,258
| 36,491
| 3.674971
| 0.066185
| 0.027532
| 0.030016
| 0.033639
| 0.82394
| 0.789577
| 0.759044
| 0.72556
| 0.72556
| 0.69482
| 0
| 0.060209
| 0.198487
| 36,491
| 923
| 131
| 39.535211
| 0.600451
| 0.109972
| 0
| 0.538462
| 0
| 0
| 0.155583
| 0.023471
| 0.187291
| 0
| 0
| 0
| 0
| 1
| 0.005017
| false
| 0
| 0.0301
| 0.001672
| 0.040134
| 0.001672
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4e325b53dc51c402d92cb47259d5f5b2296a28da
| 130
|
py
|
Python
|
classwork/10_28_2020.py
|
Katsute/Baruch-CIS-2300-Assignments
|
ea374ed1cb229f5e598863ba1777be5f47eaab9d
|
[
"CC0-1.0"
] | null | null | null |
classwork/10_28_2020.py
|
Katsute/Baruch-CIS-2300-Assignments
|
ea374ed1cb229f5e598863ba1777be5f47eaab9d
|
[
"CC0-1.0"
] | null | null | null |
classwork/10_28_2020.py
|
Katsute/Baruch-CIS-2300-Assignments
|
ea374ed1cb229f5e598863ba1777be5f47eaab9d
|
[
"CC0-1.0"
] | 1
|
2022-01-12T18:17:52.000Z
|
2022-01-12T18:17:52.000Z
|
# rand
# Demo of the random module: print six random integers in [1, 10]
# (random.randint bounds are inclusive).
import random
# NOTE(review): this result is discarded -- looks like a throwaway demo call.
# Removing it would shift the RNG state for the loop below, so it is kept.
random.randint(1, 10)
for _ in range(6):
    print(random.randint(1, 10))
# pandas
import pandas as pd  # imported but not used in this snippet
| 13
| 32
| 0.669231
| 21
| 130
| 4.095238
| 0.666667
| 0.302326
| 0.325581
| 0.372093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068627
| 0.215385
| 130
| 9
| 33
| 14.444444
| 0.77451
| 0.084615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0.2
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
9d91136969dbe70d8ca19b3aaee0329bc8a50cd8
| 189
|
py
|
Python
|
aulas/aula006b.py
|
figueiredo-alef/estud-python
|
f22351ecb966ec84433bb6078d92d4f31d5a0a7e
|
[
"MIT"
] | null | null | null |
aulas/aula006b.py
|
figueiredo-alef/estud-python
|
f22351ecb966ec84433bb6078d92d4f31d5a0a7e
|
[
"MIT"
] | null | null | null |
aulas/aula006b.py
|
figueiredo-alef/estud-python
|
f22351ecb966ec84433bb6078d92d4f31d5a0a7e
|
[
"MIT"
] | null | null | null |
# Lesson 006b: reading user input and inspecting the resulting types.
print('=' * 5, 'AULA_006b', '=' * 5)
# Convert the input to float and echo it (float() raises ValueError on bad input).
n0 = float(input('digite um valor: '))
print(n0)
# input() already returns str, so the original str(...) wrapper was a no-op
# and has been removed; behavior is identical.
n1 = input('digite um valor: ')
print(type(n1))
# isnumeric() reports whether every character of the raw input is numeric.
n2 = input('digite algo: ')
print(n2.isnumeric())
| 23.625
| 38
| 0.608466
| 29
| 189
| 3.931034
| 0.551724
| 0.289474
| 0.22807
| 0.315789
| 0.403509
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067901
| 0.142857
| 189
| 7
| 39
| 27
| 0.635802
| 0
| 0
| 0
| 0
| 0
| 0.306878
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.571429
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
9da3cce61a82aaf61028dee623894e05eee20708
| 8,660
|
py
|
Python
|
tempest/tests/lib/services/identity/v3/test_inherited_roles_client.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | 254
|
2015-01-05T19:22:52.000Z
|
2022-03-29T08:14:54.000Z
|
tempest/tests/lib/services/identity/v3/test_inherited_roles_client.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | 13
|
2015-03-02T15:53:04.000Z
|
2022-02-16T02:28:14.000Z
|
tempest/tests/lib/services/identity/v3/test_inherited_roles_client.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | 367
|
2015-01-07T15:05:39.000Z
|
2022-03-04T09:50:35.000Z
|
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.identity.v3 import inherited_roles_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestInheritedRolesClient(base.BaseServiceTest):
    """Unit tests for the identity v3 inherited-roles (OS-INHERIT) client.

    Each test patches the named RestClient HTTP verb and verifies the client
    method issues the call and decodes the canned response, for both str and
    bytes response bodies where applicable.
    """

    FAKE_LIST_INHERITED_ROLES = {
        "roles": [
            {
                "id": "1",
                "name": "test",
                "links": "example.com"
            },
            {
                "id": "2",
                "name": "test2",
                "links": "example.com"
            }
        ]
    }

    def setUp(self):
        super(TestInheritedRolesClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = inherited_roles_client.InheritedRolesClient(
            fake_auth, 'identity', 'regionOne')

    def _test_create_inherited_role_on_domains_user(self, bytes_body=False):
        self.check_service_client_function(
            self.client.create_inherited_role_on_domains_user,
            'tempest.lib.common.rest_client.RestClient.put',
            {},
            bytes_body,
            domain_id="b344506af7644f6794d9cb316600b020",
            user_id="123",
            role_id="1234",
            status=204)

    def _test_list_inherited_project_role_for_user_on_domain(
            self, bytes_body=False):
        self.check_service_client_function(
            self.client.list_inherited_project_role_for_user_on_domain,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_LIST_INHERITED_ROLES,
            bytes_body,
            domain_id="b344506af7644f6794d9cb316600b020",
            user_id="123")

    def _test_create_inherited_role_on_domains_group(self, bytes_body=False):
        self.check_service_client_function(
            self.client.create_inherited_role_on_domains_group,
            'tempest.lib.common.rest_client.RestClient.put',
            {},
            bytes_body,
            domain_id="b344506af7644f6794d9cb316600b020",
            group_id="123",
            role_id="1234",
            status=204)

    def _test_list_inherited_project_role_for_group_on_domain(
            self, bytes_body=False):
        self.check_service_client_function(
            self.client.list_inherited_project_role_for_group_on_domain,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_LIST_INHERITED_ROLES,
            bytes_body,
            domain_id="b344506af7644f6794d9cb316600b020",
            group_id="123")

    def _test_create_inherited_role_on_projects_user(self, bytes_body=False):
        self.check_service_client_function(
            self.client.create_inherited_role_on_projects_user,
            'tempest.lib.common.rest_client.RestClient.put',
            {},
            bytes_body,
            project_id="b344506af7644f6794d9cb316600b020",
            user_id="123",
            role_id="1234",
            status=204)

    def _test_create_inherited_role_on_projects_group(self, bytes_body=False):
        self.check_service_client_function(
            self.client.create_inherited_role_on_projects_group,
            'tempest.lib.common.rest_client.RestClient.put',
            {},
            bytes_body,
            project_id="b344506af7644f6794d9cb316600b020",
            group_id="123",
            role_id="1234",
            status=204)

    def test_create_inherited_role_on_domains_user_with_str_body(self):
        self._test_create_inherited_role_on_domains_user()

    def test_create_inherited_role_on_domains_user_with_bytes_body(self):
        self._test_create_inherited_role_on_domains_user(bytes_body=True)

    def test_create_inherited_role_on_domains_group_with_str_body(self):
        self._test_create_inherited_role_on_domains_group()

    def test_create_inherited_role_on_domains_group_with_bytes_body(self):
        self._test_create_inherited_role_on_domains_group(bytes_body=True)

    def test_create_inherited_role_on_projects_user_with_str_body(self):
        self._test_create_inherited_role_on_projects_user()

    # Added for parity with the domains_user tests: the bytes-body variant
    # was previously missing for projects_user.
    def test_create_inherited_role_on_projects_user_with_bytes_body(self):
        self._test_create_inherited_role_on_projects_user(bytes_body=True)

    # Added for parity with the domains_group tests: the str-body variant
    # was previously missing for projects_group.
    def test_create_inherited_role_on_projects_group_with_str_body(self):
        self._test_create_inherited_role_on_projects_group()

    def test_create_inherited_role_on_projects_group_with_bytes_body(self):
        self._test_create_inherited_role_on_projects_group(bytes_body=True)

    def test_list_inherited_project_role_for_user_on_domain_with_str_body(
            self):
        self._test_list_inherited_project_role_for_user_on_domain()

    def test_list_inherited_project_role_for_user_on_domain_with_bytes_body(
            self):
        self._test_list_inherited_project_role_for_user_on_domain(
            bytes_body=True)

    def test_list_inherited_project_role_for_group_on_domain_with_str_body(
            self):
        self._test_list_inherited_project_role_for_group_on_domain()

    def test_list_inherited_project_role_for_group_on_domain_with_bytes_body(
            self):
        self._test_list_inherited_project_role_for_group_on_domain(
            bytes_body=True)

    def test_delete_inherited_role_from_user_on_domain(self):
        self.check_service_client_function(
            self.client.delete_inherited_role_from_user_on_domain,
            'tempest.lib.common.rest_client.RestClient.delete',
            {},
            domain_id="b344506af7644f6794d9cb316600b020",
            user_id="123",
            role_id="1234",
            status=204)

    def test_check_user_inherited_project_role_on_domain(self):
        self.check_service_client_function(
            self.client.check_user_inherited_project_role_on_domain,
            'tempest.lib.common.rest_client.RestClient.head',
            {},
            domain_id="b344506af7644f6794d9cb316600b020",
            user_id="123",
            role_id="1234",
            status=204)

    def test_delete_inherited_role_from_group_on_domain(self):
        self.check_service_client_function(
            self.client.delete_inherited_role_from_group_on_domain,
            'tempest.lib.common.rest_client.RestClient.delete',
            {},
            domain_id="b344506af7644f6794d9cb316600b020",
            group_id="123",
            role_id="1234",
            status=204)

    def test_check_group_inherited_project_role_on_domain(self):
        self.check_service_client_function(
            self.client.check_group_inherited_project_role_on_domain,
            'tempest.lib.common.rest_client.RestClient.head',
            {},
            domain_id="b344506af7644f6794d9cb316600b020",
            group_id="123",
            role_id="1234",
            status=204)

    def test_delete_inherited_role_from_user_on_project(self):
        self.check_service_client_function(
            self.client.delete_inherited_role_from_user_on_project,
            'tempest.lib.common.rest_client.RestClient.delete',
            {},
            project_id="b344506af7644f6794d9cb316600b020",
            user_id="123",
            role_id="1234",
            status=204)

    def test_check_user_has_flag_on_inherited_to_project(self):
        self.check_service_client_function(
            self.client.check_user_has_flag_on_inherited_to_project,
            'tempest.lib.common.rest_client.RestClient.head',
            {},
            project_id="b344506af7644f6794d9cb316600b020",
            user_id="123",
            role_id="1234",
            status=204)

    def test_delete_inherited_role_from_group_on_project(self):
        self.check_service_client_function(
            self.client.delete_inherited_role_from_group_on_project,
            'tempest.lib.common.rest_client.RestClient.delete',
            {},
            project_id="b344506af7644f6794d9cb316600b020",
            group_id="123",
            role_id="1234",
            status=204)

    def test_check_group_has_flag_on_inherited_to_project(self):
        self.check_service_client_function(
            self.client.check_group_has_flag_on_inherited_to_project,
            'tempest.lib.common.rest_client.RestClient.head',
            {},
            project_id="b344506af7644f6794d9cb316600b020",
            group_id="123",
            role_id="1234",
            status=204)
| 39.18552
| 78
| 0.677714
| 1,013
| 8,660
| 5.304047
| 0.128332
| 0.067746
| 0.070724
| 0.078169
| 0.834543
| 0.834543
| 0.834543
| 0.830449
| 0.790434
| 0.753211
| 0
| 0.072644
| 0.246536
| 8,660
| 220
| 79
| 39.363636
| 0.750805
| 0.066397
| 0
| 0.603352
| 0
| 0
| 0.156238
| 0.135547
| 0
| 0
| 0
| 0
| 0
| 1
| 0.139665
| false
| 0
| 0.01676
| 0
| 0.167598
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9df3cb95726e327177911e561e5a42756d34d54c
| 33
|
py
|
Python
|
gvision.py
|
Moulin666/Telegram-Neural-Net-Bot
|
7da381765a5bc716921f3931339dd06bed8c232e
|
[
"MIT"
] | null | null | null |
gvision.py
|
Moulin666/Telegram-Neural-Net-Bot
|
7da381765a5bc716921f3931339dd06bed8c232e
|
[
"MIT"
] | null | null | null |
gvision.py
|
Moulin666/Telegram-Neural-Net-Bot
|
7da381765a5bc716921f3931339dd06bed8c232e
|
[
"MIT"
] | null | null | null |
from google.cloud import vision
| 11
| 31
| 0.818182
| 5
| 33
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 33
| 2
| 32
| 16.5
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9df4f2730016328f3a5f224ef3b5e9145990dc79
| 98
|
py
|
Python
|
BricksO/bricks/item/__init__.py
|
Jhonan01/Brick
|
09d62d8cde3a5503ad8b84eaea54edbd91445479
|
[
"Apache-2.0"
] | null | null | null |
BricksO/bricks/item/__init__.py
|
Jhonan01/Brick
|
09d62d8cde3a5503ad8b84eaea54edbd91445479
|
[
"Apache-2.0"
] | null | null | null |
BricksO/bricks/item/__init__.py
|
Jhonan01/Brick
|
09d62d8cde3a5503ad8b84eaea54edbd91445479
|
[
"Apache-2.0"
] | null | null | null |
from flask import Blueprint

# Blueprint grouping the item-related routes; registered under the name 'item'.
item_bp = Blueprint('item',__name__)

# Imported for its side effect of attaching route handlers to item_bp.
# Placed after the Blueprint definition -- presumably to avoid a circular
# import (the usual Flask blueprint pattern); confirm against bricks.item.routes.
from bricks.item import routes
| 16.333333
| 36
| 0.795918
| 14
| 98
| 5.214286
| 0.642857
| 0.356164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132653
| 98
| 5
| 37
| 19.6
| 0.858824
| 0
| 0
| 0
| 0
| 0
| 0.040816
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
d1882e0ec192c23f3dbdbd777675d2c106a7b9ea
| 128
|
py
|
Python
|
bin/apache-hive-3.1.2-bin/lib/py/thrift/reflection/__init__.py
|
ptrick/hdfs-hive-sql-playground
|
83f2aaa79f022a3c320939eace1fd2d06583187f
|
[
"Apache-2.0"
] | null | null | null |
bin/apache-hive-3.1.2-bin/lib/py/thrift/reflection/__init__.py
|
ptrick/hdfs-hive-sql-playground
|
83f2aaa79f022a3c320939eace1fd2d06583187f
|
[
"Apache-2.0"
] | null | null | null |
bin/apache-hive-3.1.2-bin/lib/py/thrift/reflection/__init__.py
|
ptrick/hdfs-hive-sql-playground
|
83f2aaa79f022a3c320939eace1fd2d06583187f
|
[
"Apache-2.0"
] | null | null | null |
version https://git-lfs.github.com/spec/v1
oid sha256:321f6bda5d0842186e6228899745f9973ef88558c1b07a951889f94b47e9499e
size 807
| 32
| 75
| 0.882813
| 13
| 128
| 8.692308
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.45082
| 0.046875
| 128
| 3
| 76
| 42.666667
| 0.47541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d1c9cbef22afa42555d886872b440e0476143964
| 40,387
|
py
|
Python
|
cottonformation/res/msk.py
|
MacHu-GWU/cottonformation-project
|
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
|
[
"BSD-2-Clause"
] | 5
|
2021-07-22T03:45:59.000Z
|
2021-12-17T21:07:14.000Z
|
cottonformation/res/msk.py
|
MacHu-GWU/cottonformation-project
|
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
|
[
"BSD-2-Clause"
] | 1
|
2021-06-25T18:01:31.000Z
|
2021-06-25T18:01:31.000Z
|
cottonformation/res/msk.py
|
MacHu-GWU/cottonformation-project
|
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
|
[
"BSD-2-Clause"
] | 2
|
2021-06-27T03:08:21.000Z
|
2021-06-28T22:15:51.000Z
|
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class PropClusterS3(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.S3"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-s3.html

    Property Document:

    - ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-s3.html#cfn-msk-cluster-s3-enabled
    - ``p_Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-s3.html#cfn-msk-cluster-s3-bucket
    - ``p_Prefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-s3.html#cfn-msk-cluster-s3-prefix
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.S3"

    # rp_ prefix = required: the strict instance_of(bool) validator rejects
    # the None default at instantiation if the field is not supplied --
    # presumably how "required" is enforced here; confirm with the Property base.
    rp_Enabled: bool = attr.ib(
        default=None,
        validator=attr.validators.instance_of(bool),
        metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-s3.html#cfn-msk-cluster-s3-enabled"""
    # p_ prefix = optional: the validator is wrapped in optional(), so None passes.
    p_Bucket: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Bucket"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-s3.html#cfn-msk-cluster-s3-bucket"""
    p_Prefix: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Prefix"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-s3.html#cfn-msk-cluster-s3-prefix"""
@attr.s
class PropClusterCloudWatchLogs(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.CloudWatchLogs"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-cloudwatchlogs.html

    Property Document:

    - ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-cloudwatchlogs.html#cfn-msk-cluster-cloudwatchlogs-enabled
    - ``p_LogGroup``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-cloudwatchlogs.html#cfn-msk-cluster-cloudwatchlogs-loggroup
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.CloudWatchLogs"

    # Required (strict validator); optional fields below wrap theirs in optional().
    rp_Enabled: bool = attr.ib(
        default=None,
        validator=attr.validators.instance_of(bool),
        metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-cloudwatchlogs.html#cfn-msk-cluster-cloudwatchlogs-enabled"""
    p_LogGroup: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "LogGroup"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-cloudwatchlogs.html#cfn-msk-cluster-cloudwatchlogs-loggroup"""
@attr.s
class PropClusterPublicAccess(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.PublicAccess"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-publicaccess.html

    Property Document:

    - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-publicaccess.html#cfn-msk-cluster-publicaccess-type
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.PublicAccess"

    # Optional string field (validator wrapped in optional(), so None passes).
    p_Type: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Type"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-publicaccess.html#cfn-msk-cluster-publicaccess-type"""
@attr.s
class PropClusterEncryptionAtRest(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.EncryptionAtRest"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptionatrest.html

    Property Document:

    - ``rp_DataVolumeKMSKeyId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptionatrest.html#cfn-msk-cluster-encryptionatrest-datavolumekmskeyid
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.EncryptionAtRest"

    # Required string field: strict validator, no optional() wrapper.
    rp_DataVolumeKMSKeyId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "DataVolumeKMSKeyId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptionatrest.html#cfn-msk-cluster-encryptionatrest-datavolumekmskeyid"""
@attr.s
class PropClusterUnauthenticated(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.Unauthenticated"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-unauthenticated.html

    Property Document:

    - ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-unauthenticated.html#cfn-msk-cluster-unauthenticated-enabled
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.Unauthenticated"

    # Required bool field: strict validator, no optional() wrapper.
    rp_Enabled: bool = attr.ib(
        default=None,
        validator=attr.validators.instance_of(bool),
        metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-unauthenticated.html#cfn-msk-cluster-unauthenticated-enabled"""
@attr.s
class PropClusterEncryptionInTransit(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.EncryptionInTransit"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptionintransit.html

    Property Document:

    - ``p_ClientBroker``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptionintransit.html#cfn-msk-cluster-encryptionintransit-clientbroker
    - ``p_InCluster``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptionintransit.html#cfn-msk-cluster-encryptionintransit-incluster
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.EncryptionInTransit"

    # Both fields optional: validators wrapped in optional(), so None passes.
    p_ClientBroker: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "ClientBroker"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptionintransit.html#cfn-msk-cluster-encryptionintransit-clientbroker"""
    p_InCluster: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "InCluster"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptionintransit.html#cfn-msk-cluster-encryptionintransit-incluster"""
@attr.s
class PropClusterEncryptionInfo(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.EncryptionInfo"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptioninfo.html

    Property Document:

    - ``p_EncryptionAtRest``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptioninfo.html#cfn-msk-cluster-encryptioninfo-encryptionatrest
    - ``p_EncryptionInTransit``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptioninfo.html#cfn-msk-cluster-encryptioninfo-encryptionintransit
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.EncryptionInfo"

    # Nested property objects: the converter coerces a plain dict into the
    # typed Prop* instance before the optional validator runs.
    p_EncryptionAtRest: typing.Union['PropClusterEncryptionAtRest', dict] = attr.ib(
        default=None,
        converter=PropClusterEncryptionAtRest.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterEncryptionAtRest)),
        metadata={AttrMeta.PROPERTY_NAME: "EncryptionAtRest"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptioninfo.html#cfn-msk-cluster-encryptioninfo-encryptionatrest"""
    p_EncryptionInTransit: typing.Union['PropClusterEncryptionInTransit', dict] = attr.ib(
        default=None,
        converter=PropClusterEncryptionInTransit.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterEncryptionInTransit)),
        metadata={AttrMeta.PROPERTY_NAME: "EncryptionInTransit"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-encryptioninfo.html#cfn-msk-cluster-encryptioninfo-encryptionintransit"""
@attr.s
class PropClusterIam(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.Iam"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-iam.html

    Property Document:

    - ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-iam.html#cfn-msk-cluster-iam-enabled
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.Iam"

    # Required bool field: strict validator, no optional() wrapper.
    rp_Enabled: bool = attr.ib(
        default=None,
        validator=attr.validators.instance_of(bool),
        metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-iam.html#cfn-msk-cluster-iam-enabled"""
@attr.s
class PropClusterConfigurationInfo(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.ConfigurationInfo"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-configurationinfo.html

    Property Document:

    - ``rp_Arn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-configurationinfo.html#cfn-msk-cluster-configurationinfo-arn
    - ``rp_Revision``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-configurationinfo.html#cfn-msk-cluster-configurationinfo-revision
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.ConfigurationInfo"

    # Both fields required: strict validators, no optional() wrappers.
    rp_Arn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Arn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-configurationinfo.html#cfn-msk-cluster-configurationinfo-arn"""
    rp_Revision: int = attr.ib(
        default=None,
        validator=attr.validators.instance_of(int),
        metadata={AttrMeta.PROPERTY_NAME: "Revision"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-configurationinfo.html#cfn-msk-cluster-configurationinfo-revision"""
@attr.s
class PropClusterScram(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.Scram"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-scram.html

    Property Document:

    - ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-scram.html#cfn-msk-cluster-scram-enabled
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.Scram"

    # Required bool field: strict validator, no optional() wrapper.
    rp_Enabled: bool = attr.ib(
        default=None,
        validator=attr.validators.instance_of(bool),
        metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-scram.html#cfn-msk-cluster-scram-enabled"""
@attr.s
class PropClusterJmxExporter(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.JmxExporter"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-jmxexporter.html
    Property Document:
    - ``rp_EnabledInBroker``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-jmxexporter.html#cfn-msk-cluster-jmxexporter-enabledinbroker
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.JmxExporter"
    # Required: non-optional validator rejects the None placeholder default.
    rp_EnabledInBroker: bool = attr.ib(
        default=None,
        validator=attr.validators.instance_of(bool),
        metadata={AttrMeta.PROPERTY_NAME: "EnabledInBroker"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-jmxexporter.html#cfn-msk-cluster-jmxexporter-enabledinbroker"""
@attr.s
class PropClusterConnectivityInfo(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.ConnectivityInfo"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-connectivityinfo.html
    Property Document:
    - ``p_PublicAccess``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-connectivityinfo.html#cfn-msk-cluster-connectivityinfo-publicaccess
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.ConnectivityInfo"
    # Optional ("p_" prefix): optional() validator accepts None. Callers may
    # pass either a PropClusterPublicAccess instance or a plain dict, which
    # the from_dict converter turns into an instance before validation.
    p_PublicAccess: typing.Union['PropClusterPublicAccess', dict] = attr.ib(
        default=None,
        converter=PropClusterPublicAccess.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterPublicAccess)),
        metadata={AttrMeta.PROPERTY_NAME: "PublicAccess"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-connectivityinfo.html#cfn-msk-cluster-connectivityinfo-publicaccess"""
@attr.s
class PropClusterNodeExporter(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.NodeExporter"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-nodeexporter.html
    Property Document:
    - ``rp_EnabledInBroker``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-nodeexporter.html#cfn-msk-cluster-nodeexporter-enabledinbroker
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.NodeExporter"
    # Required: non-optional validator rejects the None placeholder default.
    rp_EnabledInBroker: bool = attr.ib(
        default=None,
        validator=attr.validators.instance_of(bool),
        metadata={AttrMeta.PROPERTY_NAME: "EnabledInBroker"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-nodeexporter.html#cfn-msk-cluster-nodeexporter-enabledinbroker"""
@attr.s
class PropClusterEBSStorageInfo(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.EBSStorageInfo"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-ebsstorageinfo.html
    Property Document:
    - ``p_VolumeSize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-ebsstorageinfo.html#cfn-msk-cluster-ebsstorageinfo-volumesize
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.EBSStorageInfo"
    # Optional: optional() validator accepts None.
    p_VolumeSize: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "VolumeSize"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-ebsstorageinfo.html#cfn-msk-cluster-ebsstorageinfo-volumesize"""
@attr.s
class PropClusterFirehose(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.Firehose"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-firehose.html
    Property Document:
    - ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-firehose.html#cfn-msk-cluster-firehose-enabled
    - ``p_DeliveryStream``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-firehose.html#cfn-msk-cluster-firehose-deliverystream
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.Firehose"
    # Required: non-optional validator rejects the None placeholder default.
    rp_Enabled: bool = attr.ib(
        default=None,
        validator=attr.validators.instance_of(bool),
        metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-firehose.html#cfn-msk-cluster-firehose-enabled"""
    # Optional: optional() validator accepts None.
    p_DeliveryStream: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "DeliveryStream"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-firehose.html#cfn-msk-cluster-firehose-deliverystream"""
@attr.s
class PropClusterTls(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.Tls"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-tls.html
    Property Document:
    - ``p_CertificateAuthorityArnList``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-tls.html#cfn-msk-cluster-tls-certificateauthorityarnlist
    - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-tls.html#cfn-msk-cluster-tls-enabled
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.Tls"
    # Optional: when given, must be a list whose members are intrinsic strings.
    p_CertificateAuthorityArnList: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "CertificateAuthorityArnList"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-tls.html#cfn-msk-cluster-tls-certificateauthorityarnlist"""
    # Optional: optional() validator accepts None.
    p_Enabled: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-tls.html#cfn-msk-cluster-tls-enabled"""
@attr.s
class PropClusterBrokerLogs(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.BrokerLogs"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokerlogs.html
    Property Document:
    - ``p_CloudWatchLogs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokerlogs.html#cfn-msk-cluster-brokerlogs-cloudwatchlogs
    - ``p_Firehose``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokerlogs.html#cfn-msk-cluster-brokerlogs-firehose
    - ``p_S3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokerlogs.html#cfn-msk-cluster-brokerlogs-s3
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.BrokerLogs"
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_CloudWatchLogs: typing.Union['PropClusterCloudWatchLogs', dict] = attr.ib(
        default=None,
        converter=PropClusterCloudWatchLogs.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterCloudWatchLogs)),
        metadata={AttrMeta.PROPERTY_NAME: "CloudWatchLogs"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokerlogs.html#cfn-msk-cluster-brokerlogs-cloudwatchlogs"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_Firehose: typing.Union['PropClusterFirehose', dict] = attr.ib(
        default=None,
        converter=PropClusterFirehose.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterFirehose)),
        metadata={AttrMeta.PROPERTY_NAME: "Firehose"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokerlogs.html#cfn-msk-cluster-brokerlogs-firehose"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_S3: typing.Union['PropClusterS3', dict] = attr.ib(
        default=None,
        converter=PropClusterS3.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterS3)),
        metadata={AttrMeta.PROPERTY_NAME: "S3"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokerlogs.html#cfn-msk-cluster-brokerlogs-s3"""
@attr.s
class PropClusterPrometheus(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.Prometheus"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-prometheus.html
    Property Document:
    - ``p_JmxExporter``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-prometheus.html#cfn-msk-cluster-prometheus-jmxexporter
    - ``p_NodeExporter``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-prometheus.html#cfn-msk-cluster-prometheus-nodeexporter
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.Prometheus"
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_JmxExporter: typing.Union['PropClusterJmxExporter', dict] = attr.ib(
        default=None,
        converter=PropClusterJmxExporter.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterJmxExporter)),
        metadata={AttrMeta.PROPERTY_NAME: "JmxExporter"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-prometheus.html#cfn-msk-cluster-prometheus-jmxexporter"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_NodeExporter: typing.Union['PropClusterNodeExporter', dict] = attr.ib(
        default=None,
        converter=PropClusterNodeExporter.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterNodeExporter)),
        metadata={AttrMeta.PROPERTY_NAME: "NodeExporter"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-prometheus.html#cfn-msk-cluster-prometheus-nodeexporter"""
@attr.s
class PropClusterLoggingInfo(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.LoggingInfo"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-logginginfo.html
    Property Document:
    - ``rp_BrokerLogs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-logginginfo.html#cfn-msk-cluster-logginginfo-brokerlogs
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.LoggingInfo"
    # Required nested property: accepts an instance or a dict (from_dict
    # converter); the non-optional validator rejects the None placeholder default.
    rp_BrokerLogs: typing.Union['PropClusterBrokerLogs', dict] = attr.ib(
        default=None,
        converter=PropClusterBrokerLogs.from_dict,
        validator=attr.validators.instance_of(PropClusterBrokerLogs),
        metadata={AttrMeta.PROPERTY_NAME: "BrokerLogs"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-logginginfo.html#cfn-msk-cluster-logginginfo-brokerlogs"""
@attr.s
class PropClusterSasl(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.Sasl"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-sasl.html
    Property Document:
    - ``p_Iam``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-sasl.html#cfn-msk-cluster-sasl-iam
    - ``p_Scram``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-sasl.html#cfn-msk-cluster-sasl-scram
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.Sasl"
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_Iam: typing.Union['PropClusterIam', dict] = attr.ib(
        default=None,
        converter=PropClusterIam.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterIam)),
        metadata={AttrMeta.PROPERTY_NAME: "Iam"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-sasl.html#cfn-msk-cluster-sasl-iam"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_Scram: typing.Union['PropClusterScram', dict] = attr.ib(
        default=None,
        converter=PropClusterScram.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterScram)),
        metadata={AttrMeta.PROPERTY_NAME: "Scram"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-sasl.html#cfn-msk-cluster-sasl-scram"""
@attr.s
class PropClusterStorageInfo(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.StorageInfo"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-storageinfo.html
    Property Document:
    - ``p_EBSStorageInfo``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-storageinfo.html#cfn-msk-cluster-storageinfo-ebsstorageinfo
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.StorageInfo"
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_EBSStorageInfo: typing.Union['PropClusterEBSStorageInfo', dict] = attr.ib(
        default=None,
        converter=PropClusterEBSStorageInfo.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterEBSStorageInfo)),
        metadata={AttrMeta.PROPERTY_NAME: "EBSStorageInfo"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-storageinfo.html#cfn-msk-cluster-storageinfo-ebsstorageinfo"""
@attr.s
class PropClusterClientAuthentication(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.ClientAuthentication"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-clientauthentication.html
    Property Document:
    - ``p_Sasl``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-clientauthentication.html#cfn-msk-cluster-clientauthentication-sasl
    - ``p_Tls``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-clientauthentication.html#cfn-msk-cluster-clientauthentication-tls
    - ``p_Unauthenticated``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-clientauthentication.html#cfn-msk-cluster-clientauthentication-unauthenticated
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.ClientAuthentication"
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_Sasl: typing.Union['PropClusterSasl', dict] = attr.ib(
        default=None,
        converter=PropClusterSasl.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterSasl)),
        metadata={AttrMeta.PROPERTY_NAME: "Sasl"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-clientauthentication.html#cfn-msk-cluster-clientauthentication-sasl"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_Tls: typing.Union['PropClusterTls', dict] = attr.ib(
        default=None,
        converter=PropClusterTls.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterTls)),
        metadata={AttrMeta.PROPERTY_NAME: "Tls"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-clientauthentication.html#cfn-msk-cluster-clientauthentication-tls"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_Unauthenticated: typing.Union['PropClusterUnauthenticated', dict] = attr.ib(
        default=None,
        converter=PropClusterUnauthenticated.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterUnauthenticated)),
        metadata={AttrMeta.PROPERTY_NAME: "Unauthenticated"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-clientauthentication.html#cfn-msk-cluster-clientauthentication-unauthenticated"""
@attr.s
class PropClusterOpenMonitoring(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.OpenMonitoring"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-openmonitoring.html
    Property Document:
    - ``rp_Prometheus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-openmonitoring.html#cfn-msk-cluster-openmonitoring-prometheus
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.OpenMonitoring"
    # Required nested property: accepts an instance or a dict (from_dict
    # converter); the non-optional validator rejects the None placeholder default.
    rp_Prometheus: typing.Union['PropClusterPrometheus', dict] = attr.ib(
        default=None,
        converter=PropClusterPrometheus.from_dict,
        validator=attr.validators.instance_of(PropClusterPrometheus),
        metadata={AttrMeta.PROPERTY_NAME: "Prometheus"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-openmonitoring.html#cfn-msk-cluster-openmonitoring-prometheus"""
@attr.s
class PropClusterBrokerNodeGroupInfo(Property):
    """
    AWS Object Type = "AWS::MSK::Cluster.BrokerNodeGroupInfo"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html
    Property Document:
    - ``rp_ClientSubnets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html#cfn-msk-cluster-brokernodegroupinfo-clientsubnets
    - ``rp_InstanceType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html#cfn-msk-cluster-brokernodegroupinfo-instancetype
    - ``p_BrokerAZDistribution``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html#cfn-msk-cluster-brokernodegroupinfo-brokerazdistribution
    - ``p_ConnectivityInfo``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html#cfn-msk-cluster-brokernodegroupinfo-connectivityinfo
    - ``p_SecurityGroups``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html#cfn-msk-cluster-brokernodegroupinfo-securitygroups
    - ``p_StorageInfo``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html#cfn-msk-cluster-brokernodegroupinfo-storageinfo
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster.BrokerNodeGroupInfo"
    # Required: must be a list of intrinsic strings; the non-optional
    # deep_iterable validator rejects the None placeholder default.
    rp_ClientSubnets: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "ClientSubnets"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html#cfn-msk-cluster-brokernodegroupinfo-clientsubnets"""
    # Required: non-optional validator rejects the None placeholder default.
    rp_InstanceType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "InstanceType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html#cfn-msk-cluster-brokernodegroupinfo-instancetype"""
    # Optional: optional() validator accepts None.
    p_BrokerAZDistribution: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "BrokerAZDistribution"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html#cfn-msk-cluster-brokernodegroupinfo-brokerazdistribution"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_ConnectivityInfo: typing.Union['PropClusterConnectivityInfo', dict] = attr.ib(
        default=None,
        converter=PropClusterConnectivityInfo.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterConnectivityInfo)),
        metadata={AttrMeta.PROPERTY_NAME: "ConnectivityInfo"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html#cfn-msk-cluster-brokernodegroupinfo-connectivityinfo"""
    # Optional: when given, must be a list whose members are intrinsic strings.
    p_SecurityGroups: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "SecurityGroups"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html#cfn-msk-cluster-brokernodegroupinfo-securitygroups"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_StorageInfo: typing.Union['PropClusterStorageInfo', dict] = attr.ib(
        default=None,
        converter=PropClusterStorageInfo.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterStorageInfo)),
        metadata={AttrMeta.PROPERTY_NAME: "StorageInfo"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-msk-cluster-brokernodegroupinfo.html#cfn-msk-cluster-brokernodegroupinfo-storageinfo"""
#--- Resource declaration ---
@attr.s
class Cluster(Resource):
    """
    AWS Object Type = "AWS::MSK::Cluster"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html
    Property Document:
    - ``rp_BrokerNodeGroupInfo``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-brokernodegroupinfo
    - ``rp_ClusterName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-clustername
    - ``rp_KafkaVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-kafkaversion
    - ``rp_NumberOfBrokerNodes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-numberofbrokernodes
    - ``p_ClientAuthentication``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-clientauthentication
    - ``p_ConfigurationInfo``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-configurationinfo
    - ``p_EncryptionInfo``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-encryptioninfo
    - ``p_EnhancedMonitoring``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-enhancedmonitoring
    - ``p_LoggingInfo``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-logginginfo
    - ``p_OpenMonitoring``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-openmonitoring
    - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-tags
    """
    AWS_OBJECT_TYPE = "AWS::MSK::Cluster"
    # Required nested property: accepts an instance or a dict (from_dict
    # converter); the non-optional validator rejects the None placeholder default.
    rp_BrokerNodeGroupInfo: typing.Union['PropClusterBrokerNodeGroupInfo', dict] = attr.ib(
        default=None,
        converter=PropClusterBrokerNodeGroupInfo.from_dict,
        validator=attr.validators.instance_of(PropClusterBrokerNodeGroupInfo),
        metadata={AttrMeta.PROPERTY_NAME: "BrokerNodeGroupInfo"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-brokernodegroupinfo"""
    # Required: non-optional validator rejects the None placeholder default.
    rp_ClusterName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ClusterName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-clustername"""
    # Required: non-optional validator rejects the None placeholder default.
    rp_KafkaVersion: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "KafkaVersion"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-kafkaversion"""
    # Required: non-optional validator rejects the None placeholder default.
    rp_NumberOfBrokerNodes: int = attr.ib(
        default=None,
        validator=attr.validators.instance_of(int),
        metadata={AttrMeta.PROPERTY_NAME: "NumberOfBrokerNodes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-numberofbrokernodes"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_ClientAuthentication: typing.Union['PropClusterClientAuthentication', dict] = attr.ib(
        default=None,
        converter=PropClusterClientAuthentication.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterClientAuthentication)),
        metadata={AttrMeta.PROPERTY_NAME: "ClientAuthentication"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-clientauthentication"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_ConfigurationInfo: typing.Union['PropClusterConfigurationInfo', dict] = attr.ib(
        default=None,
        converter=PropClusterConfigurationInfo.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterConfigurationInfo)),
        metadata={AttrMeta.PROPERTY_NAME: "ConfigurationInfo"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-configurationinfo"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_EncryptionInfo: typing.Union['PropClusterEncryptionInfo', dict] = attr.ib(
        default=None,
        converter=PropClusterEncryptionInfo.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterEncryptionInfo)),
        metadata={AttrMeta.PROPERTY_NAME: "EncryptionInfo"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-encryptioninfo"""
    # Optional: optional() validator accepts None.
    p_EnhancedMonitoring: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "EnhancedMonitoring"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-enhancedmonitoring"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_LoggingInfo: typing.Union['PropClusterLoggingInfo', dict] = attr.ib(
        default=None,
        converter=PropClusterLoggingInfo.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterLoggingInfo)),
        metadata={AttrMeta.PROPERTY_NAME: "LoggingInfo"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-logginginfo"""
    # Optional nested property: accepts an instance or a dict (from_dict converter).
    p_OpenMonitoring: typing.Union['PropClusterOpenMonitoring', dict] = attr.ib(
        default=None,
        converter=PropClusterOpenMonitoring.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropClusterOpenMonitoring)),
        metadata={AttrMeta.PROPERTY_NAME: "OpenMonitoring"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-openmonitoring"""
    # Optional: plain dict of tag key/value pairs; optional() accepts None.
    p_Tags: dict = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(dict)),
        metadata={AttrMeta.PROPERTY_NAME: "Tags"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-msk-cluster.html#cfn-msk-cluster-tags"""
| 53.921228
| 204
| 0.749697
| 4,490
| 40,387
| 6.665479
| 0.029399
| 0.097233
| 0.048884
| 0.075548
| 0.838746
| 0.838613
| 0.812817
| 0.741881
| 0.74178
| 0.740744
| 0
| 0.000698
| 0.113576
| 40,387
| 748
| 205
| 53.993316
| 0.835279
| 0.344368
| 0
| 0.328
| 0
| 0
| 0.108564
| 0.067679
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010667
| 0
| 0.288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d1d8de48543193073dacbc606b325859efe40767
| 322
|
py
|
Python
|
lims/models/__init__.py
|
razorlabs/BRIMS-backend
|
2c5b7bd126debec459b775e9d11e96fc09975059
|
[
"MIT"
] | 1
|
2020-03-20T23:00:24.000Z
|
2020-03-20T23:00:24.000Z
|
lims/models/__init__.py
|
razorlabs/BRIMS-backend
|
2c5b7bd126debec459b775e9d11e96fc09975059
|
[
"MIT"
] | null | null | null |
lims/models/__init__.py
|
razorlabs/BRIMS-backend
|
2c5b7bd126debec459b775e9d11e96fc09975059
|
[
"MIT"
] | 1
|
2020-03-09T09:57:25.000Z
|
2020-03-09T09:57:25.000Z
|
# Initialization file that imports models from sub-organized source modules into a single "models" package for Django ingestion
from lims.models.user import *
from lims.models.schedule import *
from lims.models.shipping import *
from lims.models.storage import *
from lims.models.patient import *
from lims.models.specimen import *
| 35.777778
| 116
| 0.801242
| 47
| 322
| 5.489362
| 0.468085
| 0.186047
| 0.325581
| 0.387597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 322
| 8
| 117
| 40.25
| 0.921429
| 0.354037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
061650c680f85973a286c989ab9e6122fccebbe7
| 18,370
|
py
|
Python
|
rsp1570serial/tests/test_messages.py
|
pp81381/rsp1570serial
|
9a31be2578f00905a1df2a78a46f3c87631cd177
|
[
"MIT"
] | 1
|
2020-03-06T06:04:18.000Z
|
2020-03-06T06:04:18.000Z
|
rsp1570serial/tests/test_messages.py
|
pp81381/rsp1570serial
|
9a31be2578f00905a1df2a78a46f3c87631cd177
|
[
"MIT"
] | null | null | null |
rsp1570serial/tests/test_messages.py
|
pp81381/rsp1570serial
|
9a31be2578f00905a1df2a78a46f3c87631cd177
|
[
"MIT"
] | 1
|
2020-01-21T18:27:07.000Z
|
2020-01-21T18:27:07.000Z
|
import aiounittest
import logging
from rsp1570serial.commands import (
encode_command,
MSGTYPE_PRIMARY_COMMANDS,
MSGTYPE_VOLUME_DIRECT_COMMANDS,
)
from rsp1570serial.messages import decode_message_stream, FeedbackMessage
from rsp1570serial.protocol import StreamProxy
import unittest
async def decode_all_messages(ser):
    """Drain the message stream *ser* and return every decoded message as a list."""
    return [msg async for msg in decode_message_stream(ser)]
async def decode_single_message(ser):
    """Decode *ser*, asserting it yields exactly one message, and return it."""
    decoded = await decode_all_messages(ser)
    assert len(decoded) == 1
    return decoded[0]
class AsyncRotelTestMessages(aiounittest.AsyncTestCase):
    """Tests for encoding commands to, and decoding messages from, the
    Rotel RSP-1570 serial protocol.

    Each test feeds a canned byte stream to the decoder through ``StreamProxy``
    and checks both the decoded result and the log records that
    ``rsp1570serial.protocol`` emits while reading (captured via ``assertLogs``).
    """

    async def test_encode_decode(self):
        """A primary command round-trips through encode then decode."""
        message = encode_command("POWER_TOGGLE")
        with self.assertLogs(level=logging.INFO) as cm:
            command = await decode_single_message(StreamProxy(message))
        self.assertEqual(command.message_type, MSGTYPE_PRIMARY_COMMANDS)
        self.assertEqual(command.key, b"\x0a")
        # The decoder logs exactly one INFO record when the stream is exhausted.
        self.assertEqual(
            cm.output, ["INFO:rsp1570serial.protocol:Finished reading messages"]
        )

    async def test_encode_decode_with_meta(self):
        """A volume-direct command round-trips through encode then decode."""
        message = encode_command("VOLUME_40")
        with self.assertLogs(level=logging.INFO) as cm:
            command = await decode_single_message(StreamProxy(message))
        self.assertEqual(command.message_type, MSGTYPE_VOLUME_DIRECT_COMMANDS)
        self.assertEqual(command.key, b"\x28")
        self.assertEqual(
            cm.output, ["INFO:rsp1570serial.protocol:Finished reading messages"]
        )

    async def test_decode_feedback_message(self):
        """A raw feedback message yields display lines, icon states and parsed fields."""
        with self.assertLogs(level=logging.INFO) as cm:
            display = await decode_single_message(
                StreamProxy(
                    b"\xfe1\xa3 FIRE TV VOL 64DOLBY PL\x19 C 48K \x00F\x08\x00\xfc\xf2"
                )
            )
        self.assertEqual(display.lines[0], "FIRE TV VOL 64")
        # \x19 is a raw display byte; parse_display_lines() renders it as "II" below.
        self.assertEqual(display.lines[1], "DOLBY PL\x19 C 48K ")
        self.assertCountEqual(
            display.icons_that_are_on(),
            ["II", "HDMI", "Pro Logic", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
        )
        self.assertEqual(
            cm.output, ["INFO:rsp1570serial.protocol:Finished reading messages"]
        )
        # Fields derived from the two 21-character display lines.
        fields = display.parse_display_lines()
        self.assertEqual(fields["is_on"], True)
        self.assertEqual(fields["source_name"], "FIRE TV")
        self.assertEqual(fields["volume"], 64)
        self.assertEqual(fields["mute_on"], False)
        self.assertEqual(fields["party_mode_on"], False)
        self.assertEqual(fields["info"], "DOLBY PLII C 48K")
        self.assertEqual(fields["rec_source"], None)
        self.assertEqual(fields["zone2_source"], None)
        self.assertEqual(fields["zone2_volume"], None)
        self.assertEqual(fields["zone3_source"], None)
        self.assertEqual(fields["zone3_volume"], None)
        self.assertEqual(fields["zone4_source"], None)
        self.assertEqual(fields["zone4_volume"], None)

    async def test_decode_feedback_after_power_off(self):
        """After power off the display is all NUL bytes and every field parses to None/False."""
        with self.assertLogs(level=logging.INFO) as cm:
            display = await decode_single_message(
                StreamProxy(
                    b"\xfe1\xa3 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\xfc"
                )
            )
        # Both display lines are 21 NUL characters.
        self.assertEqual(
            display.lines[0],
            "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
        )
        self.assertEqual(
            display.lines[1],
            "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
        )
        self.assertCountEqual(display.icons_that_are_on(), ["Standby LED"])
        self.assertEqual(
            cm.output, ["INFO:rsp1570serial.protocol:Finished reading messages"]
        )
        fields = display.parse_display_lines()
        self.assertEqual(fields["is_on"], False)
        self.assertEqual(fields["source_name"], None)
        self.assertEqual(fields["volume"], None)
        self.assertEqual(fields["mute_on"], None)
        self.assertEqual(fields["party_mode_on"], None)
        self.assertEqual(fields["info"], None)
        self.assertEqual(fields["rec_source"], None)
        self.assertEqual(fields["zone2_source"], None)
        self.assertEqual(fields["zone2_volume"], None)
        self.assertEqual(fields["zone3_source"], None)
        self.assertEqual(fields["zone3_volume"], None)
        self.assertEqual(fields["zone4_source"], None)
        self.assertEqual(fields["zone4_volume"], None)

    async def test_decode_feedback_message_with_rec_source(self):
        """A 'REC SOURCE' info line populates the rec_source field."""
        # Construct the message directly rather than decoding a byte stream.
        display = FeedbackMessage(
            "FIRE TV VOL 64", " REC SOURCE ", b"\x00F\x08\x00\xfc"
        )
        self.assertCountEqual(
            display.icons_that_are_on(),
            ["II", "HDMI", "Pro Logic", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
        )
        fields = display.parse_display_lines()
        self.assertEqual(fields["is_on"], True)
        self.assertEqual(fields["source_name"], "FIRE TV")
        self.assertEqual(fields["volume"], 64)
        self.assertEqual(fields["mute_on"], False)
        self.assertEqual(fields["party_mode_on"], False)
        self.assertEqual(fields["info"], "REC SOURCE")
        self.assertEqual(fields["rec_source"], "SOURCE")
        self.assertEqual(fields["zone2_source"], None)
        self.assertEqual(fields["zone2_volume"], None)
        self.assertEqual(fields["zone3_source"], None)
        self.assertEqual(fields["zone3_volume"], None)
        self.assertEqual(fields["zone4_source"], None)
        self.assertEqual(fields["zone4_volume"], None)

    async def test_decode_feedback_message_with_zone_source(self):
        """A 'ZONE4 <source>' info line populates the zone4_source field."""
        display = FeedbackMessage(
            "FIRE TV VOL 64", " ZONE4 TUNER ", b"\x00F\x08\x00\xfc"
        )
        self.assertCountEqual(
            display.icons_that_are_on(),
            ["II", "HDMI", "Pro Logic", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
        )
        fields = display.parse_display_lines()
        self.assertEqual(fields["is_on"], True)
        self.assertEqual(fields["source_name"], "FIRE TV")
        self.assertEqual(fields["volume"], 64)
        self.assertEqual(fields["mute_on"], False)
        self.assertEqual(fields["party_mode_on"], False)
        self.assertEqual(fields["info"], "ZONE4 TUNER")
        self.assertEqual(fields["rec_source"], None)
        self.assertEqual(fields["zone2_source"], None)
        self.assertEqual(fields["zone2_volume"], None)
        self.assertEqual(fields["zone3_source"], None)
        self.assertEqual(fields["zone3_volume"], None)
        self.assertEqual(fields["zone4_source"], "TUNER")
        self.assertEqual(fields["zone4_volume"], None)

    async def test_decode_feedback_message_with_zone_volume_and_party_mode(self):
        """'pty' in line 1 sets party_mode_on; 'ZONE3 VOL nn' sets zone3_volume."""
        display = FeedbackMessage(
            "FIRE TV pty VOL 64", " ZONE3 VOL 55 ", b"\x00F\x08\x00\xfc"
        )
        self.assertCountEqual(
            display.icons_that_are_on(),
            ["II", "HDMI", "Pro Logic", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
        )
        fields = display.parse_display_lines()
        self.assertEqual(fields["is_on"], True)
        self.assertEqual(fields["source_name"], "FIRE TV")
        self.assertEqual(fields["volume"], 64)
        self.assertEqual(fields["mute_on"], False)
        self.assertEqual(fields["party_mode_on"], True)
        self.assertEqual(fields["info"], "ZONE3 VOL 55")
        self.assertEqual(fields["rec_source"], None)
        self.assertEqual(fields["zone2_source"], None)
        self.assertEqual(fields["zone2_volume"], None)
        self.assertEqual(fields["zone3_source"], None)
        self.assertEqual(fields["zone3_volume"], 55)
        self.assertEqual(fields["zone4_source"], None)
        self.assertEqual(fields["zone4_volume"], None)

    async def test_decode_feedback_message_with_meta_escape(self):
        """
        Test feedback message that generates a checksum that needs to be escaped
        I have verified that the real device does generate this message
        """
        with self.assertLogs(level=logging.INFO) as cm:
            display = await decode_single_message(
                StreamProxy(
                    # Trailing \xfd\x01 is the escaped form of the checksum byte.
                    b"\xfe1\xa3 VIRE TV VOL 60DOLBY PL\x19 C 48K \x00F\x08\x00\xfc\xfd\x01"
                )
            )
        self.assertEqual(display.lines[0], "VIRE TV VOL 60")
        self.assertEqual(display.lines[1], "DOLBY PL\x19 C 48K ")
        self.assertCountEqual(
            display.icons_that_are_on(),
            ["II", "HDMI", "Pro Logic", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
        )
        self.assertEqual(
            cm.output, ["INFO:rsp1570serial.protocol:Finished reading messages"]
        )

    async def test_decode_trigger_message(self):
        """A trigger message decodes and logs the per-zone trigger state table."""
        with self.assertLogs(level=logging.INFO) as cm:
            trigger = await decode_single_message(
                StreamProxy(b"\xfe\x07\xa3\x21\x01\x01\x00\x00\x00\xcd")
            )
            trigger.log()
        self.assertEqual(
            cm.output,
            [
                "INFO:rsp1570serial.protocol:Finished reading messages",
                "INFO:rsp1570serial.messages:["
                "['All', ['on', 'off', 'off', 'off', 'off', 'off']], "
                "['Main', ['on', 'off', 'off', 'off', 'off', 'off']], "
                "['Zone 2', ['off', 'off', 'off', 'off', 'off', 'off']], "
                "['Zone 3', ['off', 'off', 'off', 'off', 'off', 'off']], "
                "['Zone 4', ['off', 'off', 'off', 'off', 'off', 'off']]]",
            ],
        )

    async def test_decode_stream1(self):
        """Two well-formed back-to-back feedback messages both decode."""
        ser = StreamProxy(
            b"\xfe1\xa3 FIRE TV VOL 64DOLBY PL\x19 C 48K \x00F\x08\x00\xfc\xf2\xfe1\xa3 CATV VOL 63DOLBY PL\x19 M 48K \x00F\x08\x00\xfc\x99"
        )
        with self.assertLogs(level=logging.INFO) as cm:
            feedback_messages = await decode_all_messages(ser)
        self.assertEqual(len(feedback_messages), 2)
        self.assertEqual(feedback_messages[0].lines[0], "FIRE TV VOL 64")
        self.assertEqual(feedback_messages[0].lines[1], "DOLBY PL\x19 C 48K ")
        self.assertCountEqual(
            feedback_messages[0].icons_that_are_on(),
            ["II", "HDMI", "Pro Logic", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
        )
        self.assertEqual(feedback_messages[1].lines[0], "CATV VOL 63")
        self.assertEqual(feedback_messages[1].lines[1], "DOLBY PL\x19 M 48K ")
        self.assertCountEqual(
            feedback_messages[1].icons_that_are_on(),
            ["II", "HDMI", "Pro Logic", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
        )
        self.assertEqual(
            cm.output, ["INFO:rsp1570serial.protocol:Finished reading messages"]
        )

    async def test_decode_stream2(self):
        """ Deliberately removed the start byte from the first message. Rest of first message will be reported as unexpected bytes. """
        ser = StreamProxy(
            b"1\xa3 FIRE TV VOL 64DOLBY PL\x19 C 48K \x00F\x08\x00\xfc\xf2\xfe1\xa3 CATV VOL 63DOLBY PL\x19 M 48K \x00F\x08\x00\xfc\x99"
        )
        with self.assertLogs(level=logging.INFO) as cm:
            feedback_messages = await decode_all_messages(ser)
        # Only the second (intact) message decodes.
        self.assertEqual(len(feedback_messages), 1)
        self.assertEqual(feedback_messages[0].lines[0], "CATV VOL 63")
        self.assertEqual(feedback_messages[0].lines[1], "DOLBY PL\x19 M 48K ")
        self.assertCountEqual(
            feedback_messages[0].icons_that_are_on(),
            ["II", "HDMI", "Pro Logic", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
        )
        self.assertEqual(
            cm.output,
            [
                "WARNING:rsp1570serial.protocol:51 unexpected bytes encountered while waiting for START_BYTE: bytearray(b'1\\xa3 FIRE TV VOL 64DOLBY PL\\x19 C 48K \\x00F\\x08\\x00\\xfc\\xf2')",
                "INFO:rsp1570serial.protocol:Finished reading messages",
            ],
        )

    async def test_decode_stream3(self):
        """ Deliberately close early. Close method will report discarded payload. """
        ser = StreamProxy(
            b"\xfe1\xa3 FIRE TV VOL 64DOLBY PL\x19 C 48K \x00F\x08\x00\xfc\xf2\xfe1\xa3 CATV VOL 63DOLBY PL\x19 M 48K \x00F\x08\x00\xfc"
        )
        with self.assertLogs(level=logging.INFO) as cm:
            feedback_messages = await decode_all_messages(ser)
        # First message decodes; the truncated second message is reported at EOF.
        self.assertEqual(len(feedback_messages), 1)
        self.assertEqual(feedback_messages[0].lines[0], "FIRE TV VOL 64")
        self.assertEqual(feedback_messages[0].lines[1], "DOLBY PL\x19 C 48K ")
        self.assertCountEqual(
            feedback_messages[0].icons_that_are_on(),
            ["II", "HDMI", "Pro Logic", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
        )
        self.assertEqual(
            cm.output,
            [
                "ERROR:rsp1570serial.protocol:Unexpected EOF encountered. Work in progress discarded: bytearray(b'1\\xa3 CATV VOL 63DOLBY PL\\x19 M 48K \\x00F\\x08\\x00\\xfc')",
                "INFO:rsp1570serial.protocol:Finished reading messages",
            ],
        )

    async def test_decode_stream4(self):
        """
        Deliberately truncate first message.
        Partial message will be discarded when unescaped start byte encountered.
        Next message will be treated as unexpected bytes when EOF encountered.
        """
        ser = StreamProxy(
            b"\xfe1\xa3 FIRE TV VOL 64DOLBY PL\x19 C 48K \x00F\x08\x00\xfc\xfe1\xa3 CATV VOL 63DOLBY PL\x19 M 48K \x00F\x08\x00\xfc\x99"
        )
        with self.assertLogs(level=logging.INFO) as cm:
            feedback_messages = await decode_all_messages(ser)
        self.assertEqual(len(feedback_messages), 0)
        self.assertEqual(
            cm.output,
            [
                "ERROR:rsp1570serial.protocol:Invalid byte encountered while processing message content. Work in progress discarded: bytearray(b'1\\xa3 FIRE TV VOL 64DOLBY PL\\x19 C 48K \\x00F\\x08\\x00\\xfc')",
                "WARNING:rsp1570serial.protocol:51 unexpected bytes discarded when EOF encountered: bytearray(b'1\\xa3 CATV VOL 63DOLBY PL\\x19 M 48K \\x00F\\x08\\x00\\xfc\\x99')",
                "INFO:rsp1570serial.protocol:Finished reading messages",
            ],
        )

    async def test_decode_stream5(self):
        """
        Deliberately truncate first message.
        Partial message will be discarded when unescaped start byte encountered.
        Next message will be treated as unexpected bytes while waiting for a start byte.
        Next message should be read normally
        """
        ser = StreamProxy(
            b"\xfe1\xa3 FIRE TV VOL 64DOLBY PL\x19 C 48K \x00F\x08\x00\xfc\xfe1\xa3 CATV VOL 63DOLBY PL\x19 M 48K \x00F\x08\x00\xfc\x99\xfe1\xa3 FIRE TV VOL 64DOLBY PL\x19 C 48K \x00F\x08\x00\xfc\xf2"
        )
        with self.assertLogs(level=logging.INFO) as cm:
            feedback_messages = await decode_all_messages(ser)
        # Only the third, intact message survives.
        self.assertEqual(len(feedback_messages), 1)
        self.assertEqual(feedback_messages[0].lines[0], "FIRE TV VOL 64")
        self.assertEqual(feedback_messages[0].lines[1], "DOLBY PL\x19 C 48K ")
        self.assertCountEqual(
            feedback_messages[0].icons_that_are_on(),
            ["II", "HDMI", "Pro Logic", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
        )
        self.assertEqual(
            cm.output,
            [
                "ERROR:rsp1570serial.protocol:Invalid byte encountered while processing message content. Work in progress discarded: bytearray(b'1\\xa3 FIRE TV VOL 64DOLBY PL\\x19 C 48K \\x00F\\x08\\x00\\xfc')",
                "WARNING:rsp1570serial.protocol:51 unexpected bytes encountered while waiting for START_BYTE: bytearray(b'1\\xa3 CATV VOL 63DOLBY PL\\x19 M 48K \\x00F\\x08\\x00\\xfc\\x99')",
                "INFO:rsp1570serial.protocol:Finished reading messages",
            ],
        )
| 41.096197
| 239
| 0.547959
| 1,987
| 18,370
| 4.948666
| 0.099648
| 0.154073
| 0.074138
| 0.093969
| 0.860877
| 0.827621
| 0.802095
| 0.793857
| 0.778806
| 0.773213
| 0
| 0.060338
| 0.334186
| 18,370
| 446
| 240
| 41.188341
| 0.743602
| 0
| 0
| 0.650367
| 0
| 0.051345
| 0.288114
| 0.085143
| 0
| 0
| 0
| 0
| 0.303178
| 1
| 0
| false
| 0
| 0.01467
| 0
| 0.022005
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ae1f04906b60a3a6eedff92ce26311245c2f9944
| 21
|
py
|
Python
|
old_blds/EllipsePy/__init__.py
|
lalithu/EllipsePy
|
9a1152c8ce60c45388cbb0eab930da0e3fff9cf1
|
[
"MIT"
] | 1
|
2021-01-24T21:56:12.000Z
|
2021-01-24T21:56:12.000Z
|
old_blds/EllipsePy/__init__.py
|
lalithu/EllipsePy
|
9a1152c8ce60c45388cbb0eab930da0e3fff9cf1
|
[
"MIT"
] | null | null | null |
old_blds/EllipsePy/__init__.py
|
lalithu/EllipsePy
|
9a1152c8ce60c45388cbb0eab930da0e3fff9cf1
|
[
"MIT"
] | null | null | null |
# By Lalith U | 2021
| 10.5
| 20
| 0.619048
| 4
| 21
| 3.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 0.285714
| 21
| 1
| 21
| 21
| 0.6
| 0.857143
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ee0d4e67d2b97ee1ed136c8f50bbf0947a795833
| 104
|
py
|
Python
|
tests/db_test.py
|
fushinari/wn
|
be274eda62316622f6bb0e548b5e2a6834dc216a
|
[
"MIT"
] | null | null | null |
tests/db_test.py
|
fushinari/wn
|
be274eda62316622f6bb0e548b5e2a6834dc216a
|
[
"MIT"
] | null | null | null |
tests/db_test.py
|
fushinari/wn
|
be274eda62316622f6bb0e548b5e2a6834dc216a
|
[
"MIT"
] | null | null | null |
from wn import _db
def test_schema_compatibility():
    """Assert that the wn database schema is compatible, creating it if absent."""
    assert _db.is_schema_compatible(create=True)
| 14.857143
| 48
| 0.788462
| 15
| 104
| 5.066667
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144231
| 104
| 6
| 49
| 17.333333
| 0.853933
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ee6637ae512db06203e60ea8d72a6883fe06f46c
| 140
|
py
|
Python
|
app/routes.py
|
kn-xu/stravinsky
|
88022fc1ea99b91df51b65b9e12bf4a8ef4ac738
|
[
"MIT"
] | 1
|
2019-09-03T15:18:14.000Z
|
2019-09-03T15:18:14.000Z
|
app/routes.py
|
kn-xu/stravinsky
|
88022fc1ea99b91df51b65b9e12bf4a8ef4ac738
|
[
"MIT"
] | null | null | null |
app/routes.py
|
kn-xu/stravinsky
|
88022fc1ea99b91df51b65b9e12bf4a8ef4ac738
|
[
"MIT"
] | null | null | null |
from app import app
from flask import render_template
@app.route('/')
def index():
    """Render the home page for the site root."""
    return render_template('home.html', title='track')
| 17.5
| 54
| 0.721429
| 20
| 140
| 4.95
| 0.7
| 0.282828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 140
| 7
| 55
| 20
| 0.825
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
c9f856d50f910a9ca16b305bf88018c61162d750
| 38
|
py
|
Python
|
assemblyline/alsvc_characterize/__init__.py
|
dendisuhubdy/grokmachine
|
120a21a25c2730ed356739231ec8b99fc0575c8b
|
[
"BSD-3-Clause"
] | 46
|
2017-05-15T11:15:08.000Z
|
2018-07-02T03:32:52.000Z
|
assemblyline/alsvc_characterize/__init__.py
|
dendisuhubdy/grokmachine
|
120a21a25c2730ed356739231ec8b99fc0575c8b
|
[
"BSD-3-Clause"
] | null | null | null |
assemblyline/alsvc_characterize/__init__.py
|
dendisuhubdy/grokmachine
|
120a21a25c2730ed356739231ec8b99fc0575c8b
|
[
"BSD-3-Clause"
] | 24
|
2017-05-17T03:26:17.000Z
|
2018-07-09T07:00:50.000Z
|
from characterize import Characterize
| 19
| 37
| 0.894737
| 4
| 38
| 8.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a016b82f70d13dcdc3be8c8eaab710f41d9a7562
| 22
|
py
|
Python
|
weather/__init__.py
|
JohnCalhoun/weather-api-demo
|
f07161dd7a0c3bef5e95c8cfd7a306a62f53e2a0
|
[
"MIT"
] | null | null | null |
weather/__init__.py
|
JohnCalhoun/weather-api-demo
|
f07161dd7a0c3bef5e95c8cfd7a306a62f53e2a0
|
[
"MIT"
] | null | null | null |
weather/__init__.py
|
JohnCalhoun/weather-api-demo
|
f07161dd7a0c3bef5e95c8cfd7a306a62f53e2a0
|
[
"MIT"
] | null | null | null |
from .code import API
| 11
| 21
| 0.772727
| 4
| 22
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4e6a0bca9c00d8944a5b3ea91cd3f4865f037b45
| 218
|
py
|
Python
|
online_pharmacy/pharmacy/admin.py
|
geekyJock8/online_pharmacy
|
892852857786ec17259b71f2a178896cd6d12e60
|
[
"Apache-2.0"
] | 5
|
2020-09-09T13:59:17.000Z
|
2021-09-30T07:20:55.000Z
|
online_pharmacy/pharmacy/admin.py
|
geekyJock8/online_pharmacy
|
892852857786ec17259b71f2a178896cd6d12e60
|
[
"Apache-2.0"
] | 10
|
2017-09-03T06:13:31.000Z
|
2017-10-10T15:22:30.000Z
|
online_pharmacy/pharmacy/admin.py
|
geekyJock8/Online-Pharmacy
|
892852857786ec17259b71f2a178896cd6d12e60
|
[
"Apache-2.0"
] | 9
|
2017-09-03T04:59:18.000Z
|
2019-10-17T11:33:18.000Z
|
from django.contrib import admin
from .models import pharmacy,contact_pharmacy,pharmacy_notifications
# Expose the pharmacy models in the Django admin site.
admin.site.register(pharmacy)
admin.site.register(contact_pharmacy)
admin.site.register(pharmacy_notifications)
| 21.8
| 68
| 0.857798
| 27
| 218
| 6.777778
| 0.407407
| 0.147541
| 0.278689
| 0.273224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068807
| 218
| 9
| 69
| 24.222222
| 0.901478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4e863c957562215f588df27cb2d2b18a2f7e93a3
| 104
|
py
|
Python
|
Flasky/app/task/__init__.py
|
LieonShelly/PythonFun
|
811760d368885109f9359c2663d8ce74886f6ad6
|
[
"MIT"
] | null | null | null |
Flasky/app/task/__init__.py
|
LieonShelly/PythonFun
|
811760d368885109f9359c2663d8ce74886f6ad6
|
[
"MIT"
] | null | null | null |
Flasky/app/task/__init__.py
|
LieonShelly/PythonFun
|
811760d368885109f9359c2663d8ce74886f6ad6
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
# Blueprint grouping the task-related routes under the name 'task'.
task_api = Blueprint('task', __name__)
# NOTE(review): presumably imported for its side effects (attaching handlers
# to task_api) rather than for direct use — confirm against app.task module.
from app.task import TaskUseCelery
| 20.8
| 38
| 0.798077
| 14
| 104
| 5.571429
| 0.642857
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134615
| 104
| 5
| 39
| 20.8
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0.038095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
4ea9bd8320537dfdc7817f874f73725fef06c230
| 14,061
|
py
|
Python
|
h0rton/tests/test_trainval_data/test_xy_data.py
|
jiwoncpark/h0rton
|
2541885d70d090fdb777339cfb77a3a9f3e7996d
|
[
"MIT"
] | 4
|
2020-12-02T02:18:08.000Z
|
2021-11-25T21:56:33.000Z
|
h0rton/tests/test_trainval_data/test_xy_data.py
|
jiwoncpark/h0rton
|
2541885d70d090fdb777339cfb77a3a9f3e7996d
|
[
"MIT"
] | 25
|
2019-10-17T08:18:38.000Z
|
2020-12-26T09:38:05.000Z
|
h0rton/tests/test_trainval_data/test_xy_data.py
|
jiwoncpark/h0rton
|
2541885d70d090fdb777339cfb77a3a9f3e7996d
|
[
"MIT"
] | 1
|
2020-12-03T02:14:12.000Z
|
2020-12-03T02:14:12.000Z
|
import os
import shutil
import unittest
import numpy as np
import pandas as pd
from addict import Dict
from torch.utils.data import DataLoader
from h0rton.trainval_data import XYData
from baobab.configs import BaobabConfig
class TestXYData(unittest.TestCase):
    """A suite of tests on data preprocessing
    """
    @classmethod
    def setUpClass(cls):
        """Create small toy train (2 images) and val (3 images) datasets on disk."""
        cls.Y_cols = ["lens_mass_center_x", "src_light_center_x","lens_mass_center_y", "src_light_center_y", "external_shear_gamma_ext", "external_shear_psi_ext"]
        cls.train_Y_mean = np.random.randn(len(cls.Y_cols))
        cls.train_Y_std = np.abs(np.random.randn(len(cls.Y_cols))) + 1.0
        cls.train_baobab_cfg_path = 'h0rton/tests/test_trainval_data/baobab_train.json'
        cls.val_baobab_cfg_path = 'h0rton/tests/test_trainval_data/baobab_val.json'
        cls.train_baobab_cfg = BaobabConfig.from_file(cls.train_baobab_cfg_path)
        cls.val_baobab_cfg = BaobabConfig.from_file(cls.val_baobab_cfg_path)
        cls.original_exptime = 5400.0 # value in baobab_[train/val].json
        #####################
        # Generate toy data #
        #####################
        # Training (n_data = 2)
        os.makedirs(cls.train_baobab_cfg.out_dir, exist_ok=True)
        cls.train_metadata = pd.DataFrame.from_dict({
            "lens_mass_center_x": [1.5, 2.0],
            "lens_mass_center_y": [1.8, 9.0],
            "src_light_center_x": [10.1, 12.5],
            "src_light_center_y": [29.2, 18.0],
            "external_shear_gamma_ext": [-0.02, 0.02],
            "external_shear_psi_ext": [-0.5, 0.5],
            "img_filename": ['X_{0:07d}.npy'.format(i) for i in range(2)],
        })
        cls.train_metadata.to_csv(os.path.join(cls.train_baobab_cfg.out_dir, 'metadata.csv'), index=False)
        # Toy 3x3 single-channel images with strictly positive pixel values.
        cls.img_0 = np.abs(np.random.randn(9)*2.0).reshape([1, 3, 3])
        cls.img_1 = np.abs(np.random.randn(9)*2.0).reshape([1, 3, 3])
        np.save(os.path.join(cls.train_baobab_cfg.out_dir, 'X_{0:07d}.npy'.format(0)), cls.img_0)
        np.save(os.path.join(cls.train_baobab_cfg.out_dir, 'X_{0:07d}.npy'.format(1)), cls.img_1)
        # Validation (n_data = 3)
        os.makedirs(cls.val_baobab_cfg.out_dir, exist_ok=True)
        cls.val_metadata = pd.DataFrame.from_dict({
            "lens_mass_center_x": np.random.randn(3),
            "lens_mass_center_y": np.random.randn(3),
            "src_light_center_x": np.random.randn(3),
            "src_light_center_y": np.random.randn(3),
            "external_shear_gamma_ext": np.random.randn(3),
            "external_shear_psi_ext": np.random.randn(3),
            "img_filename": ['X_{0:07d}.npy'.format(i) for i in range(3)],
        })
        cls.img_0_val = np.abs(np.random.randn(9)*2.0).reshape([1, 3, 3])
        cls.img_1_val = np.abs(np.random.randn(9)*2.0).reshape([1, 3, 3])
        cls.img_2_val = np.abs(np.random.randn(9)*2.0).reshape([1, 3, 3])
        np.save(os.path.join(cls.val_baobab_cfg.out_dir, 'X_{0:07d}.npy'.format(0)), cls.img_0_val)
        np.save(os.path.join(cls.val_baobab_cfg.out_dir, 'X_{0:07d}.npy'.format(1)), cls.img_1_val)
        np.save(os.path.join(cls.val_baobab_cfg.out_dir, 'X_{0:07d}.npy'.format(2)), cls.img_2_val)
        cls.val_metadata.to_csv(os.path.join(cls.val_baobab_cfg.out_dir, 'metadata.csv'), index=False)

    @classmethod
    def tearDownClass(cls):
        """Remove the toy data
        """
        shutil.rmtree(cls.train_baobab_cfg.out_dir)
        shutil.rmtree(cls.val_baobab_cfg.out_dir)

    def test_X_identity(self):
        """Test if the input iamge equals the dataset image, when nothing is done to the image at all
        """
        train_data = XYData(True, self.Y_cols, 'FloatTensor', define_src_pos_wrt_lens=True, rescale_pixels=False, log_pixels=False, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime}, train_Y_mean=self.train_Y_mean, train_Y_std=self.train_Y_std, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        actual_img, _ = train_data[0]
        expected_img = self.img_0
        np.testing.assert_array_almost_equal(actual_img, expected_img, err_msg='test_X_identity')

    def test_X_transformation_log(self):
        """Test if the images transform as expected, with log(1+X)
        """
        train_data = XYData(True, self.Y_cols, 'FloatTensor', define_src_pos_wrt_lens=True, rescale_pixels=False, log_pixels=True, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime}, train_Y_mean=self.train_Y_mean, train_Y_std=self.train_Y_std, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        actual_img, _ = train_data[0]
        expected_img = self.img_0
        expected_img = np.log1p(expected_img)
        np.testing.assert_array_almost_equal(actual_img, expected_img, err_msg='test_X_transformation_log')

    def test_X_transformation_rescale(self):
        """Test if the images transform as expected, with whitening
        """
        train_data = XYData(True, self.Y_cols, 'FloatTensor', define_src_pos_wrt_lens=True, rescale_pixels=True, log_pixels=False, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime}, train_Y_mean=self.train_Y_mean, train_Y_std=self.train_Y_std, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        actual_img, _ = train_data[0]
        expected_img = self.img_0
        # ddof=1 matches torch's sample (Bessel-corrected) standard deviation.
        expected_img = (expected_img - np.mean(expected_img))/np.std(expected_img, ddof=1)
        np.testing.assert_array_almost_equal(actual_img, expected_img, err_msg='test_X_transformation_rescale')

    def test_X_transformation_log_rescale(self):
        """Test if the images transform as expected, with log(1+X) and whitening
        """
        # Without exposure time factor
        train_data = XYData(True, self.Y_cols, 'FloatTensor', define_src_pos_wrt_lens=True, rescale_pixels=True, log_pixels=True, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime}, train_Y_mean=self.train_Y_mean, train_Y_std=self.train_Y_std, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        actual_img, _ = train_data[0]
        expected_img = self.img_0
        expected_img = np.log1p(expected_img)
        # Note torch std takes into account Bessel correction
        expected_img = (expected_img - np.mean(expected_img))/np.std(expected_img, ddof=1)
        np.testing.assert_array_almost_equal(actual_img, expected_img, err_msg='test_X_transformation_log_rescale, without exposure time factor')
        # With exposure time factor
        train_data = XYData(True, self.Y_cols, 'FloatTensor', define_src_pos_wrt_lens=True, rescale_pixels=True, log_pixels=True, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime*2.0}, train_Y_mean=self.train_Y_mean, train_Y_std=self.train_Y_std, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        actual_img, _ = train_data[0]
        # Doubling the effective exposure time doubles the pixel values first.
        expected_img = self.img_0*2.0
        expected_img = np.log1p(expected_img)
        # Note torch std takes into account Bessel correction
        expected_img = (expected_img - np.mean(expected_img))/np.std(expected_img, ddof=1)
        np.testing.assert_array_almost_equal(actual_img, expected_img, err_msg='test_X_transformation_log_rescale, with exposure time factor')

    def test_X_exposure_time_factor(self):
        """Test if the images scale by the new effective exposure time correctly
        """
        train_data = XYData(True, self.Y_cols, 'FloatTensor', define_src_pos_wrt_lens=True, rescale_pixels=False, log_pixels=False, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime*2.0}, train_Y_mean=self.train_Y_mean, train_Y_std=self.train_Y_std, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        actual_img, _ = train_data[0]
        expected_img = self.img_0*2.0
        np.testing.assert_array_almost_equal(actual_img, expected_img, err_msg='test_X_exposure_time_factor')

    def test_Y_transformation_(self):
        """Test if the target Y whitens correctly
        """
        # Training
        train_data = XYData(True, self.Y_cols, 'FloatTensor', define_src_pos_wrt_lens=True, rescale_pixels=False, log_pixels=False, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime*2.0}, train_Y_mean=None, train_Y_std=None, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        _, actual_Y_0 = train_data[0]
        _, actual_Y_1 = train_data[1]
        actual_Y = np.stack([actual_Y_0, actual_Y_1], axis=0)
        # Source positions are re-expressed relative to the lens center.
        Y_df = self.train_metadata[self.Y_cols].copy()
        Y_df['src_light_center_x'] -= Y_df['lens_mass_center_x']
        Y_df['src_light_center_y'] -= Y_df['lens_mass_center_y']
        expected_Y = Y_df.values
        before_whitening_Y = Y_df.values
        #expected_Y = (expected_Y - self.train_Y_mean.reshape([1, -1]))/self.train_Y_std.reshape([1, -1])
        # NOTE(review): with only two training samples, whitening with the
        # population std maps each column's min to -1 and max to +1 — confirm
        # this matches XYData's internal normalization when mean/std are None.
        expected_Y[np.argmin(before_whitening_Y, axis=0), np.arange(len(self.Y_cols))] = -1
        expected_Y[np.argmax(before_whitening_Y, axis=0), np.arange(len(self.Y_cols))] = 1
        np.testing.assert_array_equal(actual_Y_0.shape, [len(self.Y_cols),], err_msg='shape of single example Y for training')
        np.testing.assert_array_almost_equal(actual_Y, expected_Y, err_msg='transformed Y for training')
        # Validation
        val_data = XYData(False, self.Y_cols, 'FloatTensor', define_src_pos_wrt_lens=True, rescale_pixels=False, log_pixels=False, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime*2.0}, train_Y_mean=self.train_Y_mean, train_Y_std=self.train_Y_std, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        _, actual_Y_0 = val_data[0]
        _, actual_Y_1 = val_data[1]
        _, actual_Y_2 = val_data[2]
        actual_Y = np.stack([actual_Y_0, actual_Y_1, actual_Y_2], axis=0)
        expected_Y = self.val_metadata[self.Y_cols].copy()
        expected_Y['src_light_center_x'] -= expected_Y['lens_mass_center_x']
        expected_Y['src_light_center_y'] -= expected_Y['lens_mass_center_y']
        expected_Y = expected_Y.values
        # Validation targets are whitened with the *training* mean/std.
        expected_Y = (expected_Y - self.train_Y_mean.reshape([1, -1]))/self.train_Y_std.reshape([1, -1])
        np.testing.assert_array_equal(actual_Y_0.shape, [len(self.Y_cols),], err_msg='shape of single example Y for validation for arbitrary train mean and std')
        np.testing.assert_array_almost_equal(actual_Y, expected_Y, err_msg='transformed Y for validation for arbitrary train mean and std')

    def test_train_vs_val(self):
        """Test if the images and metadata are loaded from the correct folder (train/val)
        """
        train_data = XYData(True, self.Y_cols, 'FloatTensor', define_src_pos_wrt_lens=True, rescale_pixels=False, log_pixels=False, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime*2.0}, train_Y_mean=self.train_Y_mean, train_Y_std=self.train_Y_std, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        val_data = XYData(False, self.Y_cols, 'FloatTensor', define_src_pos_wrt_lens=True, rescale_pixels=False, log_pixels=False, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime*2.0}, train_Y_mean=self.train_Y_mean, train_Y_std=self.train_Y_std, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        # Dataset lengths must match the 2 train / 3 val examples created in setUpClass.
        np.testing.assert_equal(len(train_data), 2, err_msg='reading from correct folder (train/val)')
        np.testing.assert_equal(len(val_data), 3, err_msg='reading from correct folder (train/val)')

    def test_tensor_type(self):
        """Test if X, Y are of the configured data type
        """
        # DoubleTensor
        train_data = XYData(True, self.Y_cols, 'DoubleTensor', define_src_pos_wrt_lens=True, rescale_pixels=False, log_pixels=False, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime*2.0}, train_Y_mean=self.train_Y_mean, train_Y_std=self.train_Y_std, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        actual_X_0, actual_Y_0 = train_data[0]
        assert actual_X_0.type() == 'torch.DoubleTensor'
        assert actual_Y_0.type() == 'torch.DoubleTensor'
        # FloatTensor
        train_data = XYData(True, self.Y_cols, 'FloatTensor', define_src_pos_wrt_lens=True, rescale_pixels=False, log_pixels=False, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime*2.0}, train_Y_mean=self.train_Y_mean, train_Y_std=self.train_Y_std, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        actual_X_0, actual_Y_0 = train_data[0]
        assert actual_X_0.type() == 'torch.FloatTensor'
        assert actual_Y_0.type() == 'torch.FloatTensor'
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 74.005263
| 396
| 0.694403
| 2,165
| 14,061
| 4.115012
| 0.091917
| 0.065664
| 0.075878
| 0.052531
| 0.829498
| 0.773936
| 0.739926
| 0.722191
| 0.702436
| 0.66719
| 0
| 0.020721
| 0.193443
| 14,061
| 190
| 397
| 74.005263
| 0.764836
| 0.07567
| 0
| 0.218045
| 0
| 0
| 0.121859
| 0.029708
| 0
| 0
| 0
| 0
| 0.120301
| 1
| 0.075188
| false
| 0
| 0.067669
| 0
| 0.150376
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4ec088ed3db1723dba14b1bec935f77fa3007a1d
| 6,517
|
py
|
Python
|
tests/test_vertex_array.py
|
2dx/moderngl
|
5f932560a535469626d79d22e4205f400e18f328
|
[
"MIT"
] | null | null | null |
tests/test_vertex_array.py
|
2dx/moderngl
|
5f932560a535469626d79d22e4205f400e18f328
|
[
"MIT"
] | null | null | null |
tests/test_vertex_array.py
|
2dx/moderngl
|
5f932560a535469626d79d22e4205f400e18f328
|
[
"MIT"
] | null | null | null |
from array import array
import unittest
import moderngl
import numpy as np
from common import get_context
class TestCase(unittest.TestCase):
    """Tests for vertex array creation: buffer-format padding, empty content,
    and transform feedback with under-specified attribute layouts (missing
    components must default to 0.0, with 1.0 in the w/last column)."""

    @classmethod
    def setUpClass(cls):
        cls.ctx = get_context()

    # ------------------------------------------------------------------
    # helpers
    # ------------------------------------------------------------------
    def _pos_velocity_prog(self):
        """Program with two vec2 inputs; used for the layout/padding tests."""
        return self.ctx.program(
            vertex_shader="""
                #version 330
                in vec2 pos;
                in vec2 velocity;
                out vec2 out_pos;
                void main() {
                    out_pos = pos + velocity;
                }
            """,
        )

    def _passthrough_prog(self, glsl_type, attrib, varying):
        """Program that copies a single input straight to a transform-feedback
        varying; `glsl_type` is e.g. 'vec4', 'mat2', 'mat4'."""
        return self.ctx.program(
            vertex_shader='''
                #version 330
                in %s %s;
                out %s %s;
                void main() {
                    %s = %s;
                }
            ''' % (glsl_type, attrib, glsl_type, varying, varying, attrib),
            varyings=[varying],
        )

    def _transform_read(self, prog, vbo, layout, attrib, reserve, vertices=-1):
        """Bind `vbo` with the given buffer `layout`, run a POINTS transform
        into a fresh output buffer, and return the captured float32 array."""
        out = self.ctx.buffer(reserve=reserve)
        vao = self.ctx.vertex_array(prog, [(vbo, layout, attrib)])
        vao.transform(out, moderngl.POINTS, vertices=vertices)
        return np.frombuffer(out.read(), dtype='f4')

    # ------------------------------------------------------------------
    # tests
    # ------------------------------------------------------------------
    def test_padding(self):
        prog = self._pos_velocity_prog()
        buffer = self.ctx.buffer(array('f', range(16)))
        # '2x4' skips 8 bytes, so 'pos' alone can share a buffer laid out
        # for both attributes.
        self.ctx.vertex_array(prog, [(buffer, '2f 2x4', 'pos')])
        self.ctx.vertex_array(prog, [(buffer, '2f 2f', 'pos', 'velocity')])

    def test_empty(self):
        # A vertex array with no buffer content must still be constructible.
        prog = self._pos_velocity_prog()
        self.ctx.vertex_array(prog, [])

    @unittest.skip("optional attributes ('color?') not supported yet")
    def test_optional(self):
        prog = self.ctx.program(
            vertex_shader="""
                #version 330
                in vec2 pos;
                in vec2 velocity;
                in vec4 color;
                out vec2 out_pos;
                void main() {
                    out_pos = pos + velocity;
                }
            """,
        )
        buffer = self.ctx.buffer(array('f', range(16)))
        self.ctx.vertex_array(prog, [(buffer, '2f 2f 4f', 'pos', 'velocity', 'color?')])

    def test_1(self):
        # simple_vertex_array round-trip of a single vec4.
        prog = self._passthrough_prog('vec4', 'in_vert', 'out_vert')
        vbo1 = self.ctx.buffer(np.array([4.0, 2.0, 7.5, 1.8], dtype='f4').tobytes())
        vbo2 = self.ctx.buffer(reserve=vbo1.size)
        vao = self.ctx.simple_vertex_array(prog, vbo1, 'in_vert')
        vao.transform(vbo2, moderngl.POINTS)
        res = np.frombuffer(vbo2.read(), dtype='f4')
        np.testing.assert_almost_equal(res, [4.0, 2.0, 7.5, 1.8])

    def test_2(self):
        # vec4 attribute with progressively fewer supplied components;
        # missing components default to 0.0 and the w component to 1.0.
        prog = self._passthrough_prog('vec4', 'in_vert', 'out_vert')
        vbo = self.ctx.buffer(np.array([4.0, 2.0, 7.5, 1.8], dtype='f4').tobytes())
        cases = [
            ('4f', [4.0, 2.0, 7.5, 1.8]),
            ('3f', [4.0, 2.0, 7.5, 1.0]),
            ('2f', [4.0, 2.0, 0.0, 1.0]),
            ('1f', [4.0, 0.0, 0.0, 1.0]),
        ]
        for layout, expected in cases:
            res = self._transform_read(prog, vbo, layout, 'in_vert', 16, vertices=1)
            np.testing.assert_almost_equal(res, expected)

    def test_3(self):
        # mat4 attribute: each column is padded independently.
        prog = self._passthrough_prog('mat4', 'in_mat', 'out_mat')
        vbo = self.ctx.buffer(np.arange(1, 17, dtype='f4').tobytes())
        cases = [
            ('16f', [1, 2, 3, 4,
                     5, 6, 7, 8,
                     9, 10, 11, 12,
                     13, 14, 15, 16]),
            ('12f', [1, 2, 3, 1,
                     4, 5, 6, 1,
                     7, 8, 9, 1,
                     10, 11, 12, 1]),
            ('8f', [1, 2, 0, 1,
                    3, 4, 0, 1,
                    5, 6, 0, 1,
                    7, 8, 0, 1]),
            ('4f', [1, 0, 0, 1,
                    2, 0, 0, 1,
                    3, 0, 0, 1,
                    4, 0, 0, 1]),
        ]
        for layout, expected in cases:
            res = self._transform_read(prog, vbo, layout, 'in_mat', 64, vertices=1)
            np.testing.assert_almost_equal(res, expected)

    def test_4(self):
        # mat2 attribute, full and per-column-padded layouts.
        prog = self._passthrough_prog('mat2', 'in_mat', 'out_mat')
        vbo = self.ctx.buffer(np.array([4.0, 2.0, 7.5, 1.8], dtype='f4').tobytes())
        cases = [
            ('4f', [4.0, 2.0, 7.5, 1.8]),
            ('2f', [4.0, 0.0, 2.0, 0.0]),
        ]
        for layout, expected in cases:
            res = self._transform_read(prog, vbo, layout, 'in_mat', 16, vertices=1)
            np.testing.assert_almost_equal(res, expected)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 31.181818
| 90
| 0.491944
| 824
| 6,517
| 3.770631
| 0.115291
| 0.072095
| 0.072417
| 0.081107
| 0.851625
| 0.841326
| 0.841326
| 0.820406
| 0.777277
| 0.756357
| 0
| 0.070765
| 0.355992
| 6,517
| 208
| 91
| 31.331731
| 0.669526
| 0.0646
| 0
| 0.559748
| 0
| 0
| 0.228163
| 0
| 0
| 0
| 0
| 0
| 0.069182
| 1
| 0.044025
| false
| 0
| 0.031447
| 0
| 0.081761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
14d1d929af5e2c43fee5138d8cd72ddf00d7f81f
| 11,861
|
py
|
Python
|
Code/Chenglong/feature_first_last_ngram.py
|
ChenglongChen/Kaggle_Homedepot
|
55c1033d0af3b6cf2f033fe4bcf3e1e0ffda3445
|
[
"MIT"
] | 465
|
2016-04-27T13:17:36.000Z
|
2020-05-15T11:05:13.000Z
|
Code/Chenglong/feature_first_last_ngram.py
|
CharlotteSean/Kaggle_HomeDepot
|
55c1033d0af3b6cf2f033fe4bcf3e1e0ffda3445
|
[
"MIT"
] | 1
|
2016-10-15T04:33:54.000Z
|
2016-10-15T04:33:54.000Z
|
Code/Chenglong/feature_first_last_ngram.py
|
CharlotteSean/Kaggle_HomeDepot
|
55c1033d0af3b6cf2f033fe4bcf3e1e0ffda3445
|
[
"MIT"
] | 230
|
2016-04-30T06:35:17.000Z
|
2019-12-04T08:23:22.000Z
|
# -*- coding: utf-8 -*-
"""
@author: Chenglong Chen <c.chenglong@gmail.com>
@brief: first and last ngram features
@note: in the final submission, we only used intersect count, NOT including intersect position.
"""
import re
import string
import numpy as np
import pandas as pd
import config
from utils import dist_utils, ngram_utils, nlp_utils, np_utils, pkl_utils
from utils import logging_utils, time_utils
from feature_base import BaseEstimator, PairwiseFeatureWrapper
from feature_intersect_position import _inter_pos_list, _inter_norm_pos_list
# tune the token pattern to get a better correlation with y_train
# token_pattern = r"(?u)\b\w\w+\b"
# token_pattern = r"\w{1,}"
# token_pattern = r"\w+"
# token_pattern = r"[\w']+"
token_pattern = " " # just split the text into tokens
# -------------------------- Count ----------------------------------
class Count_Ngram_BaseEstimator(BaseEstimator):
    """Base estimator: number of target ngrams that (fuzzily) match the obs
    ngram at position `idx` (0 = first ngram, -1 = last ngram).

    Matching is delegated to dist_utils._is_str_match with
    `str_match_threshold`.
    """
    def __init__(self, obs_corpus, target_corpus, ngram, idx, aggregation_mode="",
                 str_match_threshold=config.STR_MATCH_THRESHOLD):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
        # which obs ngram to anchor on: 0 -> first, -1 -> last
        self.idx = idx
        self.ngram = ngram
        # string name of the ngram type; used by subclasses' __name__
        self.ngram_str = ngram_utils._ngram_str_map[self.ngram]
        self.str_match_threshold = str_match_threshold

    def _get_match_count(self, obs, target, idx):
        """Count ngrams in `target` matching obs[idx]; 0 when either is empty."""
        if not obs or not target:
            return 0
        anchor = obs[idx]  # hoisted: loop-invariant lookup
        return sum(
            1 for word in target
            if dist_utils._is_str_match(word, anchor, self.str_match_threshold)
        )

    def transform_one(self, obs, target, id):
        """Match count between the idx-th obs ngram and all target ngrams."""
        obs_ngrams = ngram_utils._ngrams(nlp_utils._tokenize(obs, token_pattern), self.ngram)
        target_ngrams = ngram_utils._ngrams(nlp_utils._tokenize(target, token_pattern), self.ngram)
        return self._get_match_count(obs_ngrams, target_ngrams, self.idx)
class FirstIntersectCount_Ngram(Count_Ngram_BaseEstimator):
    """Count feature anchored on the FIRST obs ngram (idx=0)."""
    def __init__(self, obs_corpus, target_corpus, ngram, aggregation_mode="",
                 str_match_threshold=config.STR_MATCH_THRESHOLD):
        super().__init__(obs_corpus, target_corpus, ngram, 0, aggregation_mode, str_match_threshold)

    def __name__(self):
        return f"FirstIntersectCount_{self.ngram_str}"
class LastIntersectCount_Ngram(Count_Ngram_BaseEstimator):
    """Count feature anchored on the LAST obs ngram (idx=-1)."""
    def __init__(self, obs_corpus, target_corpus, ngram, aggregation_mode="",
                 str_match_threshold=config.STR_MATCH_THRESHOLD):
        super().__init__(obs_corpus, target_corpus, ngram, -1, aggregation_mode, str_match_threshold)

    def __name__(self):
        return f"LastIntersectCount_{self.ngram_str}"
# ------------------------- Ratio -------------------------------------------
class Ratio_Ngram_BaseEstimator(Count_Ngram_BaseEstimator):
    """Like the count variant, but normalised by the number of target ngrams."""
    def __init__(self, obs_corpus, target_corpus, ngram, idx, aggregation_mode="",
                 str_match_threshold=config.STR_MATCH_THRESHOLD):
        super().__init__(obs_corpus, target_corpus, ngram, idx, aggregation_mode, str_match_threshold)

    def transform_one(self, obs, target, id):
        """Match count divided by len(target ngrams); safe-divides to 0."""
        grams_obs = ngram_utils._ngrams(nlp_utils._tokenize(obs, token_pattern), self.ngram)
        grams_target = ngram_utils._ngrams(nlp_utils._tokenize(target, token_pattern), self.ngram)
        matched = self._get_match_count(grams_obs, grams_target, self.idx)
        return np_utils._try_divide(matched, len(grams_target))
class FirstIntersectRatio_Ngram(Ratio_Ngram_BaseEstimator):
    """Ratio feature anchored on the FIRST obs ngram (idx=0)."""
    def __init__(self, obs_corpus, target_corpus, ngram, aggregation_mode="",
                 str_match_threshold=config.STR_MATCH_THRESHOLD):
        super().__init__(obs_corpus, target_corpus, ngram, 0, aggregation_mode, str_match_threshold)

    def __name__(self):
        return f"FirstIntersectRatio_{self.ngram_str}"
class LastIntersectRatio_Ngram(Ratio_Ngram_BaseEstimator):
    """Ratio feature anchored on the LAST obs ngram (idx=-1)."""
    def __init__(self, obs_corpus, target_corpus, ngram, aggregation_mode="",
                 str_match_threshold=config.STR_MATCH_THRESHOLD):
        super().__init__(obs_corpus, target_corpus, ngram, -1, aggregation_mode, str_match_threshold)

    def __name__(self):
        return f"LastIntersectRatio_{self.ngram_str}"
# -------------------- Position ---------------------
class Position_Ngram_BaseEstimator(BaseEstimator):
    """Base estimator: positions within the target ngram list at which the
    obs ngram at index `idx` (0 = first, -1 = last) occurs."""
    def __init__(self, obs_corpus, target_corpus, ngram, idx, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
        # which obs ngram to anchor on: 0 -> first, -1 -> last
        self.idx = idx
        self.ngram = ngram
        # string name of the ngram type; used by subclasses' __name__
        self.ngram_str = ngram_utils._ngram_str_map[self.ngram]
    def transform_one(self, obs, target, id):
        """Return the intersect position list (see feature_intersect_position)."""
        obs_tokens = nlp_utils._tokenize(obs, token_pattern)
        target_tokens = nlp_utils._tokenize(target, token_pattern)
        obs_ngrams = ngram_utils._ngrams(obs_tokens, self.ngram)
        target_ngrams = ngram_utils._ngrams(target_tokens, self.ngram)
        # NOTE(review): obs_ngrams[self.idx] raises IndexError when obs yields
        # no ngrams (unlike the Count variant, which guards) — confirm
        # upstream guarantees non-empty obs text.
        return _inter_pos_list(target_ngrams, [obs_ngrams[self.idx]])
class FirstIntersectPosition_Ngram(Position_Ngram_BaseEstimator):
    """Single aggregation features: positions anchored on the FIRST obs ngram."""
    def __init__(self, obs_corpus, target_corpus, ngram, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, ngram, 0, aggregation_mode)

    def __name__(self):
        mode = self.aggregation_mode
        if isinstance(mode, str):
            feat_name = f"FirstIntersectPosition_{self.ngram_str}_{string.capwords(mode)}"
        elif isinstance(mode, list):
            feat_name = [f"FirstIntersectPosition_{self.ngram_str}_{string.capwords(m)}"
                         for m in mode]
        return feat_name
class LastIntersectPosition_Ngram(Position_Ngram_BaseEstimator):
    """Single aggregation features: positions anchored on the LAST obs ngram."""
    def __init__(self, obs_corpus, target_corpus, ngram, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, ngram, -1, aggregation_mode)

    def __name__(self):
        mode = self.aggregation_mode
        if isinstance(mode, str):
            feat_name = f"LastIntersectPosition_{self.ngram_str}_{string.capwords(mode)}"
        elif isinstance(mode, list):
            feat_name = [f"LastIntersectPosition_{self.ngram_str}_{string.capwords(m)}"
                         for m in mode]
        return feat_name
# -------------------------- Norm Position ----------------------------------
class NormPosition_Ngram_BaseEstimator(BaseEstimator):
    """Base estimator: normalised positions within the target ngram list at
    which the obs ngram at index `idx` (0 = first, -1 = last) occurs."""
    def __init__(self, obs_corpus, target_corpus, ngram, idx, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
        # which obs ngram to anchor on: 0 -> first, -1 -> last
        self.idx = idx
        self.ngram = ngram
        # string name of the ngram type; used by subclasses' __name__
        self.ngram_str = ngram_utils._ngram_str_map[self.ngram]
    def transform_one(self, obs, target, id):
        """Return the normalised intersect position list
        (see feature_intersect_position)."""
        obs_tokens = nlp_utils._tokenize(obs, token_pattern)
        target_tokens = nlp_utils._tokenize(target, token_pattern)
        obs_ngrams = ngram_utils._ngrams(obs_tokens, self.ngram)
        target_ngrams = ngram_utils._ngrams(target_tokens, self.ngram)
        # NOTE(review): obs_ngrams[self.idx] raises IndexError when obs yields
        # no ngrams — confirm upstream guarantees non-empty obs text.
        return _inter_norm_pos_list(target_ngrams, [obs_ngrams[self.idx]])
class FirstIntersectNormPosition_Ngram(NormPosition_Ngram_BaseEstimator):
    """Single aggregation features: normalised positions, FIRST obs ngram."""
    def __init__(self, obs_corpus, target_corpus, ngram, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, ngram, 0, aggregation_mode)

    def __name__(self):
        mode = self.aggregation_mode
        if isinstance(mode, str):
            feat_name = f"FirstIntersectNormPosition_{self.ngram_str}_{string.capwords(mode)}"
        elif isinstance(mode, list):
            feat_name = [f"FirstIntersectNormPosition_{self.ngram_str}_{string.capwords(m)}"
                         for m in mode]
        return feat_name
class LastIntersectNormPosition_Ngram(NormPosition_Ngram_BaseEstimator):
    """Single aggregation features: normalised positions, LAST obs ngram."""
    def __init__(self, obs_corpus, target_corpus, ngram, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, ngram, -1, aggregation_mode)

    def __name__(self):
        mode = self.aggregation_mode
        if isinstance(mode, str):
            feat_name = f"LastIntersectNormPosition_{self.ngram_str}_{string.capwords(mode)}"
        elif isinstance(mode, list):
            feat_name = [f"LastIntersectNormPosition_{self.ngram_str}_{string.capwords(m)}"
                         for m in mode]
        return feat_name
# ---------------------------- Main --------------------------------------
def run_count():
    """Generate and persist the first/last ngram intersect COUNT and RATIO
    features over the lemmatized+stemmed dataset, for both query-in-document
    and document-in-query field pairings."""
    logname = "generate_feature_first_last_ngram_count_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)
    generators = [
        FirstIntersectCount_Ngram,
        LastIntersectCount_Ngram,
        FirstIntersectRatio_Ngram,
        LastIntersectRatio_Ngram,
    ]
    obs_fields_list = []
    target_fields_list = []
    ## query in document
    # [:2] keeps only "search_term" and "search_term_product_name"
    obs_fields_list.append( ["search_term", "search_term_product_name", "search_term_alt", "search_term_auto_corrected"][:2] )
    target_fields_list.append( ["product_title", "product_title_product_name", "product_description", "product_attribute", "product_brand", "product_color"] )
    ## document in query
    obs_fields_list.append( ["product_title", "product_title_product_name", "product_description", "product_attribute", "product_brand", "product_color"] )
    target_fields_list.append( ["search_term", "search_term_product_name", "search_term_alt", "search_term_auto_corrected"][:2] )
    # [:3] keeps uni/bi/tri-grams only (drops the combined 12 and 123 variants)
    ngrams = [1,2,3,12,123][:3]
    for obs_fields, target_fields in zip(obs_fields_list, target_fields_list):
        for generator in generators:
            for ngram in ngrams:
                param_list = [ngram]
                pf = PairwiseFeatureWrapper(generator, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
                pf.go()
def run_position():
    """Generate and persist the first/last ngram intersect POSITION and
    normalised-position features (aggregated with mean/std/max/min/median)
    for both query-in-document and document-in-query field pairings."""
    logname = "generate_feature_first_last_ngram_position_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)
    generators = [
        FirstIntersectPosition_Ngram,
        LastIntersectPosition_Ngram,
        FirstIntersectNormPosition_Ngram,
        LastIntersectNormPosition_Ngram,
    ]
    obs_fields_list = []
    target_fields_list = []
    ## query in document
    # [:2] keeps only "search_term" and "search_term_product_name"
    obs_fields_list.append( ["search_term", "search_term_product_name", "search_term_alt", "search_term_auto_corrected"][:2] )
    target_fields_list.append( ["product_title", "product_title_product_name", "product_description", "product_attribute", "product_brand", "product_color"] )
    ## document in query
    obs_fields_list.append( ["product_title", "product_title_product_name", "product_description", "product_attribute", "product_brand", "product_color"] )
    target_fields_list.append( ["search_term", "search_term_product_name", "search_term_alt", "search_term_auto_corrected"][:2] )
    # [:3] keeps uni/bi/tri-grams only (drops the combined 12 and 123 variants)
    ngrams = [1,2,3,12,123][:3]
    aggregation_mode = ["mean", "std", "max", "min", "median"]
    for obs_fields, target_fields in zip(obs_fields_list, target_fields_list):
        for generator in generators:
            for ngram in ngrams:
                param_list = [ngram, aggregation_mode]
                pf = PairwiseFeatureWrapper(generator, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
                pf.go()
if __name__ == "__main__":
run_count()
# # not used in final submission
# run_position()
| 45.795367
| 158
| 0.695473
| 1,409
| 11,861
| 5.398864
| 0.117104
| 0.082818
| 0.047325
| 0.066255
| 0.80702
| 0.802287
| 0.787564
| 0.787564
| 0.787564
| 0.777047
| 0
| 0.003714
| 0.182868
| 11,861
| 258
| 159
| 45.972868
| 0.78116
| 0.082792
| 0
| 0.637363
| 0
| 0
| 0.106483
| 0.066587
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148352
| false
| 0
| 0.049451
| 0.021978
| 0.335165
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
14fc5f8ad475bef473b6ded370e5fb087574c696
| 106
|
py
|
Python
|
canary/servicemap.py
|
bohatiuk/agh-reddit-stash
|
f0c3cd51f509c81f3df1d48e3867a178b7c84630
|
[
"Apache-2.0"
] | null | null | null |
canary/servicemap.py
|
bohatiuk/agh-reddit-stash
|
f0c3cd51f509c81f3df1d48e3867a178b7c84630
|
[
"Apache-2.0"
] | null | null | null |
canary/servicemap.py
|
bohatiuk/agh-reddit-stash
|
f0c3cd51f509c81f3df1d48e3867a178b7c84630
|
[
"Apache-2.0"
] | null | null | null |
import json
def service_map(path='service-map.json'):
    """Load and return the service map parsed from a JSON file.

    Args:
        path: JSON file to read (defaults to 'service-map.json' in the
            current working directory, preserving the original behaviour).

    Returns:
        The deserialized JSON content (typically a dict).

    Raises:
        FileNotFoundError: if the file does not exist.
        json.JSONDecodeError: if the file is not valid JSON.
    """
    # Explicit encoding: JSON files are UTF-8 regardless of the locale.
    with open(path, encoding='utf-8') as jsn:
        return json.load(jsn)
| 13.25
| 41
| 0.641509
| 16
| 106
| 4.1875
| 0.6875
| 0.298507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235849
| 106
| 7
| 42
| 15.142857
| 0.82716
| 0
| 0
| 0
| 0
| 0
| 0.152381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
0918353043047a9982b7f3d1fdb84c610072a75b
| 27
|
py
|
Python
|
tests/workspace/mod2/mod2_1.py
|
oraluben/pycds
|
5c7d11363851e451f07bded967168e493fb16a2f
|
[
"MIT"
] | 8
|
2022-03-28T02:19:52.000Z
|
2022-03-29T22:10:14.000Z
|
tests/workspace/mod2/mod2_1.py
|
oraluben/pycds
|
5c7d11363851e451f07bded967168e493fb16a2f
|
[
"MIT"
] | null | null | null |
tests/workspace/mod2/mod2_1.py
|
oraluben/pycds
|
5c7d11363851e451f07bded967168e493fb16a2f
|
[
"MIT"
] | 1
|
2022-03-28T07:08:26.000Z
|
2022-03-28T07:08:26.000Z
|
def mod2_1_foo():
    """No-op placeholder so this module exposes an importable symbol."""
    pass
| 9
| 17
| 0.62963
| 5
| 27
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0.259259
| 27
| 2
| 18
| 13.5
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
093bf2115cea2b57ad107313b7751beb10786959
| 28
|
py
|
Python
|
psmlprocess/__init__.py
|
nathan-az/pyspark-ml-processing-utils
|
1781221ac05b830ccf0d97972204b01b8459119f
|
[
"Apache-2.0"
] | null | null | null |
psmlprocess/__init__.py
|
nathan-az/pyspark-ml-processing-utils
|
1781221ac05b830ccf0d97972204b01b8459119f
|
[
"Apache-2.0"
] | null | null | null |
psmlprocess/__init__.py
|
nathan-az/pyspark-ml-processing-utils
|
1781221ac05b830ccf0d97972204b01b8459119f
|
[
"Apache-2.0"
] | null | null | null |
from .transformers import *
| 14
| 27
| 0.785714
| 3
| 28
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
11f037970058374f2a7827928d589715eaa654b1
| 16,078
|
py
|
Python
|
tests/test_read_subset.py
|
GeoscienceAustralia/wagl
|
2de957da754a90c3bedd8ee3196c5effd849ac80
|
[
"Apache-2.0"
] | 22
|
2018-05-30T23:42:10.000Z
|
2021-12-25T14:21:46.000Z
|
tests/test_read_subset.py
|
sixy6e/wagl
|
523f574b4a4b62c3aed3a378e13a7548a7f21c0c
|
[
"Apache-2.0"
] | 52
|
2018-02-20T05:31:55.000Z
|
2021-11-23T23:38:15.000Z
|
tests/test_read_subset.py
|
sixy6e/wagl
|
523f574b4a4b62c3aed3a378e13a7548a7f21c0c
|
[
"Apache-2.0"
] | 8
|
2018-02-20T05:08:38.000Z
|
2021-08-12T23:16:41.000Z
|
#!/usr/bin/env python
from __future__ import absolute_import
import os
import shutil
import tempfile
import unittest
import numpy
import h5py
from wagl.data import read_subset
from wagl.data import write_img
from wagl import unittesting_tools as ut
class TestReadSubset(unittest.TestCase):
    """Tests for wagl.data.read_subset against an in-memory HDF5 dataset.

    The fixture is a 1200 x 1500 image of ones, so the sum of a returned
    subset equals the number of image pixels the requested window overlaps.
    Each `test_case_*` requests a `subs_shape` window at a different origin
    so that every overlap geometry (corners, edges, interior, fully outside)
    is exercised.
    """

    # Class-level fixture: image of ones stored in an in-core
    # (non-persistent) HDF5 file.
    img, geobox = ut.create_test_image((1200, 1500))
    img[:] = 1
    fid = h5py.File("test-subset.h5", "w", backing_store=False, driver="core")
    ds = fid.create_dataset("data", data=img)
    ds.attrs["geotransform"] = geobox.transform.to_gdal()
    ds.attrs["crs_wkt"] = geobox.crs.ExportToWkt()
    ds.attrs["fillvalue"] = 0

    # (rows, cols) of the window requested in each test case
    subs_shape = (200, 300)

    # ------------------------------------------------------------------
    # helpers
    # ------------------------------------------------------------------
    def _window_map_coords(self, ul):
        """Map (x, y) coords of the corners [UL, UR, LR, LL] of a
        `subs_shape` window whose upper-left ARRAY index is `ul` (row, col)."""
        rows, cols = self.subs_shape
        corners = [
            ul,
            (ul[0], ul[1] + cols),
            (ul[0] + rows, ul[1] + cols),
            (ul[0] + rows, ul[1]),
        ]
        # the affine transform expects (x, y), i.e. (col, row) — hence [::-1]
        return [self.geobox.transform * c[::-1] for c in corners]

    def _check_subset_sum(self, ul, expected_count):
        """Read the window at array origin `ul` and assert its sum equals
        the expected number of in-bounds pixels."""
        ul_xy, ur_xy, lr_xy, ll_xy = self._window_map_coords(ul)
        data, gb = read_subset(self.ds, ul_xy, ur_xy, lr_xy, ll_xy)
        self.assertTrue(data.sum() == expected_count)

    # ------------------------------------------------------------------
    # file-based out-of-bounds tests (currently skipped)
    # ------------------------------------------------------------------
    @unittest.skip("Refactor DSM subsetting logic; TODO update test")
    def testWestBounds(self):
        """
        Test that a co-ordinate west of the image domain returns an
        index error.
        The subset attempts to read a 20 by 20 block with half contained
        within the image bounds and half contained outside the image
        bounds.
        """
        img, geobox = ut.create_test_image()
        # Temporarily write the image to disk
        temp_dir = tempfile.mkdtemp()
        fname = os.path.join(temp_dir, "testWestBounds")
        write_img(img, fname, geobox=geobox)
        # Create box to read 10 pixels left of the image bounds
        UL = geobox.convert_coordinates((-9, 0))
        UR = geobox.convert_coordinates((9, 0))
        LR = geobox.convert_coordinates((9, 10))
        LL = geobox.convert_coordinates((-9, 10))
        kwds = {"fname": fname, "ul_xy": UL, "ur_xy": UR, "lr_xy": LR, "ll_xy": LL}
        self.assertRaises(IndexError, read_subset, **kwds)
        # Cleanup
        shutil.rmtree(temp_dir)

    @unittest.skip("Refactor DSM subsetting logic; TODO update test")
    def testEastBounds(self):
        """
        Test that a co-ordinate east of the image domain returns an
        index error.
        The subset attempts to read a 20 by 20 block with half contained
        within the image bounds and half contained outside the image
        """
        img, geobox = ut.create_test_image()
        cols, rows = geobox.get_shape_xy()
        # Temporarily write the image to disk
        temp_dir = tempfile.mkdtemp()
        fname = os.path.join(temp_dir, "testEastBounds")
        write_img(img, fname, geobox=geobox)
        # Create box to read 10 pixels right of the image bounds
        UL = geobox.convert_coordinates((cols - 9, 0))
        UR = geobox.convert_coordinates((cols + 10, 0))
        LR = geobox.convert_coordinates((cols + 10, 10))
        LL = geobox.convert_coordinates((cols - 9, 10))
        kwds = {"fname": fname, "ul_xy": UL, "ur_xy": UR, "lr_xy": LR, "ll_xy": LL}
        self.assertRaises(IndexError, read_subset, **kwds)
        # Cleanup
        shutil.rmtree(temp_dir)

    @unittest.skip("Refactor DSM subsetting logic; TODO update test")
    def testNorthBounds(self):
        """
        Test that a co-ordinate north of the image domain returns an
        index error.
        The subset attempts to read a 20 by 20 block with half contained
        within the image bounds and half contained outside the image
        """
        img, geobox = ut.create_test_image()
        # Temporarily write the image to disk
        temp_dir = tempfile.mkdtemp()
        fname = os.path.join(temp_dir, "testNorthBounds")
        write_img(img, fname, geobox=geobox)
        # Create box to read 10 pixels above the image bounds
        UL = geobox.convert_coordinates((0, -9))
        UR = geobox.convert_coordinates((10, -9))
        LR = geobox.convert_coordinates((10, 10))
        LL = geobox.convert_coordinates((0, 10))
        kwds = {"fname": fname, "ul_xy": UL, "ur_xy": UR, "lr_xy": LR, "ll_xy": LL}
        self.assertRaises(IndexError, read_subset, **kwds)
        # Cleanup
        shutil.rmtree(temp_dir)

    @unittest.skip("Refactor DSM subsetting logic; TODO update test")
    def testSouthBounds(self):
        """
        Test that a co-ordinate south of the image domain returns an
        index error.
        The subset attempts to read a 20 by 20 block with half contained
        within the image bounds and half contained outside the image
        """
        img, geobox = ut.create_test_image()
        cols, rows = geobox.get_shape_xy()
        # Temporarily write the image to disk
        temp_dir = tempfile.mkdtemp()
        fname = os.path.join(temp_dir, "testSouthBounds")
        write_img(img, fname, geobox=geobox)
        # Create box to read 10 pixels below the image bounds
        UL = geobox.convert_coordinates((0, rows - 9))
        UR = geobox.convert_coordinates((10, rows - 9))
        LR = geobox.convert_coordinates((10, rows + 10))
        LL = geobox.convert_coordinates((0, rows + 10))
        kwds = {"fname": fname, "ul_xy": UL, "ur_xy": UR, "lr_xy": LR, "ll_xy": LL}
        self.assertRaises(IndexError, read_subset, **kwds)
        # Cleanup
        shutil.rmtree(temp_dir)

    @unittest.skip("Requires refactoring")
    def test_correct_subset(self):
        """
        Test that the subset is what we expect.
        Read a 10 by 10 starting at the UL corner.
        """
        img, geobox = ut.create_test_image()
        cols, rows = geobox.get_shape_xy()
        # Temporarily write the image to disk
        temp_dir = tempfile.mkdtemp()
        fname = os.path.join(temp_dir, "test_image")
        write_img(img, fname, geobox=geobox)
        # Create box to read 10 pixels below the image bounds
        UL = geobox.convert_coordinates((0, 0))
        UR = geobox.convert_coordinates((9, 0))
        LR = geobox.convert_coordinates((9, 9))
        LL = geobox.convert_coordinates((0, 9))
        kwds = {"fname": fname, "ul_xy": UL, "ur_xy": UR, "lr_xy": LR, "ll_xy": LL}
        subs, geobox = read_subset(**kwds)
        base = img[0:10, 0:10]
        result = numpy.sum(base - subs)
        self.assertTrue(result == 0)
        # Cleanup
        shutil.rmtree(temp_dir)

    # ------------------------------------------------------------------
    # overlap-geometry cases against the in-memory fixture
    # ------------------------------------------------------------------
    def test_case_a(self):
        """Origin (-150, -50): window overlaps the image's top-left corner."""
        self._check_subset_sum((-150, -50), 50 * 250)

    def test_case_b(self):
        """Origin (-150, 600): window straddles the top edge."""
        self._check_subset_sum((-150, 600), 50 * 300)

    def test_case_c(self):
        """Origin (-150, 1400): window overlaps the top-right corner."""
        self._check_subset_sum((-150, 1400), 50 * 100)

    def test_case_d(self):
        """Origin (600, -50): window straddles the left edge."""
        self._check_subset_sum((600, -50), 200 * 250)

    def test_case_e(self):
        """Origin (600, 600): window lies fully inside the image."""
        self._check_subset_sum((600, 600), 200 * 300)

    def test_case_f(self):
        """Origin (600, 1400): window straddles the right edge."""
        self._check_subset_sum((600, 1400), 200 * 100)

    def test_case_g(self):
        """Origin (1100, -50): window overlaps the bottom-left corner."""
        self._check_subset_sum((1100, -50), 100 * 250)

    def test_case_h(self):
        """Origin (1100, 600): window straddles the bottom edge."""
        self._check_subset_sum((1100, 600), 100 * 300)

    def test_case_i(self):
        """Origin (1100, 1400): window overlaps the bottom-right corner."""
        self._check_subset_sum((1100, 1400), 100 * 100)

    def test_case_j(self):
        """Origin (600, -400): window lies entirely west of the image,
        which must raise IndexError."""
        ul_xy, ur_xy, lr_xy, ll_xy = self._window_map_coords((600, -400))
        with self.assertRaises(IndexError):
            read_subset(self.ds, ul_xy, ur_xy, lr_xy, ll_xy)
if __name__ == "__main__":
unittest.main()
| 31.774704
| 83
| 0.491977
| 1,995
| 16,078
| 3.79599
| 0.090727
| 0.052819
| 0.068665
| 0.079229
| 0.871913
| 0.849597
| 0.803248
| 0.803248
| 0.782913
| 0.695761
| 0
| 0.041256
| 0.356263
| 16,078
| 505
| 84
| 31.837624
| 0.690435
| 0.257992
| 0
| 0.597156
| 0
| 0
| 0.043124
| 0
| 0
| 0
| 0
| 0
| 0.07109
| 1
| 0.07109
| false
| 0
| 0.047393
| 0
| 0.137441
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
eeba59ebaaeaf5a6a1823df6d22007b608e55a1c
| 270
|
py
|
Python
|
src/modules/providers/exceptions.py
|
DmitryBurnaev/podcast-service
|
53349a3f9aed22a8024d0c83380f9a02464962a3
|
[
"MIT"
] | 5
|
2021-07-01T16:31:29.000Z
|
2022-01-29T14:32:13.000Z
|
src/modules/providers/exceptions.py
|
DmitryBurnaev/podcast-service
|
53349a3f9aed22a8024d0c83380f9a02464962a3
|
[
"MIT"
] | 45
|
2020-10-25T19:41:26.000Z
|
2022-03-25T06:31:58.000Z
|
src/modules/providers/exceptions.py
|
DmitryBurnaev/podcast-service
|
53349a3f9aed22a8024d0c83380f9a02464962a3
|
[
"MIT"
] | 1
|
2022-01-27T11:30:07.000Z
|
2022-01-27T11:30:07.000Z
|
from common.exceptions import BaseApplicationError
class FFMPegPreparationError(BaseApplicationError):
message = "We couldn't prepare file by ffmpeg"
class SourceFetchError(BaseApplicationError):
message = "We couldn't extract info about requested episode."
| 27
| 65
| 0.803704
| 28
| 270
| 7.75
| 0.75
| 0.248848
| 0.267281
| 0.322581
| 0.331797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137037
| 270
| 9
| 66
| 30
| 0.93133
| 0
| 0
| 0
| 0
| 0
| 0.307407
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
0171cae2e7872622c4cad7a3ca03abf324f9162f
| 221
|
py
|
Python
|
devices/sensor/smoke.py
|
volmart/domoticz-zigbee2mqtt-plugin
|
1b86236cff618b58adceea8a417eff5a92eee548
|
[
"MIT"
] | null | null | null |
devices/sensor/smoke.py
|
volmart/domoticz-zigbee2mqtt-plugin
|
1b86236cff618b58adceea8a417eff5a92eee548
|
[
"MIT"
] | null | null | null |
devices/sensor/smoke.py
|
volmart/domoticz-zigbee2mqtt-plugin
|
1b86236cff618b58adceea8a417eff5a92eee548
|
[
"MIT"
] | null | null | null |
from devices.boolean_sensor import BooleanSensor
class SmokeSensor(BooleanSensor):
def __init__(self, devices, alias, value_key):
super().__init__(devices, alias, value_key, BooleanSensor.SENSOR_TYPE_SMOKE)
| 31.571429
| 84
| 0.782805
| 26
| 221
| 6.153846
| 0.653846
| 0.15
| 0.2125
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131222
| 221
| 6
| 85
| 36.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
6d6a9c3e032f91ce05b1dc8d81e3cffa9ec48378
| 122
|
py
|
Python
|
tests/conftest.py
|
xiaofei0722/xfapitest
|
0050520f872eb2b5c997b7d01d9851048450deff
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
xiaofei0722/xfapitest
|
0050520f872eb2b5c997b7d01d9851048450deff
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
xiaofei0722/xfapitest
|
0050520f872eb2b5c997b7d01d9851048450deff
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import requests
@pytest.fixture(scope="function")
def init_session():
return requests.sessions.Session()
| 24.4
| 38
| 0.786885
| 15
| 122
| 6.333333
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098361
| 122
| 5
| 38
| 24.4
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0.065041
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
6d809ac7066c900b0a899ed8ae1ec2ad93b5c818
| 340
|
py
|
Python
|
src/python/unicode_segmentation/_unicode_segmentation__ffi.py
|
anthrotype/unicode-segmentation-py
|
7f28f5141660f5f6c7dc5809521c931c335ec640
|
[
"Apache-2.0"
] | 2
|
2020-03-10T02:22:31.000Z
|
2020-03-10T07:39:07.000Z
|
src/python/unicode_segmentation/_unicode_segmentation__ffi.py
|
anthrotype/unicode-segmentation-py
|
7f28f5141660f5f6c7dc5809521c931c335ec640
|
[
"Apache-2.0"
] | null | null | null |
src/python/unicode_segmentation/_unicode_segmentation__ffi.py
|
anthrotype/unicode-segmentation-py
|
7f28f5141660f5f6c7dc5809521c931c335ec640
|
[
"Apache-2.0"
] | null | null | null |
# auto-generated file
import _cffi_backend
ffi = _cffi_backend.FFI('unicode_segmentation._unicode_segmentation__ffi',
_version = 0x2601,
_types = b'\x00\x00\x05\x0D\x00\x00\x06\x03\x00\x00\x07\x03\x00\x00\x02\x11\x00\x00\x00\x0F\x00\x00\x01\x01\x00\x00\x02\x01\x00\x00\x0A\x01',
_globals = (b'\x00\x00\x00\x23graphemes',0,),
)
| 37.777778
| 145
| 0.732353
| 58
| 340
| 4.086207
| 0.465517
| 0.278481
| 0.118143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.242718
| 0.091176
| 340
| 8
| 146
| 42.5
| 0.524272
| 0.055882
| 0
| 0
| 1
| 0.166667
| 0.626959
| 0.626959
| 0
| 0
| 0.018809
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6dc57078d2bde188616b562474d7b6fd10929cca
| 3,923
|
py
|
Python
|
model/stuq.py
|
YYgroup/STmodel
|
0e608c7d3fe596a2bf2aad446bbd1e151c56b543
|
[
"MIT"
] | 1
|
2021-06-18T06:13:09.000Z
|
2021-06-18T06:13:09.000Z
|
model/stuq.py
|
YYgroup/STmodel
|
0e608c7d3fe596a2bf2aad446bbd1e151c56b543
|
[
"MIT"
] | null | null | null |
model/stuq.py
|
YYgroup/STmodel
|
0e608c7d3fe596a2bf2aad446bbd1e151c56b543
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy import stats
import STmodel.data as std
import STmodel.model.st as stm
def sample_params(npts, nstd_T=0.1, nstd_C=0.15):
# default values
A = 0.317
B = 0.033
T = 5.5
C = 1.0
file_path = std.get_file('linear_regression_AB.npz')
data = np.load(file_path)
A_samples = data['A'][:npts]
B_samples = data['B'][:npts]
# standard devaition of T and C
std_T = T * nstd_T
std_C = C * nstd_C
T_samples = np.random.normal(T, std_T, npts)
C_samples = np.random.normal(C, std_C, npts)
return A_samples, B_samples, T_samples, C_samples
def fwd_params(ur, lr,
unburnt,fuel,oxidizer,chemistry,
A_samples, B_samples, T_samples, C_samples):
n_samples = len(T_samples)
v_samples = np.zeros(n_samples)
r = stm.Reactant(unburnt, fuel, oxidizer, chemistry)
for i in range(n_samples):
m = stm.Model(r,
A = A_samples[i],
B = B_samples[i],
T = T_samples[i],
C = C_samples[i])
v_samples[i] = m.ratio_turbulent_burning_velocity(ur, lr)
return v_samples
def fwd_chem(ur, lr, unburnt, sc0, dl0, Le, sigma, flame_samples):
n_samples = flame_samples[::2].shape[0]
v_samples = np.zeros(n_samples)
for i in range(n_samples):
dl = flame_samples[2*i,0]
ReF = flame_samples[2*i+1,0]
sc = flame_samples[2*i+1,1]
I0_table = np.zeros(flame_samples[2*i:2*i+2,1:].shape)
I0_table[0] = flame_samples[2*i,1:] * Le
I0_table[1] = flame_samples[2*i+1,1:] / sc
rsc = sc0/sc
rdl = dl0/dl
r = stm.Reactant(unburnt, 'progvar',
Le=Le, sigma=sigma, ReF=ReF, I0_table=I0_table)
m = stm.Model(r)
v_samples[i] = m.ratio_turbulent_burning_velocity(ur*rsc, lr*rdl)
return v_samples
def fwd_chem_para(ur, lr, unburnt, sc0, dl0, Le, sigma, flame_samples,
A_samples, B_samples, T_samples, C_samples):
n_samples = flame_samples[::2].shape[0]
v_samples = np.zeros(n_samples)
for i in range(n_samples):
dl = flame_samples[2*i,0]
ReF = flame_samples[2*i+1,0]
sc = flame_samples[2*i+1,1]
I0_table = np.zeros(flame_samples[2*i:2*i+2,1:].shape)
I0_table[0] = flame_samples[2*i,1:] * Le
I0_table[1] = flame_samples[2*i+1,1:] / sc
rsc = sc0/sc
rdl = dl0/dl
r = stm.Reactant(unburnt, 'progvar',
Le=Le, sigma=sigma, ReF=ReF, I0_table=I0_table)
m = stm.Model(r,
A = A_samples[i],
B = B_samples[i],
T = T_samples[i],
C = C_samples[i])
v_samples[i] = m.ratio_turbulent_burning_velocity(ur*rsc, lr*rdl)
return v_samples
def fwd_lr(ur, lr_samples,
unburnt,fuel,oxidizer,chemistry):
n_samples = len(lr_samples)
v_samples = np.zeros(n_samples)
mixture = stm.Mixture(unburnt, fuel, oxidizer, chemistry)
for i, lr in enumerate(lr_samples):
v_samples[i] = mixture.ratio_turbulent_burning_velocity(ur, lr)
return v_samples
def fwd_params_lr(ur, lr_samples,
unburnt,fuel,oxidizer,chemistry,
A_samples, B_samples, T_samples, C_samples):
n_samples = len(T_samples) * len(lr_samples)
v_samples = np.zeros(n_samples)
for i in range(n_samples):
mixture = stm.Mixture(unburnt, fuel, oxidizer, chemistry,
A = A_samples[i],
B = B_samples[i],
T = T_samples[i],
C = C_samples[i])
for j, lr in enumerate(lr_samples):
v_samples[i*len(lr_samples)+j] = mixture.ratio_turbulent_burning_velocity(ur, lr)
return v_samples
| 28.845588
| 93
| 0.574305
| 591
| 3,923
| 3.588832
| 0.1489
| 0.064121
| 0.085809
| 0.079208
| 0.809524
| 0.809524
| 0.781707
| 0.770863
| 0.701084
| 0.628006
| 0
| 0.028445
| 0.309967
| 3,923
| 135
| 94
| 29.059259
| 0.755079
| 0.011216
| 0
| 0.612903
| 0
| 0
| 0.01032
| 0.006192
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.043011
| 0
| 0.172043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
099c4007fecc0a847f245186fdfd5e6c4d0f5c1b
| 72
|
py
|
Python
|
smartsim/ml/torch/__init__.py
|
billschereriii/SmartSim
|
7ef4cffeba23fe19b931bdae819f4de99bb112a3
|
[
"BSD-2-Clause"
] | 1
|
2022-01-19T21:18:59.000Z
|
2022-01-19T21:18:59.000Z
|
smartsim/ml/torch/__init__.py
|
billschereriii/SmartSim
|
7ef4cffeba23fe19b931bdae819f4de99bb112a3
|
[
"BSD-2-Clause"
] | null | null | null |
smartsim/ml/torch/__init__.py
|
billschereriii/SmartSim
|
7ef4cffeba23fe19b931bdae819f4de99bb112a3
|
[
"BSD-2-Clause"
] | null | null | null |
from .data import DataLoader, DynamicDataGenerator, StaticDataGenerator
| 36
| 71
| 0.875
| 6
| 72
| 10.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 72
| 1
| 72
| 72
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
61f30d0969f9508ada86158a1a5334938e26b0fe
| 126
|
py
|
Python
|
week03/code11.py
|
byeongal/KMUCP
|
5bafe02c40aae67fc53d9e6cdcb727929368587e
|
[
"MIT"
] | null | null | null |
week03/code11.py
|
byeongal/KMUCP
|
5bafe02c40aae67fc53d9e6cdcb727929368587e
|
[
"MIT"
] | null | null | null |
week03/code11.py
|
byeongal/KMUCP
|
5bafe02c40aae67fc53d9e6cdcb727929368587e
|
[
"MIT"
] | 1
|
2019-11-27T20:28:19.000Z
|
2019-11-27T20:28:19.000Z
|
pokemon = ["피카츄", "라이츄", "파이리", "꼬부기", "버터플", "야도란", "피죤투", "또가스"]
print(pokemon[:3])
print(pokemon[3:])
print(pokemon[::2])
| 21
| 66
| 0.555556
| 18
| 126
| 3.888889
| 0.666667
| 0.514286
| 0.371429
| 0.514286
| 0.542857
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026786
| 0.111111
| 126
| 5
| 67
| 25.2
| 0.598214
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.75
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
1125326393feb86d200d70d7e850b5c4d73e76e4
| 38
|
py
|
Python
|
bcolz/run_test.py
|
nikicc/anaconda-recipes
|
9c611a5854bf41bbc5e7ed9853dc71c0851a62ef
|
[
"BSD-3-Clause"
] | 130
|
2015-07-28T03:41:21.000Z
|
2022-03-16T03:07:41.000Z
|
bcolz/run_test.py
|
nikicc/anaconda-recipes
|
9c611a5854bf41bbc5e7ed9853dc71c0851a62ef
|
[
"BSD-3-Clause"
] | 119
|
2015-08-01T00:54:06.000Z
|
2021-01-05T13:00:46.000Z
|
bcolz/run_test.py
|
nikicc/anaconda-recipes
|
9c611a5854bf41bbc5e7ed9853dc71c0851a62ef
|
[
"BSD-3-Clause"
] | 72
|
2015-07-29T02:35:56.000Z
|
2022-02-26T14:31:15.000Z
|
import sys
import bcolz
bcolz.test()
| 7.6
| 12
| 0.763158
| 6
| 38
| 4.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 38
| 4
| 13
| 9.5
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3a2114257679a30200536e9e5ec88b7ddd09c193
| 165
|
py
|
Python
|
hknweb/candidate/admin/activities/__init__.py
|
jyxzhang/hknweb
|
a01ffd8587859bf63c46213be6a0c8b87164a5c2
|
[
"MIT"
] | null | null | null |
hknweb/candidate/admin/activities/__init__.py
|
jyxzhang/hknweb
|
a01ffd8587859bf63c46213be6a0c8b87164a5c2
|
[
"MIT"
] | null | null | null |
hknweb/candidate/admin/activities/__init__.py
|
jyxzhang/hknweb
|
a01ffd8587859bf63c46213be6a0c8b87164a5c2
|
[
"MIT"
] | null | null | null |
from hknweb.candidate.admin.activities.officer_challenge import OffChallengeAdmin
from hknweb.candidate.admin.activities.bitbyteactivity import BitByteActivityAdmin
| 55
| 82
| 0.90303
| 17
| 165
| 8.705882
| 0.647059
| 0.135135
| 0.256757
| 0.324324
| 0.459459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048485
| 165
| 2
| 83
| 82.5
| 0.942675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3a42090a810b424b01c30256f2f97572a687ab2d
| 37
|
py
|
Python
|
python/galois/connected_components.py
|
bigwater/Galois
|
03738c883301844cfb15a71647744a59184f43c0
|
[
"BSD-3-Clause"
] | 230
|
2018-06-20T22:18:31.000Z
|
2022-03-27T13:09:59.000Z
|
python/galois/connected_components.py
|
bigwater/Galois
|
03738c883301844cfb15a71647744a59184f43c0
|
[
"BSD-3-Clause"
] | 307
|
2018-06-23T12:45:31.000Z
|
2022-03-26T01:54:38.000Z
|
python/galois/connected_components.py
|
bigwater/Galois
|
03738c883301844cfb15a71647744a59184f43c0
|
[
"BSD-3-Clause"
] | 110
|
2018-06-19T04:39:16.000Z
|
2022-03-29T01:55:47.000Z
|
from ._connected_components import *
| 18.5
| 36
| 0.837838
| 4
| 37
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3a50c8f492b9a769883b1fca6f7e74fa306e4365
| 46
|
py
|
Python
|
testing/lambdas/echo.py
|
nmittal18/working_open_lambda
|
a6b280d107a01ad1366a2ca0ccbb6f4dce736f52
|
[
"Apache-2.0"
] | null | null | null |
testing/lambdas/echo.py
|
nmittal18/working_open_lambda
|
a6b280d107a01ad1366a2ca0ccbb6f4dce736f52
|
[
"Apache-2.0"
] | null | null | null |
testing/lambdas/echo.py
|
nmittal18/working_open_lambda
|
a6b280d107a01ad1366a2ca0ccbb6f4dce736f52
|
[
"Apache-2.0"
] | 1
|
2020-01-08T18:00:04.000Z
|
2020-01-08T18:00:04.000Z
|
def handler(db_conn, event):
return event
| 15.333333
| 28
| 0.717391
| 7
| 46
| 4.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195652
| 46
| 2
| 29
| 23
| 0.864865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
28ea526ee38a9fa0402df78f1f3de391a9526f1b
| 29
|
py
|
Python
|
json2dir/__init__.py
|
Kanahiro/json2dir
|
d7fc1959bc743df051f9ec43a09b8f01a7caad74
|
[
"MIT"
] | 1
|
2020-05-08T18:57:00.000Z
|
2020-05-08T18:57:00.000Z
|
json2dir/__init__.py
|
Kanahiro/json2dir
|
d7fc1959bc743df051f9ec43a09b8f01a7caad74
|
[
"MIT"
] | null | null | null |
json2dir/__init__.py
|
Kanahiro/json2dir
|
d7fc1959bc743df051f9ec43a09b8f01a7caad74
|
[
"MIT"
] | null | null | null |
from .main import dir_list_of
| 29
| 29
| 0.862069
| 6
| 29
| 3.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e917ab68fda25bcdfc856567ecc1cee1f5cb7041
| 185
|
py
|
Python
|
configs/dla/dla34_bdd100k.py
|
XDong18/mmclassification
|
115c39ed4673d9cdd7b5f543482c1038f0c77ab5
|
[
"Apache-2.0"
] | null | null | null |
configs/dla/dla34_bdd100k.py
|
XDong18/mmclassification
|
115c39ed4673d9cdd7b5f543482c1038f0c77ab5
|
[
"Apache-2.0"
] | null | null | null |
configs/dla/dla34_bdd100k.py
|
XDong18/mmclassification
|
115c39ed4673d9cdd7b5f543482c1038f0c77ab5
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../_base_/datasets/bdd100k.py', '../_base_/models/dla34_bdd100k.py',
'../_base_/schedules/bdd100k.py', '../_base_/default_runtime.py'
]
find_unused_parameters = True
| 37
| 73
| 0.697297
| 22
| 185
| 5.227273
| 0.590909
| 0.234783
| 0.33913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065868
| 0.097297
| 185
| 5
| 74
| 37
| 0.622754
| 0
| 0
| 0
| 0
| 0
| 0.645161
| 0.645161
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3aab1155699ccb69273e6a936478287361f131ab
| 77
|
py
|
Python
|
tests/test_mysorn.py
|
lneisenman/mysorn
|
f81ce6e61594f9f4da32a9323c6f0ee85c27d7af
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_mysorn.py
|
lneisenman/mysorn
|
f81ce6e61594f9f4da32a9323c6f0ee85c27d7af
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_mysorn.py
|
lneisenman/mysorn
|
f81ce6e61594f9f4da32a9323c6f0ee85c27d7af
|
[
"BSD-2-Clause"
] | null | null | null |
import mysorn
def test_main():
assert mysorn # use your library here
| 11
| 42
| 0.701299
| 11
| 77
| 4.818182
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.246753
| 77
| 6
| 43
| 12.833333
| 0.913793
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3ab0aff6362156bad89a898004dc9c57b2abd494
| 121
|
py
|
Python
|
tests/test_peak_distribution.py
|
mwang87/SMITER
|
4af90d20042210e6ea403245dc8f73150fc5d844
|
[
"MIT"
] | 6
|
2021-03-12T04:23:24.000Z
|
2022-02-03T19:47:04.000Z
|
tests/test_peak_distribution.py
|
mwang87/SMITER
|
4af90d20042210e6ea403245dc8f73150fc5d844
|
[
"MIT"
] | 103
|
2021-03-12T00:34:20.000Z
|
2022-03-31T19:53:05.000Z
|
tests/test_peak_distribution.py
|
mwang87/SMITER
|
4af90d20042210e6ea403245dc8f73150fc5d844
|
[
"MIT"
] | 1
|
2021-03-12T00:38:57.000Z
|
2021-03-12T00:38:57.000Z
|
"""Summary."""
def test_gauss_dist():
"""Summary."""
pass
def test_gamma_dist():
"""Summary."""
pass
| 10.083333
| 22
| 0.53719
| 13
| 121
| 4.692308
| 0.538462
| 0.229508
| 0.491803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.239669
| 121
| 11
| 23
| 11
| 0.663043
| 0.214876
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
3ac090235e2e1edaa4bcc0e7120132fbd0aeb302
| 14,751
|
py
|
Python
|
tests/test_services/test_set_687.py
|
ucloud/ucloud-sdk-python2
|
90fb43198df73a78d64bbd98675dc7b302856057
|
[
"Apache-2.0"
] | 19
|
2019-05-15T13:41:58.000Z
|
2019-11-13T09:09:37.000Z
|
tests/test_services/test_set_687.py
|
ucloud/ucloud-sdk-python2
|
90fb43198df73a78d64bbd98675dc7b302856057
|
[
"Apache-2.0"
] | 9
|
2019-07-24T08:31:33.000Z
|
2020-09-22T04:01:46.000Z
|
tests/test_services/test_set_687.py
|
ucloud/ucloud-sdk-python2
|
90fb43198df73a78d64bbd98675dc7b302856057
|
[
"Apache-2.0"
] | 3
|
2019-06-18T00:22:07.000Z
|
2020-04-24T02:28:06.000Z
|
# -*- coding: utf-8 -*-
""" Code is generated by ucloud-model, DO NOT EDIT IT. """
import pytest
import logging
from ucloud.core import exc
from ucloud.testing import env, funcs, op, utest
logger = logging.getLogger(__name__)
scenario = utest.Scenario(687)
@pytest.mark.skipif(env.is_ut(), reason=env.get_skip_reason())
def test_set_687(client, variables):
scenario.initial(variables)
scenario.variables["VPC_name_1"] = "VPC_api_test_1"
scenario.variables["remark"] = "remark_api_test"
scenario.variables["tag"] = "tag_api_test"
scenario.variables["Subnet_name_1_1"] = "subnet_1_1"
scenario.variables["subnet_netmask"] = 24
scenario.variables["project_id"] = "org-achi1o"
scenario.run(client)
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "GetProjectListResponse"),
],
action="GetProjectList",
)
def get_project_list_00(client, variables):
d = {}
try:
resp = client.uaccount().get_project_list(d)
except exc.RetCodeException as e:
resp = e.json()
variables["project_list"] = utest.value_at_path(resp, "ProjectSet")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateVPC",
)
def create_vpc_01(client, variables):
d = {
"Tag": variables.get("tag"),
"Remark": variables.get("remark"),
"Region": variables.get("Region"),
"Network": ["172.16.16.0/20"],
"Name": variables.get("VPC_name_1"),
}
try:
resp = client.vpc().create_vpc(d)
except exc.RetCodeException as e:
resp = e.json()
variables["VPCId_1"] = utest.value_at_path(resp, "VPCId")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateSubnet",
)
def create_subnet_02(client, variables):
d = {
"VPCId": variables.get("VPCId_1"),
"Tag": variables.get("tag"),
"SubnetName": variables.get("Subnet_name_1_1"),
"Subnet": "172.16.17.0",
"Remark": variables.get("remark"),
"Region": variables.get("Region"),
"Netmask": variables.get("subnet_netmask"),
}
try:
resp = client.vpc().create_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
variables["SubnetId_1_1"] = utest.value_at_path(resp, "SubnetId")
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "UpdateSubnetAttributeResponse"),
],
action="UpdateSubnetAttribute",
)
def update_subnet_attribute_03(client, variables):
d = {
"Tag": "qa",
"SubnetId": variables.get("SubnetId_1_1"),
"Region": variables.get("Region"),
}
try:
resp = client.vpc().update_subnet_attribute(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DescribeSubnet",
)
def describe_subnet_04(client, variables):
d = {
"SubnetId": variables.get("SubnetId_1_1"),
"Region": variables.get("Region"),
"Offset": 1,
"Limit": 1,
}
try:
resp = client.vpc().describe_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
action="CreateVPC",
)
def create_vpc_05(client, variables):
d = {
"Region": variables.get("Region"),
"Network": ["192.168.16.0/20"],
"Name": "vpc_2",
}
try:
resp = client.vpc().create_vpc(d)
except exc.RetCodeException as e:
resp = e.json()
variables["VPCId_2"] = utest.value_at_path(resp, "VPCId")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateSubnet",
)
def create_subnet_06(client, variables):
d = {
"VPCId": variables.get("VPCId_2"),
"SubnetName": "Subnet_2_1",
"Subnet": "192.168.17.0",
"Region": variables.get("Region"),
"Netmask": variables.get("subnet_netmask"),
}
try:
resp = client.vpc().create_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
variables["SubnetId_2_1"] = utest.value_at_path(resp, "SubnetId")
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "CreateSubnetResponse"),
],
action="CreateSubnet",
)
def create_subnet_07(client, variables):
d = {
"VPCId": variables.get("VPCId_2"),
"Tag": "Subnet_2_2",
"SubnetName": "Subnet_2_2",
"Subnet": "192.168.18.0",
"Region": variables.get("Region"),
"Netmask": variables.get("subnet_netmask"),
}
try:
resp = client.vpc().create_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
variables["SubnetId_2_2"] = utest.value_at_path(resp, "SubnetId")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "DataSet.0.VPCId", variables.get("VPCId_1")),
("str_eq", "DataSet.0.VPCName", variables.get("VPC_name_1")),
("str_eq", "DataSet.0.SubnetId", variables.get("SubnetId_1_1")),
("str_eq", "DataSet.0.SubnetName", variables.get("Subnet_name_1_1")),
("str_eq", "DataSet.0.Tag", "qa"),
("str_eq", "DataSet.0.Remark", variables.get("remark")),
("str_eq", "DataSet.0.SubnetType", 2),
("str_eq", "DataSet.0.Netmask", 24),
],
action="DescribeSubnet",
)
def describe_subnet_08(client, variables):
d = {
"VPCId": variables.get("VPCId_1"),
"SubnetId": variables.get("SubnetId_1_1"),
"Region": variables.get("Region"),
}
try:
resp = client.vpc().describe_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="AllocateVIP",
)
def allocate_vip_09(client, variables):
d = {
"Zone": variables.get("Zone"),
"VPCId": variables.get("VPCId_1"),
"SubnetId": variables.get("SubnetId_1_1"),
"Remark": "vip_tag1",
"Region": variables.get("Region"),
"Name": "vip_api_auto",
}
try:
resp = client.unet().allocate_vip(d)
except exc.RetCodeException as e:
resp = e.json()
variables["VIPId_1"] = utest.value_at_path(resp, "VIPSet.0.VIPId")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "VIPSet.0.VPCId", variables.get("VPCId_1")),
("str_eq", "VIPSet.0.VIPId", variables.get("VIPId_1")),
("str_eq", "VIPSet.0.SubnetId", variables.get("SubnetId_1_1")),
],
action="DescribeVIP",
)
def describe_vip_10(client, variables):
d = {
"Zone": variables.get("Zone"),
"VPCId": variables.get("VPCId_1"),
"SubnetId": variables.get("SubnetId_1_1"),
"Region": variables.get("Region"),
}
try:
resp = client.unet().describe_vip(d)
except exc.RetCodeException as e:
resp = e.json()
variables["VIP_ip_1"] = utest.value_at_path(resp, "DataSet.0")
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "TotalCount", 1),
("str_eq", "DataSet.0.ResourceId", variables.get("VIPId_1")),
("str_eq", "DataSet.0.IP", variables.get("VIP_ip_1")),
],
action="DescribeSubnetResource",
)
def describe_subnet_resource_11(client, variables):
d = {
"SubnetId": variables.get("SubnetId_1_1"),
"Region": variables.get("Region"),
"Offset": 0,
"Limit": 20,
}
try:
resp = client.vpc().describe_subnet_resource(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="ReleaseVIP",
)
def release_vip_12(client, variables):
d = {
"Zone": variables.get("Zone"),
"VIPId": variables.get("VIPId_1"),
"Region": variables.get("Region"),
}
try:
resp = client.unet().release_vip(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=1,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteSubnet",
)
def delete_subnet_13(client, variables):
d = {
"SubnetId": variables.get("SubnetId_1_1"),
"Region": variables.get("Region"),
}
try:
resp = client.vpc().delete_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=1,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteSubnet",
)
def delete_subnet_14(client, variables):
d = {
"SubnetId": variables.get("SubnetId_2_1"),
"Region": variables.get("Region"),
}
try:
resp = client.vpc().delete_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=1,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DeleteSubnet",
)
def delete_subnet_15(client, variables):
d = {
"SubnetId": variables.get("SubnetId_2_2"),
"Region": variables.get("Region"),
}
try:
resp = client.vpc().delete_subnet(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "AddVPCNetworkResponse"),
],
action="AddVPCNetwork",
)
def add_vpc_network_16(client, variables):
d = {
"VPCId": variables.get("VPCId_1"),
"Region": variables.get("Region"),
"Network": ["10.100.96.0/20"],
}
try:
resp = client.vpc().add_vpc_network(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "DescribeVPCResponse"),
],
action="DescribeVPC",
)
def describe_vpc_17(client, variables):
d = {
"VPCIds": [variables.get("VPCId_1")],
"Region": variables.get("Region"),
}
try:
resp = client.vpc().describe_vpc(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateVPCIntercom",
)
def create_vpc_intercom_18(client, variables):
d = {
"VPCId": variables.get("VPCId_1"),
"Region": variables.get("Region"),
"DstVPCId": variables.get("VPCId_2"),
"DstRegion": variables.get("Region"),
"DstProjectId": funcs.search_value(
variables.get("project_list"), "IsDefault", True, "ProjectId"
),
}
try:
resp = client.vpc().create_vpc_intercom(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "DataSet.0.VPCId", variables.get("VPCId_2")),
],
action="DescribeVPCIntercom",
)
def describe_vpc_intercom_19(client, variables):
d = {"VPCId": variables.get("VPCId_1"), "Region": variables.get("Region")}
try:
resp = client.vpc().describe_vpc_intercom(d)
except exc.RetCodeException as e:
resp = e.json()
return resp
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=2,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="DeleteVPCIntercom",
)
def delete_vpc_intercom_20(client, variables):
    """Tear down the peering link created by create_vpc_intercom_18.

    Mirrors the creation request (same src/dst VPC ids and default project).
    Returns the API response dict, or the error payload on RetCodeException.
    """
    request = {
        "VPCId": variables.get("VPCId_1"),
        "Region": variables.get("Region"),
        "DstVPCId": variables.get("VPCId_2"),
        "DstRegion": variables.get("Region"),
        "DstProjectId": funcs.search_value(
            variables.get("project_list"), "IsDefault", True, "ProjectId"
        ),
    }
    try:
        return client.vpc().delete_vpc_intercom(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=2,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="DeleteVPC",
)
def delete_vpc_21(client, variables):
    """Delete the first VPC (cleanup step).

    Returns the API response dict, or the error payload on RetCodeException.
    """
    request = {
        "VPCId": variables.get("VPCId_1"),
        "Region": variables.get("Region"),
    }
    try:
        return client.vpc().delete_vpc(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=2,
    fast_fail=False,
    # NOTE(review): added for consistency with delete_vpc_21 — this cleanup
    # step previously had no validators, so a failed DeleteVPC for VPCId_2
    # would pass silently. Confirm the omission was not intentional.
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="DeleteVPC",
)
def delete_vpc_22(client, variables):
    """Delete the second VPC (cleanup step, mirrors delete_vpc_21).

    Returns the API response dict, or the error payload on RetCodeException.
    """
    d = {"VPCId": variables.get("VPCId_2"), "Region": variables.get("Region")}
    try:
        resp = client.vpc().delete_vpc(d)
    except exc.RetCodeException as e:
        resp = e.json()
    return resp
| 26.578378
| 78
| 0.611009
| 1,781
| 14,751
| 4.869175
| 0.098821
| 0.102399
| 0.049816
| 0.058349
| 0.809848
| 0.757265
| 0.73893
| 0.719788
| 0.677352
| 0.672048
| 0
| 0.026678
| 0.232594
| 14,751
| 554
| 79
| 26.626354
| 0.739399
| 0.005017
| 0
| 0.689243
| 1
| 0
| 0.176687
| 0.007839
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047809
| false
| 0
| 0.007968
| 0
| 0.101594
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c9457ae8e0f0b711eb26cd64355bddaf8ed2200e
| 44
|
py
|
Python
|
SecondLife/SimpleBot/Program.py
|
uoy-research/DED
|
b03a03ac59a0c3243377ce261d1440cc65731a9e
|
[
"MIT"
] | null | null | null |
SecondLife/SimpleBot/Program.py
|
uoy-research/DED
|
b03a03ac59a0c3243377ce261d1440cc65731a9e
|
[
"MIT"
] | null | null | null |
SecondLife/SimpleBot/Program.py
|
uoy-research/DED
|
b03a03ac59a0c3243377ce261d1440cc65731a9e
|
[
"MIT"
] | null | null | null |
import bot

# Parenthesized print works under Python 2 (as an expression) and Python 3;
# the original bare `print '...'` statement is a SyntaxError on Python 3.
print('Starting Iron Python')
| 6.285714
| 28
| 0.727273
| 6
| 44
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 44
| 6
| 29
| 7.333333
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
c9670e09673239fcdffea0b6bf2f1eb4c63933b4
| 32
|
py
|
Python
|
src/audio_room/envs/__init__.py
|
pseeth/otoworld
|
636ca717c6e571b465ddcd836fa430ccdc53debf
|
[
"MIT"
] | 17
|
2020-06-16T06:37:03.000Z
|
2020-10-15T00:25:05.000Z
|
src/audio_room/envs/__init__.py
|
pseeth/otoworld
|
636ca717c6e571b465ddcd836fa430ccdc53debf
|
[
"MIT"
] | 5
|
2020-10-18T23:50:49.000Z
|
2021-04-14T02:36:08.000Z
|
src/audio_room/envs/__init__.py
|
pseeth/otoworld
|
636ca717c6e571b465ddcd836fa430ccdc53debf
|
[
"MIT"
] | 2
|
2020-07-17T11:51:30.000Z
|
2020-09-21T14:50:56.000Z
|
from .audio_env import AudioEnv
| 16
| 31
| 0.84375
| 5
| 32
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a348c1cf107c7e9e719f55a7cef7fbf40fc96452
| 1,437
|
py
|
Python
|
utils/parameters.py
|
wcode-wzx/yolov5-for-price-tag-recognition
|
a488de2b2637753d570343093e2c06f6782927a5
|
[
"MIT"
] | 4
|
2021-03-24T09:28:26.000Z
|
2021-04-09T10:15:38.000Z
|
utils/parameters.py
|
wcode-wzx/yolov5-for-price-tag-recognition
|
a488de2b2637753d570343093e2c06f6782927a5
|
[
"MIT"
] | null | null | null |
utils/parameters.py
|
wcode-wzx/yolov5-for-price-tag-recognition
|
a488de2b2637753d570343093e2c06f6782927a5
|
[
"MIT"
] | null | null | null |
import urllib
import urllib.request
class a_path():
    """Path constants under runs/detect/exp/ used by the pipeline.

    NOTE(review): cut_path is empty here — presumably assigned elsewhere
    or unused; confirm before relying on it.
    """
    images_path = 'runs/detect/exp/images/'
    cut_path = ''
    labels_path = 'runs/detect/exp/labels/'
    price_path = 'runs/detect/exp/price/'
class opt(object):
    """Option bag for the first detection pass (weights/dingwei.pt)."""

    def __init__(self):
        # Set every option as an instance attribute so vars(self) lists them.
        settings = {
            'source': 'data/images',
            'agnostic_nms': False,
            'augment': False,
            'classes': None,
            'conf_thres': 0.25,
            'device': '0',
            'exist_ok': False,
            'img_size': 640,
            'iou_thres': 0.45,
            'name': 'exp',
            'project': 'runs/detect',
            'save_conf': False,
            'save_txt': True,
            'view_img': False,
            'weights': 'weights/dingwei.pt',
        }
        for attr, value in settings.items():
            setattr(self, attr, value)

    def list_all_member(self):
        """Print each option as name=value, one per line."""
        for attr, value in vars(self).items():
            print('%s=%s' % (attr, value))
class opt2(object):
    """Option bag for the second detection pass (weights/shibie.pt)."""

    def __init__(self):
        # Set every option as an instance attribute so vars(self) lists them.
        settings = {
            'source': 'runs/detect/exp/images/',
            'agnostic_nms': False,
            'augment': False,
            'classes': None,
            'conf_thres': 0.25,
            'device': '0',
            'exist_ok': False,
            'img_size': 640,
            'iou_thres': 0.45,
            'name': 'exp',
            'project': 'runs/detect',
            'save_conf': False,
            'save_txt': True,
            'view_img': False,
            'weights': 'weights/shibie.pt',
        }
        for attr, value in settings.items():
            setattr(self, attr, value)

    def list_all_member(self):
        """Print each option as name=value, one per line."""
        for attr, value in vars(self).items():
            print('%s=%s' % (attr, value))
| 27.113208
| 47
| 0.592206
| 194
| 1,437
| 4.216495
| 0.304124
| 0.110024
| 0.06357
| 0.062347
| 0.779951
| 0.779951
| 0.713936
| 0.713936
| 0.713936
| 0.713936
| 0
| 0.020428
| 0.284621
| 1,437
| 52
| 48
| 27.634615
| 0.775292
| 0
| 0
| 0.723404
| 0
| 0
| 0.123259
| 0.06337
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.042553
| 0
| 0.276596
| 0.042553
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a355fc33833c9a16a716aff867078f035d30a256
| 35
|
py
|
Python
|
crawler/__init__.py
|
subhendusethi/nytimes-article-crawler
|
8e74831f76452e3ae2c7155b2361536cc31be3e8
|
[
"MIT"
] | 8
|
2017-05-08T03:58:07.000Z
|
2021-04-15T08:42:21.000Z
|
crawler/__init__.py
|
subhendusethi/nytimes-article-crawler
|
8e74831f76452e3ae2c7155b2361536cc31be3e8
|
[
"MIT"
] | 1
|
2020-01-20T07:55:51.000Z
|
2020-03-11T12:18:12.000Z
|
crawler/__init__.py
|
subhendusethi/nytimes-article-crawler
|
8e74831f76452e3ae2c7155b2361536cc31be3e8
|
[
"MIT"
] | 5
|
2018-09-28T13:03:48.000Z
|
2022-03-02T05:51:02.000Z
|
from .nytimescrawler import Crawler
| 35
| 35
| 0.885714
| 4
| 35
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 35
| 1
| 35
| 35
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a3893d60597d53e36cc5611617fa5a0317d039f5
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/clikit/io/output_stream/buffered_output_stream.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/clikit/io/output_stream/buffered_output_stream.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/clikit/io/output_stream/buffered_output_stream.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/53/d2/fd/7a91d37c08f7671c2cc34944c0a1684c1b929cc36e53f4c38d27ffbd97
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.416667
| 0
| 96
| 1
| 96
| 96
| 0.479167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a3a020e08283b8ffc790d728c8f8e2d1f5c64e2c
| 1,290
|
py
|
Python
|
missingnumber_test.py
|
mutakubwa/python_practice
|
5fef46659ee162c3accfee44e2675eeb0e9c6e95
|
[
"Apache-2.0"
] | null | null | null |
missingnumber_test.py
|
mutakubwa/python_practice
|
5fef46659ee162c3accfee44e2675eeb0e9c6e95
|
[
"Apache-2.0"
] | null | null | null |
missingnumber_test.py
|
mutakubwa/python_practice
|
5fef46659ee162c3accfee44e2675eeb0e9c6e95
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from missingnumber import missing_number_1, missing_number_2
class MissingNumber_Test(unittest.TestCase):
    """Exercise missing_number_1 and missing_number_2 on the same fixtures."""

    def _check(self, fn, count, numbers, expected):
        # Shared assertion used by every test case below.
        self.assertEqual(fn(count, numbers), expected)

    def test_missingnumber1(self):
        self._check(missing_number_1, 5, [2, 2, 3, 4, 5], 1)

    def test_missingnumber2(self):
        self._check(missing_number_1, 5, [2, 2, 5, 4, 1], 3)

    def test_missingnumber3(self):
        self._check(missing_number_1, 5, [5, 5, 3, 2, 4], 1)

    def test_missingnumber4(self):
        self._check(missing_number_2, 5, [2, 2, 3, 4, 5], 1)

    def test_missingnumber5(self):
        self._check(missing_number_2, 5, [2, 2, 5, 4, 1], 3)

    def test_missingnumber6(self):
        self._check(missing_number_2, 5, [5, 5, 3, 2, 4], 1)
# Run the suite when this file is executed directly (python missingnumber_test.py).
if __name__ == '__main__':
    unittest.main()
| 30.714286
| 74
| 0.646512
| 153
| 1,290
| 5.248366
| 0.189542
| 0.129514
| 0.104608
| 0.171856
| 0.731009
| 0.731009
| 0.731009
| 0.731009
| 0.731009
| 0.731009
| 0
| 0.083247
| 0.255039
| 1,290
| 42
| 75
| 30.714286
| 0.752341
| 0
| 0
| 0.685714
| 0
| 0
| 0.006197
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 1
| 0.171429
| false
| 0
| 0.057143
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
42e38efbda5c20c695510ee3ddb0d7a82a4e6b8e
| 144
|
py
|
Python
|
agent/controller/__init__.py
|
intelligent-control-lab/Composable_Agent_Toolbox
|
39d71cdc0475ae6901cb30b63d181737bea35889
|
[
"MIT"
] | 4
|
2020-10-20T14:30:09.000Z
|
2022-02-19T23:46:04.000Z
|
agent/controller/__init__.py
|
intelligent-control-lab/Composable_Agent_Toolbox
|
39d71cdc0475ae6901cb30b63d181737bea35889
|
[
"MIT"
] | null | null | null |
agent/controller/__init__.py
|
intelligent-control-lab/Composable_Agent_Toolbox
|
39d71cdc0475ae6901cb30b63d181737bea35889
|
[
"MIT"
] | 1
|
2022-03-12T10:46:38.000Z
|
2022-03-12T10:46:38.000Z
|
from .controller import Controller
from .naive_controller import NaiveController, NaiveJointController
from .cbf_controller import CBFController
| 48
| 67
| 0.888889
| 15
| 144
| 8.4
| 0.533333
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 144
| 3
| 68
| 48
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
283a67c915a5ff78e753fafe73693a161b6ebbdc
| 112
|
py
|
Python
|
design_bench/oracles/sklearn/__init__.py
|
brandontrabucco/design_bench
|
824516ec59396aded3ca55ec7c1c313626ecaceb
|
[
"MIT"
] | 27
|
2020-06-30T00:57:12.000Z
|
2022-03-25T16:24:11.000Z
|
design_bench/oracles/sklearn/__init__.py
|
brandontrabucco/design_bench
|
824516ec59396aded3ca55ec7c1c313626ecaceb
|
[
"MIT"
] | 7
|
2021-02-16T06:25:02.000Z
|
2022-03-31T17:21:17.000Z
|
design_bench/oracles/sklearn/__init__.py
|
brandontrabucco/design_bench
|
824516ec59396aded3ca55ec7c1c313626ecaceb
|
[
"MIT"
] | 5
|
2021-07-19T12:16:32.000Z
|
2022-03-01T16:56:16.000Z
|
from .random_forest_oracle import RandomForestOracle
from .gaussian_process_oracle import GaussianProcessOracle
| 37.333333
| 58
| 0.910714
| 12
| 112
| 8.166667
| 0.75
| 0.244898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 112
| 2
| 59
| 56
| 0.942308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
284394bdfa22179834416943d0f83911c4f3a37b
| 28
|
py
|
Python
|
pigar/tests/imports_example/subfoo/foo.py
|
yasirroni/pigar
|
823a3c2478361d53dba408bea75e1766d253f3c0
|
[
"BSD-3-Clause"
] | 959
|
2016-08-15T10:02:24.000Z
|
2022-03-31T12:35:39.000Z
|
pigar/tests/imports_example/subfoo/foo.py
|
yasirroni/pigar
|
823a3c2478361d53dba408bea75e1766d253f3c0
|
[
"BSD-3-Clause"
] | 67
|
2016-10-02T20:48:26.000Z
|
2022-01-08T16:29:58.000Z
|
pigar/tests/imports_example/subfoo/foo.py
|
yasirroni/pigar
|
823a3c2478361d53dba408bea75e1766d253f3c0
|
[
"BSD-3-Clause"
] | 64
|
2016-11-30T11:21:36.000Z
|
2022-02-18T19:33:37.000Z
|
def foo():
    """Print the marker string "FOO" to stdout."""
    message = "FOO"
    print(message)
| 9.333333
| 16
| 0.5
| 4
| 28
| 3.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 28
| 2
| 17
| 14
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.