hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
395981eb97ba0d5630afef67430f063c0cfadca3
| 65
|
py
|
Python
|
src/setup.py
|
ThaDeveloper/grind
|
fa90b65d12e6d9b3d658b132874801ecda08c57f
|
[
"MIT"
] | 1
|
2019-11-06T22:26:26.000Z
|
2019-11-06T22:26:26.000Z
|
src/setup.py
|
ThaDeveloper/grind
|
fa90b65d12e6d9b3d658b132874801ecda08c57f
|
[
"MIT"
] | 5
|
2021-03-19T02:49:44.000Z
|
2021-06-10T19:13:00.000Z
|
src/setup.py
|
ThaDeveloper/grind
|
fa90b65d12e6d9b3d658b132874801ecda08c57f
|
[
"MIT"
] | null | null | null |
import setuptools
setuptools.setup(name='grind', version='1.0')
| 16.25
| 45
| 0.753846
| 9
| 65
| 5.444444
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.076923
| 65
| 3
| 46
| 21.666667
| 0.783333
| 0
| 0
| 0
| 0
| 0
| 0.123077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
395bf15f68b877308dd905ac4aed62d8edba6f44
| 174
|
py
|
Python
|
LeetCode/Google Interview Questions/Unique Number of Occurrences.py
|
UtkarshPathrabe/Competitive-Coding
|
ba322fbb1b88682d56a9b80bdd92a853f1caa84e
|
[
"MIT"
] | 13
|
2021-09-02T07:30:02.000Z
|
2022-03-22T19:32:03.000Z
|
LeetCode/Google Interview Questions/Unique Number of Occurrences.py
|
UtkarshPathrabe/Competitive-Coding
|
ba322fbb1b88682d56a9b80bdd92a853f1caa84e
|
[
"MIT"
] | null | null | null |
LeetCode/Google Interview Questions/Unique Number of Occurrences.py
|
UtkarshPathrabe/Competitive-Coding
|
ba322fbb1b88682d56a9b80bdd92a853f1caa84e
|
[
"MIT"
] | 3
|
2021-08-24T16:06:22.000Z
|
2021-09-17T15:39:53.000Z
|
class Solution:
def uniqueOccurrences(self, arr: List[int]) -> bool:
countsMap = Counter(arr)
return len(countsMap.keys()) == len(set(countsMap.values()))
| 43.5
| 68
| 0.649425
| 20
| 174
| 5.65
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195402
| 174
| 4
| 68
| 43.5
| 0.807143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
39609757874076a7f9fb1412a7aaba1659bb2caf
| 86
|
py
|
Python
|
workschedule/shift_exception.py
|
point85/PyShift
|
46d0e1c23d3c570416633f68d416abd587b8e22f
|
[
"MIT"
] | null | null | null |
workschedule/shift_exception.py
|
point85/PyShift
|
46d0e1c23d3c570416633f68d416abd587b8e22f
|
[
"MIT"
] | null | null | null |
workschedule/shift_exception.py
|
point85/PyShift
|
46d0e1c23d3c570416633f68d416abd587b8e22f
|
[
"MIT"
] | null | null | null |
## Base class for more specific exceptions
class PyShiftException(Exception):
pass
| 28.666667
| 42
| 0.790698
| 10
| 86
| 6.8
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151163
| 86
| 3
| 43
| 28.666667
| 0.931507
| 0.453488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
3963f28189e11bd041986953f7c2ffe5e7a4d4a7
| 288
|
py
|
Python
|
equipment/framework/Mail/AbstractMail.py
|
didacelgueta/equipment
|
12cd86bfe4b70bce3e2578e3ec79fc4c0f76c322
|
[
"MIT"
] | 1
|
2022-03-02T11:32:10.000Z
|
2022-03-02T11:32:10.000Z
|
equipment/framework/Mail/AbstractMail.py
|
didacelgueta/equipment
|
12cd86bfe4b70bce3e2578e3ec79fc4c0f76c322
|
[
"MIT"
] | 35
|
2022-03-02T14:33:49.000Z
|
2022-03-30T08:14:26.000Z
|
equipment/framework/Mail/AbstractMail.py
|
didacelgueta/equipment
|
12cd86bfe4b70bce3e2578e3ec79fc4c0f76c322
|
[
"MIT"
] | 1
|
2022-03-24T11:52:01.000Z
|
2022-03-24T11:52:01.000Z
|
import abc
from typing import Union
from equipment.framework.Mail.Email.Email import Email
from equipment.framework.Mail.Email.EmailFactory import EmailFactory
class AbstractMail(abc.ABC):
def send(self, email: Union[Email, EmailFactory]) -> bool:
raise NotImplementedError
| 28.8
| 68
| 0.788194
| 36
| 288
| 6.305556
| 0.5
| 0.114537
| 0.193833
| 0.229075
| 0.273128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135417
| 288
| 9
| 69
| 32
| 0.911647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.571429
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
3969eae37682dde13327b382a75d05a161284884
| 7,164
|
py
|
Python
|
interactiontransformer/utils.py
|
jlevy44/InteractionTransformer
|
e92473889cca25a292baa4a54e84490bf7699063
|
[
"MIT"
] | 4
|
2019-12-16T08:54:42.000Z
|
2021-11-05T15:29:52.000Z
|
interactiontransformer/utils.py
|
jlevy44/InteractionTransformer
|
e92473889cca25a292baa4a54e84490bf7699063
|
[
"MIT"
] | 5
|
2020-05-20T00:55:06.000Z
|
2021-08-28T20:23:09.000Z
|
interactiontransformer/utils.py
|
jlevy44/InteractionTransformer
|
e92473889cca25a292baa4a54e84490bf7699063
|
[
"MIT"
] | null | null | null |
from openml import datasets, tasks, runs, flows, config
import os, pandas, sklearn, arff, pprint, numpy, seaborn
import numpy as np, pandas as pd, pickle
import copy
from sklearn.model_selection import StratifiedKFold
import dask
MANUSCRIPT_IDs=np.array([4154, 4329, 4136, 4137, 73, 143, 142, 72, 77, 120, 135, 139, 146, 161, 162, 273, 274, 262, 264, 267, 269, 256, 257, 258, 260, 293, 246, 351, 354, 346, 1161, 1162, 1163, 1164, 1165, 1166, 1141, 1144, 1145, 1146, 890, 1147, 1149, 1150, 1181, 1182, 1178, 1039, 1016, 1236, 1372, 1374, 1375, 1376, 1205, 1212, 1216, 1217, 1219, 1237, 1238, 1241, 1151, 1152, 1153, 1154, 1155, 1157, 1158, 1159, 1160, 1561, 1562, 1563, 1564, 1597, 1085, 446, 1042, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1458, 1412, 1441, 1448, 1449, 1450, 1131, 1132, 1133, 1134, 1135, 1137, 1138, 1139, 1502, 1136, 1369, 41946, 41945, 1371, 1180, 40900, 41949, 41967, 41966, 41964, 1218, 1373, 1211, 1148, 40645, 40646, 40647, 40648, 40649, 40650, 40660, 40588, 40589, 40590, 40665, 40666, 40669, 41228, 41672, 41674, 41675, 41679, 41680, 41682, 41684, 41685, 40514, 40515, 40517, 40518, 40922, 40910, 41144, 41145, 41156, 41157, 41158, 40693, 40702, 40704, 40705, 40706, 40710, 40680, 40681, 40683, 40690, 40713, 40714, 40999, 41026, 41005, 41007, 41538, 41521, 40591, 40592, 40593, 40594, 40595, 40596, 40597, 41496, 4134, 1485, 40994, 40983, 40978, 4534, 132, 1220, 1442, 1443, 1444, 1446, 1547, 1467, 1447, 1451, 1452, 1460, 1463, 1566, 1556, 1495, 1506, 1507, 1496, 1498, 1510, 1511, 1524, 1484, 1488, 1473, 1490, 121, 40701, 126, 41973, 376, 41977, 41978, 41976, 1600, 124, 153, 1471, 1462, 23499, 41896, 41897, 41714, 41715, 41716, 41711, 41712, 41713, 41719, 41720, 41721, 41722, 41723, 41717, 41718, 41709, 41710, 41708, 41814, 41815, 41816, 41803, 41804, 41805, 41806, 41807, 41808, 41800, 41801, 41802, 41817, 41818, 41819, 41811, 41812, 41813, 41798, 41799, 41820, 41821, 41809, 41810, 41879, 41880, 41881, 41893, 41895, 41874, 41875, 41876, 41888, 41889, 41890, 41882, 41883, 41884, 41871, 41872, 41873, 41885, 41886, 41887, 41891, 41892, 41877, 41878, 140, 41759, 41760, 41761, 41767, 41768, 41769, 41754, 41755, 41756, 41748, 41749, 41750, 41751, 41752, 41753, 41762, 41763, 41764, 41770, 
41771, 41772, 41765, 41766, 41757, 41758, 41868, 41869, 41870, 41849, 41850, 41851, 41846, 41847, 41848, 41862, 41863, 41864, 41865, 41866, 41867, 41857, 41858, 41859, 41852, 41853, 41854, 41860, 41861, 41855, 41856, 41737, 41738, 41739, 41740, 41741, 41742, 41727, 41707, 41728, 41729, 41730, 41731, 41745, 41746, 41747, 41724, 41725, 41726, 41734, 41735, 41736, 41743, 41744, 41732, 41733, 41786, 41787, 41788, 41789, 41790, 41791, 41792, 41793, 41794, 41795, 41796, 41797, 41775, 41776, 41777, 41778, 41779, 41780, 41781, 41782, 41783, 41773, 41774, 41784, 41785, 41838, 41839, 41840, 41833, 41834, 41835, 41825, 41826, 41827, 41841, 41842, 41843, 41822, 41823, 41824, 41828, 41829, 41830, 41844, 41845, 41836, 41837, 41831, 41832, 128, 41998, 122, 152, 41544, 1480, 1122, 41142, 41161, 41150, 41159, 41143, 41146, 1169, 1140, 1142, 1486, 4135, 40981, 23517, 131, 1167, 1242, 41894, 1130, 1143, 1235, 1461, 1377, 1558, 1464, 164, 59, 137, 459, 450, 444, 448, 41898, 40, 53, 43, 312, 316, 336, 337, 334, 742, 743, 744, 745, 752, 753, 748, 749, 740, 741, 746, 747, 750, 751, 754, 755, 756, 758, 759, 761, 770, 771, 766, 767, 764, 765, 768, 769, 772, 773, 762, 763, 788, 789, 784, 785, 787, 775, 776, 782, 783, 779, 780, 777, 778, 938, 941, 953, 951, 946, 947, 945, 949, 950, 942, 943, 955, 956, 958, 954, 959, 962, 964, 965, 983, 978, 979, 980, 969, 974, 976, 977, 970, 971, 973, 1038, 1045, 1022, 1040, 1046, 1048, 1025, 1026, 1020, 1021, 1075, 1104, 1073, 1107, 1069, 1071, 1068, 927, 928, 919, 920, 916, 917, 918, 923, 924, 922, 925, 926, 921, 915, 914, 910, 911, 907, 905, 906, 913, 909, 912, 908, 896, 903, 904, 894, 901, 902, 882, 884, 895, 886, 900, 934, 935, 936, 937, 931, 932, 933, 929, 991, 987, 996, 997, 988, 994, 995, 1019, 1013, 1014, 1015, 1006, 1009, 1011, 1012, 1004, 1005, 1066, 1067, 1050, 1064, 1065, 1054, 1055, 1063, 1056, 1061, 1062, 1049, 1059, 1060, 1120, 1121, 1370, 251, 310, 311, 350, 357, 801, 793, 794, 790, 797, 799, 791, 792, 796, 795, 800, 774, 806, 803, 807, 808, 
805, 479, 476, 467, 472, 461, 463, 464, 465, 717, 718, 719, 720, 682, 683, 713, 714, 715, 716, 721, 722, 734, 735, 723, 724, 732, 733, 736, 737, 728, 729, 730, 731, 726, 727, 725, 812, 818, 819, 816, 817, 811, 814, 821, 815, 804, 813, 833, 834, 824, 825, 832, 829, 830, 820, 826, 827, 828, 823, 851, 837, 838, 849, 850, 843, 847, 848, 845, 846, 841, 835, 836, 862, 863, 867, 868, 864, 855, 857, 860, 859, 853, 865, 866, 879, 873, 874, 869, 870, 877, 878, 875, 876, 871, 887, 888, 889, 891, 892, 893, 885, 880, 881, 31, 3, 1489, 1504, 1494, 50, 1116, 1479, 1453, 1156, 44, 151, 37, 1487, 335, 333, 1455])
def download_openml(ID=0,api_key='',tmp='tmp',dataset_path='datasets'):
config.apikey = api_key
os.makedirs(tmp,exist_ok=True)
os.makedirs(dataset_path,exist_ok=True)
config.set_cache_directory(os.path.abspath('tmp'))
if not os.path.exists('{}/{}.p'.format(datasets_path,ID)):
try:
odata = datasets.get_dataset(int(ID))
X, y, categorical, attribute_names = odata.get_data(
target=odata.default_target_attribute)
y=y.astype(str)
if not isinstance(y,np.ndarray):
y=y.values
X=np.hstack((X,y.reshape(-1,1)))
df=pd.DataFrame(X, columns=attribute_names+[odata.default_target_attribute])
pickle.dump(dict(cat=categorical,names=attribute_names+[odata.default_target_attribute],X=df),open('{}/{}.p'.format(dataset_path,ID),'wb'))
return 1
except Exception as e:
return 0
return 2
def download_multiple_openml(IDs=MANUSCRIPT_IDs,api_key='',tmp='tmp',dataset_path='datasets'):
dask.compute(*[dask.delayed(download_openml)(ID,api_key,tmp,dataset_path) for ID in IDs], scheduler='processes', num_workers=10)
def get_dataset(dataset='datasets/0.p'):
d=pickle.load(open(dataset,'rb'))
return d['X'].iloc[:,:-1],d['X'].iloc[:,-1],np.array(d['cat'])
def preprocess_data(X,y,cat, return_xy=False):
if not X.isnull().sum().sum():
X_nocat,X_cat=X.loc[:,~cat],X.loc[:,cat]
if X_cat.shape[1]:
ncat=np.array(X_cat.nunique().tolist())
X_cat=X_cat.loc[:,ncat<100]
ncat=np.array(X_cat.nunique().tolist())
X_cat=pd.get_dummies(X_cat,columns=X_cat.columns.values[ncat>2]).apply(lambda x: x==x.unique()[0],axis=0)
X_cat=X_cat.astype(int)
X=pd.concat([X_nocat,X_cat],axis=1).astype(float)
y=(y==y.unique()[0]).astype(float)
if return_xy:
yield X,y
else:
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
info_dict=dict(n=X.shape[0],p=X.shape[1],pcat=X_cat.shape[1],cb=max(1.-np.mean(y),np.mean(y)))
for train, test in cv.split(X,y):
X_train, X_test, y_train, y_test = X.iloc[train, :], X.iloc[test, :], y.iloc[train], y.iloc[test]
yield X_train, X_test, y_train, y_test, copy.deepcopy(info_dict)
| 125.684211
| 4,598
| 0.656477
| 1,180
| 7,164
| 3.930508
| 0.766949
| 0.011212
| 0.005821
| 0.017464
| 0.054334
| 0.054334
| 0.036654
| 0.023286
| 0.013799
| 0
| 0
| 0.518468
| 0.176159
| 7,164
| 56
| 4,599
| 127.928571
| 0.267367
| 0
| 0
| 0.039216
| 0
| 0
| 0.009631
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0
| 0.117647
| 0
| 0.27451
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
396eefa606edee58bbd568f0cb16e09f7c914c23
| 526
|
py
|
Python
|
diagnosis/models/__init__.py
|
Naceine/diagnosis
|
0068fcd597d7607aee1723a551ba47e72047e098
|
[
"BSD-3-Clause"
] | 3
|
2019-09-29T11:13:38.000Z
|
2021-12-11T03:23:12.000Z
|
diagnosis/models/__init__.py
|
victor-iyiola/diagnosis
|
0068fcd597d7607aee1723a551ba47e72047e098
|
[
"BSD-3-Clause"
] | 1
|
2022-02-10T00:36:16.000Z
|
2022-02-10T00:36:16.000Z
|
diagnosis/models/__init__.py
|
Naceine/diagnosis
|
0068fcd597d7607aee1723a551ba47e72047e098
|
[
"BSD-3-Clause"
] | 3
|
2020-06-04T15:50:08.000Z
|
2021-11-26T14:14:36.000Z
|
"""`diagnosis.models` - for instantiating models, including data pre-processing and loss functions.
@author
Victor I. Afolabi
Artificial Intelligence Expert & Researcher.
Email: javafolabi@gmail.com
GitHub: https://github.com/victor-iyiola
@project
File: __init__.py
Package: diagnosis.models
Created on 10 July, 2019 @ 02:25 PM.
@license
BSD-3 Clause license.
Copyright (c) 2019. Victor I. Afolabi. All rights reserved.
"""
from .docproduct import *
from . import bert
| 27.684211
| 99
| 0.690114
| 65
| 526
| 5.523077
| 0.8
| 0.083565
| 0.077994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036496
| 0.218631
| 526
| 19
| 100
| 27.684211
| 0.836983
| 0.8327
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
3978633c733811885535151c5b199d09d930dc86
| 3,365
|
py
|
Python
|
test/tests/binaryop.py
|
jonco3/dynamic
|
76d10b012a7860595c7d9abbdf542c7d8f2a4d53
|
[
"MIT"
] | 1
|
2020-11-26T23:37:19.000Z
|
2020-11-26T23:37:19.000Z
|
test/tests/binaryop.py
|
jonco3/dynamic
|
76d10b012a7860595c7d9abbdf542c7d8f2a4d53
|
[
"MIT"
] | null | null | null |
test/tests/binaryop.py
|
jonco3/dynamic
|
76d10b012a7860595c7d9abbdf542c7d8f2a4d53
|
[
"MIT"
] | null | null | null |
# output: ok
a, b, c, d, e, f = 1000, 1000, 1000, 1000, 1000, 1000
g, h, i, j, k, l = 2, 1000, 1000, 1000, 1000, 1000
a = a + 1
b = b - 2
c = c * 3
d = d / 4
e = e // 5
f = f % 6
g = g ** 7
h = h << 8
i = i >> 9
j = j & 10
k = k ^ 11
l = l | 12
assert a == 1001
assert b == 998
assert c == 3000
assert d == 250
assert e == 200
assert f == 4
assert g == 128
assert h == 256000
assert i == 1
assert j == 8
assert k == 995
assert l == 1004
class Wrapped:
def __init__(self, initial):
self.value = initial
def __add__(self, other):
return Wrapped(self.value + other)
def __sub__(self, other):
return Wrapped(self.value - other)
def __mul__(self, other):
return Wrapped(self.value * other)
def __truediv__(self, other):
return Wrapped(self.value / other)
def __floordiv__(self, other):
return Wrapped(self.value // other)
def __mod__(self, other):
return Wrapped(self.value % other)
def __pow__(self, other):
return Wrapped(self.value ** other)
def __lshift__(self, other):
return Wrapped(self.value << other)
def __rshift__(self, other):
return Wrapped(self.value >> other)
def __or__(self, other):
return Wrapped(self.value | other)
def __and__(self, other):
return Wrapped(self.value & other)
def __xor__(self, other):
return Wrapped(self.value ^ other)
def __radd__(self, other):
return other + self.value
def __rsub__(self, other):
return other - self.value
def __rmul__(self, other):
return other * self.value
def __rtruediv__(self, other):
return other / self.value
def __rfloordiv__(self, other):
return other // self.value
def __rmod__(self, other):
return other % self.value
def __rpow__(self, other):
return other ** self.value
def __rlshift__(self, other):
return other << self.value
def __rrshift__(self, other):
return other >> self.value
def __ror__(self, other):
return other | self.value
def __rand__(self, other):
return other & self.value
def __rxor__(self, other):
return other ^ self.value
a, b, c, d, e, f = Wrapped(1000), Wrapped(1000), Wrapped(1000), Wrapped(1000), Wrapped(1000), Wrapped(1000)
g, h, i, j, k, l = Wrapped(2), Wrapped(1000), Wrapped(1000), Wrapped(1000), Wrapped(1000), Wrapped(1000)
a = a + 1
b = b - 2
c = c * 3
d = d / 4
e = e // 5
f = f % 6
g = g ** 7
h = h << 8
i = i >> 9
j = j & 10
k = k ^ 11
l = l | 12
assert a.value == 1001
assert b.value == 998
assert c.value == 3000
assert d.value == 250
assert e.value == 200
assert f.value == 4
assert g.value == 128
assert h.value == 256000
assert i.value == 1
assert j.value == 8
assert k.value == 995
assert l.value == 1004
a, b, c, d, e, f = 1000, 1000, 1000, 1000, 1000, 1000
g, h, i, j, k, l = 2, 1000, 1000, 1000, 1000, 1000
a = a + Wrapped(1)
b = b - Wrapped(2)
c = c * Wrapped(3)
d = d / Wrapped(4)
e = e // Wrapped(5)
f = f % Wrapped(6)
g = g ** Wrapped(7)
h = h << Wrapped(8)
i = i >> Wrapped(9)
j = j & Wrapped(10)
k = k ^ Wrapped(11)
l = l | Wrapped(12)
assert a == 1001
assert b == 998
assert c == 3000
assert d == 250
assert e == 200
assert f == 4
assert g == 128
assert h == 256000
assert i == 1
assert j == 8
assert k == 995
assert l == 1004
print('ok')
| 23.531469
| 107
| 0.594651
| 541
| 3,365
| 3.513863
| 0.131238
| 0.118359
| 0.189374
| 0.138874
| 0.749605
| 0.749605
| 0.731194
| 0.542872
| 0.296686
| 0.296686
| 0
| 0.116505
| 0.265379
| 3,365
| 142
| 108
| 23.697183
| 0.652508
| 0.002972
| 0
| 0.4
| 0
| 0
| 0.000596
| 0
| 0
| 0
| 0
| 0
| 0.276923
| 1
| 0.192308
| false
| 0
| 0
| 0.184615
| 0.384615
| 0.007692
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
397956f488e4d6b404b647125e2db7eee5a5d84e
| 1,285
|
py
|
Python
|
utils/pattern/pattern_base.py
|
ElevenPaths/FARO
|
4d5585a1f08ce74baff3acf92668646dc9919439
|
[
"MIT"
] | 8
|
2020-04-17T11:35:14.000Z
|
2022-01-13T05:07:37.000Z
|
utils/pattern/pattern_base.py
|
ElevenPaths/FARO
|
4d5585a1f08ce74baff3acf92668646dc9919439
|
[
"MIT"
] | 1
|
2020-08-03T15:38:14.000Z
|
2020-08-03T15:38:14.000Z
|
utils/pattern/pattern_base.py
|
ElevenPaths/FARO
|
4d5585a1f08ce74baff3acf92668646dc9919439
|
[
"MIT"
] | 1
|
2020-09-28T02:50:34.000Z
|
2020-09-28T02:50:34.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from utils.utils import clean_text
class PluginPatternBase:
def get_strict_regexp(self):
return self.dict_regex_strict
def get_lax_regexp(self):
return self.dict_regex_lax
def get_strict_entities(self):
return self._strict_entities
def get_consolidated_lax_entities(self):
return self._consolidated_entities
def get_unconsolidated_lax_entities(self):
return self._unconsolidated_entities
def get_plugin_path(self):
return self.plugin_path
@staticmethod
def _dict_to_regex_struct(_dict):
if not _dict:
return []
return [(k, v) for v, k in _dict.items()]
@staticmethod
def clean_entity(text):
return clean_text(text)
def strict_regexp(self):
pass
def lax_regexp(self):
pass
def validate(self, ent):
pass
def __init__(self, plugin_path, regex_lax, regex_strict):
self._strict_entities = {}
self._consolidated_entities = {}
self._unconsolidated_entities = {}
self.dict_regex_lax = self._dict_to_regex_struct(regex_lax)
self.dict_regex_strict = self._dict_to_regex_struct(regex_strict)
self.plugin_path = plugin_path
| 24.245283
| 73
| 0.673152
| 162
| 1,285
| 4.932099
| 0.265432
| 0.045056
| 0.105131
| 0.082603
| 0.20025
| 0.137672
| 0
| 0
| 0
| 0
| 0
| 0.001034
| 0.247471
| 1,285
| 52
| 74
| 24.711538
| 0.825233
| 0.032685
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.342857
| false
| 0.085714
| 0.028571
| 0.2
| 0.657143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
398a169253e6e1ec5b4e70385c86d5d011ee9f49
| 208
|
py
|
Python
|
ch01_intro/p1.py
|
mikeckennedy/gk_python_demos
|
e9a81c5a0775f0ad368843ccec434c5b1d588d71
|
[
"MIT"
] | null | null | null |
ch01_intro/p1.py
|
mikeckennedy/gk_python_demos
|
e9a81c5a0775f0ad368843ccec434c5b1d588d71
|
[
"MIT"
] | null | null | null |
ch01_intro/p1.py
|
mikeckennedy/gk_python_demos
|
e9a81c5a0775f0ad368843ccec434c5b1d588d71
|
[
"MIT"
] | null | null | null |
print("About to import p2")
from p2 import get_user_name
print("Done importing p2")
print("Downloading data, we'll save it to your profile")
name = get_user_name()
print("Using /Users/{}/data".format(name))
| 26
| 56
| 0.740385
| 35
| 208
| 4.285714
| 0.628571
| 0.093333
| 0.146667
| 0.213333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.120192
| 208
| 8
| 57
| 26
| 0.803279
| 0
| 0
| 0
| 0
| 0
| 0.488038
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0.666667
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 4
|
3993b323c7167ee26363ee489ca98fb1151fe937
| 25
|
py
|
Python
|
rasa/version.py
|
yurilq/rasa01
|
315f0878cf66c39143bc298acd665a05bff8510b
|
[
"Apache-2.0"
] | 1
|
2020-12-19T10:17:18.000Z
|
2020-12-19T10:17:18.000Z
|
rasa/version.py
|
yurilq/rasa01
|
315f0878cf66c39143bc298acd665a05bff8510b
|
[
"Apache-2.0"
] | null | null | null |
rasa/version.py
|
yurilq/rasa01
|
315f0878cf66c39143bc298acd665a05bff8510b
|
[
"Apache-2.0"
] | 1
|
2020-12-19T10:10:55.000Z
|
2020-12-19T10:10:55.000Z
|
__version__ = "1.0.0rc9"
| 12.5
| 24
| 0.68
| 4
| 25
| 3.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 0.12
| 25
| 1
| 25
| 25
| 0.409091
| 0
| 0
| 0
| 0
| 0
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
3995ab10ac9e94f32b4f07e6e1ab7695d20eaf1d
| 27
|
py
|
Python
|
homeassistant/components/solax/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
homeassistant/components/solax/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/solax/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""The solax component."""
| 13.5
| 26
| 0.62963
| 3
| 27
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 1
| 27
| 27
| 0.708333
| 0.740741
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
39a8586fc20a644391e1e4e582809d826e0ec684
| 263
|
py
|
Python
|
pivot/tests.py
|
uw-it-aca/pivot
|
a3e404b30f4d2ebf37608d7c4595edb667df0222
|
[
"Apache-2.0"
] | 4
|
2017-11-07T23:49:45.000Z
|
2019-11-26T10:59:50.000Z
|
pivot/tests.py
|
uw-it-aca/pivot
|
a3e404b30f4d2ebf37608d7c4595edb667df0222
|
[
"Apache-2.0"
] | 204
|
2017-02-08T00:12:51.000Z
|
2022-02-02T23:52:46.000Z
|
pivot/tests.py
|
uw-it-aca/pivot
|
a3e404b30f4d2ebf37608d7c4595edb667df0222
|
[
"Apache-2.0"
] | 2
|
2018-03-20T22:41:27.000Z
|
2019-07-10T21:40:36.000Z
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
"""
Tests for the pivot app.
"""
from pivot.test.context import ContextProcessorTest
from pivot.test.csvapi import CsvDataApiTest
from pivot.test.misc import PivotConfigTest
| 23.909091
| 51
| 0.798479
| 36
| 263
| 5.833333
| 0.75
| 0.128571
| 0.185714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025862
| 0.117871
| 263
| 10
| 52
| 26.3
| 0.87931
| 0.410646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
39c55ce1e035c65cad05929f12d3c687e6db616c
| 190
|
py
|
Python
|
accounts/admin.py
|
JAYBLA/mobile_payments
|
9fcd7b4b3f894e14225fd1ebefc31a9e18c11112
|
[
"MIT"
] | 4
|
2022-01-03T16:26:42.000Z
|
2022-01-04T02:55:52.000Z
|
accounts/admin.py
|
JAYBLA/mobile_payments
|
9fcd7b4b3f894e14225fd1ebefc31a9e18c11112
|
[
"MIT"
] | null | null | null |
accounts/admin.py
|
JAYBLA/mobile_payments
|
9fcd7b4b3f894e14225fd1ebefc31a9e18c11112
|
[
"MIT"
] | 1
|
2022-01-16T21:41:20.000Z
|
2022-01-16T21:41:20.000Z
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from .models import Profile
User = get_user_model()
admin.site.register(User)
admin.site.register(Profile)
| 19
| 46
| 0.815789
| 29
| 190
| 5.206897
| 0.448276
| 0.13245
| 0.225166
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 190
| 9
| 47
| 21.111111
| 0.888235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
39c7048fd0f5c222bf39ed309f3ba33618945132
| 324
|
py
|
Python
|
Validation/CaloTowers/python/CaloTowersClient_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Validation/CaloTowers/python/CaloTowersClient_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Validation/CaloTowers/python/CaloTowersClient_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
# DQM harvesting module for the CaloTowers validation client. The empty
# outputFile string means no standalone ROOT file is written by the harvester.
calotowersClient = DQMEDHarvester("CaloTowersClient",
    # outputFile = cms.untracked.string('CaloTowersHarvestingME.root'),
    outputFile = cms.untracked.string(''),
    DQMDirName = cms.string("/") # root directory
)
| 36
| 71
| 0.765432
| 30
| 324
| 8.266667
| 0.6
| 0.241935
| 0.177419
| 0.225806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123457
| 324
| 8
| 72
| 40.5
| 0.873239
| 0.246914
| 0
| 0
| 0
| 0
| 0.07173
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
f2d0de465a55797339086f1d035cb9f4d7c4a0d2
| 186
|
py
|
Python
|
scripts/training-data-processing/vector_utilities/utils_readwrite.py
|
DDS-Lab/disaster-image-processing
|
5d87e858c8d933bf4d5288061d121129fadb863d
|
[
"MIT"
] | 10
|
2018-11-07T14:39:15.000Z
|
2022-03-30T11:27:27.000Z
|
scripts/training-data-processing/vector_utilities/utils_readwrite.py
|
DDS-Lab/disaster-image-processing
|
5d87e858c8d933bf4d5288061d121129fadb863d
|
[
"MIT"
] | 1
|
2019-09-24T07:22:33.000Z
|
2019-09-24T07:22:33.000Z
|
scripts/training-data-processing/vector_utilities/utils_readwrite.py
|
DDS-Lab/disaster-image-processing
|
5d87e858c8d933bf4d5288061d121129fadb863d
|
[
"MIT"
] | 5
|
2018-08-02T21:25:30.000Z
|
2020-08-08T18:52:50.000Z
|
import geopandas as gpd
def load_file(file_path):
    """Read a vector file from *file_path* and return it as a GeoDataFrame."""
    return gpd.GeoDataFrame(gpd.read_file(file_path))
def writeFile(shape, new_file):
    """Write the GeoDataFrame *shape* to *new_file* (format inferred from extension)."""
    shape.to_file(new_file)
| 16.909091
| 54
| 0.741935
| 29
| 186
| 4.517241
| 0.517241
| 0.122137
| 0.183206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 186
| 10
| 55
| 18.6
| 0.845161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
f2d93bd98441081a60423726cda89abcc0a84869
| 180
|
py
|
Python
|
scripts/deploy.py
|
banteg/pink-duck
|
f770f80fe64589949a2f8319f57f77db5ac825f4
|
[
"MIT"
] | 3
|
2020-12-08T23:03:47.000Z
|
2020-12-09T04:11:42.000Z
|
scripts/deploy.py
|
banteg/pink-duck
|
f770f80fe64589949a2f8319f57f77db5ac825f4
|
[
"MIT"
] | null | null | null |
scripts/deploy.py
|
banteg/pink-duck
|
f770f80fe64589949a2f8319f57f77db5ac825f4
|
[
"MIT"
] | 1
|
2021-09-06T02:28:24.000Z
|
2021-09-06T02:28:24.000Z
|
import click
from brownie import Duck, accounts
def main():
    """Prompt for a locally stored account and deploy the Duck contract from it."""
    # `accounts.load()` with no argument lists the available account names;
    # the chosen name is then loaded as the deployer account.
    available = accounts.load()
    chosen = click.prompt("account", type=click.Choice(available))
    user = accounts.load(chosen)
    Duck.deploy({"from": user})
| 22.5
| 85
| 0.7
| 24
| 180
| 5.25
| 0.625
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 180
| 7
| 86
| 25.714286
| 0.812903
| 0
| 0
| 0
| 0
| 0
| 0.061111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
f2f43f23669b3672ddf80e446c7b0b4ee962e966
| 365
|
py
|
Python
|
tips_and_tricks/models.py
|
immanuelnadeak/temenin-isoman
|
8a497f7defe402820045ab9b748a447fc2e8a5f9
|
[
"MIT"
] | 1
|
2021-11-04T08:11:18.000Z
|
2021-11-04T08:11:18.000Z
|
tips_and_tricks/models.py
|
immanuelnadeak/temenin-isoman
|
8a497f7defe402820045ab9b748a447fc2e8a5f9
|
[
"MIT"
] | 1
|
2021-11-05T03:52:27.000Z
|
2021-11-05T03:52:27.000Z
|
tips_and_tricks/models.py
|
immanuelnadeak/temenin-isoman
|
8a497f7defe402820045ab9b748a447fc2e8a5f9
|
[
"MIT"
] | 4
|
2021-10-10T12:55:28.000Z
|
2021-10-29T18:27:42.000Z
|
from django.db import models
# Create your models here.
class TipsAndTrick(models.Model):
    """A tips-and-tricks article, stored as metadata plus external URLs."""

    # Headline of the article.
    title = models.CharField(max_length=500)
    # Name of the article's source/publisher.
    source = models.CharField(max_length=500)
    # Date the article was published.
    published_date = models.DateField()
    # Short summary shown before the full article.
    brief_description = models.TextField()
    # Link to the article's illustration image.
    image_url = models.CharField(max_length=500)
    # Link to the full article.
    article_url = models.CharField(max_length=500)
| 36.5
| 50
| 0.756164
| 47
| 365
| 5.702128
| 0.553191
| 0.223881
| 0.268657
| 0.358209
| 0.425373
| 0.223881
| 0
| 0
| 0
| 0
| 0
| 0.038585
| 0.147945
| 365
| 10
| 50
| 36.5
| 0.823151
| 0.065753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
f2f7b0ac58a95ec874e2b2da8533825417fb71d4
| 864
|
py
|
Python
|
tests/test_methods/test_chat.py
|
jackwardell/SlackTime
|
c40be4854a26084e1a368a975e220d613c14d8d8
|
[
"Apache-2.0"
] | 2
|
2020-09-24T00:07:13.000Z
|
2020-09-27T19:27:06.000Z
|
tests/test_methods/test_chat.py
|
jackwardell/SlackTime
|
c40be4854a26084e1a368a975e220d613c14d8d8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_methods/test_chat.py
|
jackwardell/SlackTime
|
c40be4854a26084e1a368a975e220d613c14d8d8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
def test_chat_delete(slack_time):
    """The chat client exposes a `delete` endpoint method."""
    assert getattr(slack_time.chat, "delete")
def test_chat_delete_scheduled_message(slack_time):
    """The chat client exposes a `delete_scheduled_message` endpoint method."""
    assert getattr(slack_time.chat, "delete_scheduled_message")
def test_chat_get_permalink(slack_time):
    """The chat client exposes a `get_permalink` endpoint method."""
    assert getattr(slack_time.chat, "get_permalink")
def test_chat_me_message(slack_time):
    """The chat client exposes a `me_message` endpoint method."""
    assert getattr(slack_time.chat, "me_message")
def test_chat_post_ephemeral(slack_time):
    """The chat client exposes a `post_ephemeral` endpoint method."""
    assert getattr(slack_time.chat, "post_ephemeral")
def test_chat_post_message(slack_time):
    """The chat client exposes a `post_message` endpoint method."""
    assert getattr(slack_time.chat, "post_message")
def test_chat_schedule_message(slack_time):
    """The chat client exposes a `schedule_message` endpoint method."""
    assert getattr(slack_time.chat, "schedule_message")
def test_chat_unfurl(slack_time):
    """The chat client exposes an `unfurl` endpoint method."""
    assert getattr(slack_time.chat, "unfurl")
def test_chat_update(slack_time):
    """The chat client exposes an `update` endpoint method."""
    assert getattr(slack_time.chat, "update")
def test_chat_scheduled_messages_list(slack_time):
    """The chat client's scheduled_messages group exposes a `list` method."""
    assert getattr(slack_time.chat.scheduled_messages, "list")
| 20.571429
| 51
| 0.809028
| 131
| 864
| 4.900763
| 0.167939
| 0.280374
| 0.17134
| 0.311526
| 0.510903
| 0.510903
| 0.336449
| 0
| 0
| 0
| 0
| 0.001318
| 0.121528
| 864
| 41
| 52
| 21.073171
| 0.844532
| 0.024306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8414af8a79e61004889d24a623e136c739d04808
| 103
|
py
|
Python
|
scripts/portal/market34.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | 9
|
2021-04-26T11:59:29.000Z
|
2021-12-20T13:15:27.000Z
|
scripts/portal/market34.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | null | null | null |
scripts/portal/market34.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | 6
|
2021-07-14T06:32:05.000Z
|
2022-02-06T02:32:56.000Z
|
# Stump Town (866000000) => Free Market
# `sm` is the script manager injected by the server's portal-scripting engine
# (not defined in this file).
sm.setReturnField()   # presumably records the current field as the return point — TODO confirm
sm.setReturnPortal()  # presumably records the portal to return through — TODO confirm
sm.warp(910000000, 36)  # warp the player to map 910000000 (Free Market), spawn point 36
| 25.75
| 39
| 0.757282
| 13
| 103
| 6
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215054
| 0.097087
| 103
| 4
| 40
| 25.75
| 0.623656
| 0.359223
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
845a836f69b54690c33dacca21531c5039908fe7
| 112
|
py
|
Python
|
fastapi/core/models/images.py
|
ilDug/docker-utils
|
6580e916a8c2c0d91f2e3da52a9d839507569bb7
|
[
"MIT"
] | null | null | null |
fastapi/core/models/images.py
|
ilDug/docker-utils
|
6580e916a8c2c0d91f2e3da52a9d839507569bb7
|
[
"MIT"
] | null | null | null |
fastapi/core/models/images.py
|
ilDug/docker-utils
|
6580e916a8c2c0d91f2e3da52a9d839507569bb7
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
class ImageB64UploadRequestModel(BaseModel):
    """Request payload for uploading an image as a base64-encoded string."""

    # Original file name of the image being uploaded.
    filename: str
    # Image content, base64-encoded.
    base64: str
| 16
| 44
| 0.776786
| 11
| 112
| 7.909091
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 0.178571
| 112
| 6
| 45
| 18.666667
| 0.902174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
ffca8e9bbe49907d8307566fdfef601a37da7a91
| 60
|
py
|
Python
|
dlhub_cli/__init__.py
|
DLHub-Argonne/dlhub_cli
|
4193683c92b9be546588b8e3ac419c5efecd8ef7
|
[
"Apache-2.0"
] | 6
|
2018-10-19T03:29:03.000Z
|
2021-07-09T20:28:37.000Z
|
dlhub_cli/__init__.py
|
DLHub-Argonne/dlhub_cli
|
4193683c92b9be546588b8e3ac419c5efecd8ef7
|
[
"Apache-2.0"
] | 13
|
2018-11-29T15:23:25.000Z
|
2019-05-08T20:36:17.000Z
|
dlhub_cli/__init__.py
|
DLHub-Argonne/dlhub_cli
|
4193683c92b9be546588b8e3ac419c5efecd8ef7
|
[
"Apache-2.0"
] | 1
|
2019-03-06T15:52:47.000Z
|
2019-03-06T15:52:47.000Z
|
from dlhub_cli.main import cli_root
# Explicit public API: only the CLI entry point is re-exported by the package.
__all__ = ['cli_root']
| 15
| 35
| 0.766667
| 10
| 60
| 3.9
| 0.7
| 0.358974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 60
| 3
| 36
| 20
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
08134ca88bcdd23eae89c5b9e67c3e8816771d05
| 29,060
|
py
|
Python
|
vscworkflows/workflows/core.py
|
zezhong-zhang/vsc-workflows
|
83fbd534592eac973f4390bead670809cb4e47eb
|
[
"MIT"
] | null | null | null |
vscworkflows/workflows/core.py
|
zezhong-zhang/vsc-workflows
|
83fbd534592eac973f4390bead670809cb4e47eb
|
[
"MIT"
] | null | null | null |
vscworkflows/workflows/core.py
|
zezhong-zhang/vsc-workflows
|
83fbd534592eac973f4390bead670809cb4e47eb
|
[
"MIT"
] | 1
|
2019-11-14T14:55:23.000Z
|
2019-11-14T14:55:23.000Z
|
# coding: utf8
# Copyright (c) Marnik Bercx, University of Antwerp
# Distributed under the terms of the MIT License
import os
import numpy as np
from string import ascii_lowercase
from fireworks import Workflow
from monty.serialization import loadfn
from pymatgen.core.surface import Slab, SlabGenerator
from vscworkflows.misc import QSlab
from vscworkflows.fireworks.core import StaticFW, OptimizeFW, OpticsFW, \
SlabOptimizeFW, SlabDosFW
"""
Definition of all workflows in the package.
"""
__author__ = "Marnik Bercx"
__copyright__ = "Copyright 2019, Marnik Bercx, University of Antwerp"
__version__ = "pre-alpha"
__maintainer__ = "Marnik Bercx"
__email__ = "marnik.bercx@uantwerpen.be"
__date__ = "Jun 2019"
MODULE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../setup/set_configs"
)
def _load_yaml_config(filename):
    """Load the settings stored in ``<filename>.yaml`` under MODULE_DIR."""
    return loadfn(os.path.join(MODULE_DIR, "%s.yaml" % filename))
def _set_up_relative_directory(directory, functional, calculation):
# Set up a calculation directory for a specific functional and calculation
directory = os.path.join(os.path.abspath(directory), functional[0])
if functional[0] == "pbeu":
directory += "_" + "".join(k + str(functional[1]["LDAUU"][k]) for k
in functional[1]["LDAUU"].keys())
directory += "_" + calculation
return directory
def _set_up_functional_params(functional):
"""
Set up the vasp_input_params based on the functional and some other conventions.
Args:
functional (tuple): Tuple with the functional details. The first element
contains a string that indicates the functional used ("pbe", "hse06",
...),
whereas the second element contains a dictionary that allows the user
to specify the various functional tags.
Returns:
dict: dictionary with the standard vasp input parameters.
"""
vasp_input_params = {"user_incar_settings": {}}
# Set up the functional
if functional[0] != "pbe":
functional_config = _load_yaml_config(functional[0] + "Set")
functional_config["INCAR"].update(functional[1])
vasp_input_params["user_incar_settings"].update(functional_config["INCAR"])
return vasp_input_params
def get_wf_optimize(structure, directory, functional=("pbe", {}),
                    is_metal=False, in_custodian=False, number_nodes=None,
                    auto_parallelization=False):
    """
    Set up a geometry optimization workflow for a bulk structure.
    Args:
        structure (Structure): Input Geometry.
        directory (str): Directory in which the geometry optimization should be
            performed.
        functional (tuple): Tuple with the functional details. The first element
            contains a string that indicates the functional used ("pbe", "hse06",
            ...),
            whereas the second element contains a dictionary that allows the user
            to specify additional INCAR tags.
        is_metal (bool): Flag that indicates the material being studied is a
            metal, which changes the smearing from Gaussian (0.05 eV) to second
            order Methfessel-Paxton of 0.2 eV.
        in_custodian (bool): Flag that indicates whether the calculation should be
            run inside a Custodian.
        number_nodes (int): Number of nodes that should be used for the calculations.
            Is required to add the proper `_fworker` to the Firework spec, so
            it is picked up by a Fireworker running in a job with the specified
            number of nodes.
        auto_parallelization (bool): Automatically parallelize the calculation
            using the VaspParallelizationTask.
    Returns:
        Workflow: Workflow containing a single geometry optimization Firework.
    """
    # Add number of nodes to spec, or "none"
    if number_nodes is not None and number_nodes != 0:
        spec = {"_fworker": str(number_nodes) + "nodes"}
    else:
        spec = {}
    # --> Set up the geometry optimization
    vasp_input_params = _set_up_functional_params(functional)
    spec.update({"_launch_dir": directory})
    # For metals, use Methfessel Paxton smearing
    if is_metal:
        vasp_input_params["user_incar_settings"].update(
            {"ISMEAR": 2, "SIGMA": 0.2}
        )
    # Set up the geometry optimization Firework
    optimize_fw = OptimizeFW(structure=structure,
                             vasp_input_params=vasp_input_params,
                             custodian=in_custodian,
                             spec=spec,
                             auto_parallelization=auto_parallelization)
    # Set up a clear name for the workflow, e.g. "TiO2 ('pbe', {})"
    workflow_name = str(structure.composition.reduced_formula).replace(" ", "")
    workflow_name += " " + str(functional)
    # Create the workflow
    return Workflow(fireworks=[optimize_fw, ],
                    name=workflow_name)
def get_wf_energy(structure, directory, functional=("pbe", {}),
                  is_metal=False, in_custodian=False, number_nodes=None):
    """
    Set up an accurate energy workflow for a bulk structure. Starts by optimizing
    the geometry and then does a static calculation.
    Args:
        structure (Structure): Input geometry.
        directory (str): Directory in which the workflow should be set up.
        functional (tuple): Tuple with the functional details. The first element
            contains a string that indicates the functional used ("pbe", "hse06",
            ...),
            whereas the second element contains a dictionary that allows the user
            to specify the various functional INCAR tags.
        is_metal (bool): Flag that indicates the material being studied is a
            metal, which changes the smearing of the geometry optimization from
            Gaussian ( 0.05 eV) to second order Methfessel-Paxton of 0.2 eV.
        in_custodian (bool): Flag that indicates whether the calculation should be
            run inside a Custodian.
        number_nodes (int): Number of nodes that should be used for the calculations.
            Is required to add the proper `_fworker` to the Firework spec, so
            it is picked up by a Fireworker running in a job with the specified
            number of nodes.
    Returns:
        Workflow: Two-step workflow (geometry optimization followed by a
        static calculation).
    """
    # Add number of nodes to spec, or "none"
    if number_nodes is not None and number_nodes != 0:
        spec = {"_fworker": str(number_nodes) + "nodes"}
    else:
        spec = {}
    # --> Set up the geometry optimization
    vasp_input_params = _set_up_functional_params(functional)
    spec.update(
        {"_launch_dir": _set_up_relative_directory(directory, functional,
                                                   "optimize"),
         "_pass_job_info": True})
    # For metals, use Methfessel Paxton smearing
    if is_metal:
        vasp_input_params["user_incar_settings"].update(
            {"ISMEAR": 2, "SIGMA": 0.2}
        )
    # Set up the geometry optimization Firework
    optimize_fw = OptimizeFW(structure=structure,
                             vasp_input_params=vasp_input_params,
                             custodian=in_custodian,
                             spec=spec)
    # -> Set up the static calculation
    vasp_input_params = _set_up_functional_params(functional)
    # NOTE(review): the SAME `spec` dict object is mutated here and passed to
    # both fireworks, so "_pass_job_info": True set above also ends up in the
    # static Firework's spec — confirm this sharing is intended.
    spec.update({"_launch_dir": _set_up_relative_directory(directory, functional,
                                                           "static")})
    # Set up the static Firework
    static_fw = StaticFW(vasp_input_params=vasp_input_params,
                         parents=optimize_fw,
                         custodian=in_custodian,
                         spec=spec)
    # Set up a clear name for the workflow, e.g. "TiO2 ('pbe', {})"
    workflow_name = str(structure.composition.reduced_formula).replace(" ", "")
    workflow_name += " " + str(functional)
    # Create the workflow
    return Workflow(fireworks=[optimize_fw, static_fw],
                    name=workflow_name)
def get_wf_optics(structure, directory, functional=("pbe", {}), k_resolution=None,
                  is_metal=False, user_incar_settings=None, in_custodian=False,
                  number_nodes=None, auto_parallelization=False):
    """
    Set up a workflow to calculate the frequency dependent dielectric matrix.
    Starts with a geometry optimization.
    Args:
        structure (Structure): Input geometry.
        directory (str): Directory in which the workflow should be set up.
        functional (tuple): Tuple with the functional details. The first element
            contains a string that indicates the functional used ("pbe", "hse06",
            ...),
            whereas the second element contains a dictionary that allows the user
            to specify the various functional INCAR tags.
        k_resolution (float): Resolution of the k-mesh, i.e. distance between two
            k-points along each reciprocal lattice vector. Defaults to 0.05 for
            metals and 0.1 otherwise.
        is_metal (bool): Flag that indicates the material being studied is a
            metal, which changes the smearing from Gaussian (0.05 eV) to second
            order Methfessel-Paxton of 0.2 eV; the optics calculation will use a
            generous gaussian smearing of 0.3 eV instead of the tetrahedron method.
        user_incar_settings (dict): User INCAR settings. This allows a user
            to override INCAR settings, e.g., setting a different MAGMOM for
            various elements or species, or specify parallelization settings (
            KPAR, NPAR, ...). Note that the settings specified here will
            override the INCAR settings for ALL fireworks of the workflow.
        in_custodian (bool): Flag that indicates whether the calculation should be
            run inside a Custodian.
        number_nodes (int): Number of nodes that should be used for the calculations.
            Is required to add the proper `_fworker` to the Firework spec, so
            it is picked up by a Fireworker running in a job with the specified
            number of nodes.
        auto_parallelization (bool): Automatically parallelize the calculation
            using the VaspParallelizationTask.
    Returns:
        Workflow: Two-step workflow (geometry optimization followed by an
        optics calculation).
    """
    # Add number of nodes to spec, or "none"
    if number_nodes is not None and number_nodes != 0:
        spec = {"_fworker": str(number_nodes) + "nodes"}
    else:
        spec = {}
    # 1. Set up the geometry optimization
    vasp_input_params = _set_up_functional_params(functional)
    spec.update(
        {"_launch_dir": _set_up_relative_directory(directory, functional,
                                                   "optimize")})
    # For metals, use Methfessel Paxton smearing
    if is_metal:
        vasp_input_params["user_incar_settings"].update(
            {"ISMEAR": 2, "SIGMA": 0.2}
        )
    # Override the INCAR settings with the user specifications
    if user_incar_settings is not None:
        vasp_input_params["user_incar_settings"].update(user_incar_settings)
    # Set up the Firework
    optimize_fw = OptimizeFW(structure=structure,
                             vasp_input_params=vasp_input_params,
                             custodian=in_custodian,
                             spec=spec,
                             auto_parallelization=auto_parallelization)
    # 2. Set up the optics calculation
    vasp_input_params = _set_up_functional_params(functional)
    # NOTE(review): the same `spec` dict object is reused for both fireworks.
    spec.update({"_launch_dir": _set_up_relative_directory(directory, functional,
                                                           "optics")})
    # For metals, use a good amount of Gaussian smearing
    if is_metal:
        vasp_input_params["user_incar_settings"].update(
            {"ISMEAR": 0, "SIGMA": 0.3}
        )
        k_resolution = k_resolution or 0.05
    else:
        k_resolution = k_resolution or 0.1
    vasp_input_params["user_kpoints_settings"] = {"k_resolution": k_resolution}
    # Override the INCAR settings with the user specifications
    if user_incar_settings is not None:
        vasp_input_params["user_incar_settings"].update(user_incar_settings)
    # Set up the optics Firework
    optics_fw = OpticsFW(
        parents=optimize_fw,
        vasp_input_params=vasp_input_params,
        custodian=in_custodian,
        spec=spec,
        auto_parallelization=auto_parallelization
    )
    # Set up a clear name for the workflow, e.g. "TiO2 ('pbe', {})"
    workflow_name = str(structure.composition.reduced_formula).replace(" ", "")
    workflow_name += " " + str(functional)
    # Create the workflow
    return Workflow(fireworks=[optimize_fw, optics_fw],
                    links_dict={optimize_fw: [optics_fw]},
                    name=workflow_name)
def get_wf_slab_optimize(slab, directory, user_slab_settings,
                         functional=("pbe", {}), is_metal=False,
                         in_custodian=False, number_nodes=None):
    """
    Set up a slab geometry optimization workflow.
    Args:
        slab (Qslab): Slab for which to set up the geometry optimization workflow.
        directory (str): Directory in which the geometry optimization should be
            performed.
        user_slab_settings (dict): Allows the user to specify the selective
            dynamics of the slab geometry optimization. These are passed to
            the SlabOptimizeSet.fix_slab_bulk() commands as kwargs.
        functional (tuple): Tuple with the functional details. The first element
            contains a string that indicates the functional used
            ("pbe", "hse06", ...), whereas the second element contains a
            dictionary that allows the user to specify the various functional
            INCAR tags.
        is_metal (bool): Flag that indicates the material being studied is a
            metal, which changes the smearing of the geometry optimization from
            Gaussian ( 0.05 eV) to second order Methfessel-Paxton of 0.2 eV.
        in_custodian (bool): Flag that indicates whether the calculation should be
            run inside a Custodian.
        number_nodes (int): Number of nodes that should be used for the calculations.
            Is required to add the proper `_fworker` to the Firework spec, so
            it is picked up by a Fireworker running in a job with the specified
            number of nodes.
    Returns:
        Workflow: Workflow containing a single slab geometry optimization
        Firework.
    """
    # Add number of nodes to spec, or "none"
    if number_nodes is not None and number_nodes != 0:
        spec = {"_fworker": str(number_nodes) + "nodes"}
    else:
        spec = {}
    # Set up the geometry optimization Firework
    vasp_input_params = _set_up_functional_params(functional)
    spec.update({"_launch_dir": directory})
    # For metals, use Methfessel Paxton smearing
    if is_metal:
        vasp_input_params["user_incar_settings"].update(
            {"ISMEAR": 2, "SIGMA": 0.2}
        )
    optimize_fw = SlabOptimizeFW(slab=slab,
                                 user_slab_settings=user_slab_settings,
                                 vasp_input_params=vasp_input_params,
                                 custodian=in_custodian,
                                 spec=spec)
    # Set up a clear name for the workflow, e.g. "TiO2 (1, 0, 0) ('pbe', {})"
    workflow_name = str(slab.composition.reduced_formula).replace(" ", "")
    workflow_name += " " + str(slab.miller_index)
    workflow_name += " " + str(functional)
    # Create the workflow
    return Workflow(fireworks=[optimize_fw, ],
                    name=workflow_name)
def get_wf_slab_dos(slab, directory, user_slab_settings=None,
                    functional=("pbe", {}), k_resolution=0.1,
                    calculate_locpot=False, is_metal=False,
                    user_incar_settings=None, in_custodian=False,
                    number_nodes=None, auto_parallelization=False):
    """
    Set up a slab DOS workflow. Starts with a geometry optimization.
    Args:
        slab (Qslab): Slab for which to set up the DOS workflow.
        directory (str): Directory in which the workflow should be set up.
        user_slab_settings (dict): Allows the user to specify the selective
            dynamics of the slab geometry optimization. These are passed to
            the SlabOptimizeSet.fix_slab_bulk() commands as kwargs.
        functional (tuple): Tuple with the functional details. The first element
            contains a string that indicates the functional used
            ("pbe", "hse06", ...), whereas the second element contains a
            dictionary that allows the user to specify the various functional
            INCAR tags.
        k_resolution (float): Resolution of the k-mesh, i.e. distance between two
            k-points along each reciprocal lattice vector. Note that for a slab
            calculation we always only consider one point in the c-direction.
        calculate_locpot (bool): Whether to calculate the local potential,
            e.g. to determine the work function.
        is_metal (bool): Flag that indicates the material being studied is a
            metal, which changes the smearing of the geometry optimization from
            Gaussian ( 0.05 eV) to second order Methfessel-Paxton of 0.2 eV.
        user_incar_settings (dict): User INCAR settings. This allows a user to
            override INCAR settings, e.g., setting a different MAGMOM for various
            elements or species, or specify parallelization settings
            (KPAR, NPAR, ...). Note that the settings specified here will
            override the INCAR settings for ALL fireworks of the workflow.
        in_custodian (bool): Flag that indicates whether the calculations should be
            run inside a Custodian.
        number_nodes (int): Number of nodes that should be used for the calculations.
            Is required to add the proper `_fworker` to the Firework spec, so
            it is picked up by a Fireworker running in a job with the specified
            number of nodes.
        auto_parallelization (bool): Automatically parallelize the calculation
            using the VaspParallelizationTask.
    Returns:
        Workflow: Two-step workflow (slab geometry optimization followed by a
        DOS calculation).
    """
    # Add number of nodes to spec, or "none"
    if number_nodes is not None and number_nodes != 0:
        spec = {"_fworker": str(number_nodes) + "nodes"}
    else:
        spec = {}
    # --> Set up the geometry optimization Firework
    vasp_input_params = _set_up_functional_params(functional)
    spec.update(
        {"_launch_dir": _set_up_relative_directory(directory, functional,
                                                   "optimize")}
    )
    # For metals, use Methfessel Paxton smearing
    if is_metal:
        vasp_input_params["user_incar_settings"].update(
            {"ISMEAR": 2, "SIGMA": 0.2}
        )
    if user_incar_settings is not None:
        vasp_input_params["user_incar_settings"].update(user_incar_settings)
    optimize_fw = SlabOptimizeFW(slab=slab,
                                 user_slab_settings=user_slab_settings,
                                 vasp_input_params=vasp_input_params,
                                 custodian=in_custodian,
                                 spec=spec,
                                 auto_parallelization=auto_parallelization)
    # --> Set up the DOS Firework
    vasp_input_params = _set_up_functional_params(functional)
    # Calculate the local potential if requested (e.g. for the work function)
    if calculate_locpot:
        vasp_input_params["user_incar_settings"].update(
            {"LVTOT": True, "LVHAR": True}
        )
    # NOTE(review): the same `spec` dict object is reused for both fireworks.
    spec.update(
        {"_launch_dir": _set_up_relative_directory(directory, functional,
                                                   "dos")}
    )
    vasp_input_params["user_kpoints_settings"] = {"k_resolution": k_resolution}
    if user_incar_settings is not None:
        vasp_input_params["user_incar_settings"].update(user_incar_settings)
    # Set up the DOS Firework
    dos_fw = SlabDosFW(
        vasp_input_params=vasp_input_params,
        parents=optimize_fw,
        custodian=in_custodian,
        spec=spec,
        auto_parallelization=auto_parallelization
    )
    # Set up a clear name for the workflow, e.g. "TiO2 (1, 0, 0) ('pbe', {})"
    workflow_name = str(slab.composition.reduced_formula).replace(" ", "")
    workflow_name += " " + str(slab.miller_index)
    workflow_name += " " + str(functional)
    # Create the workflow
    return Workflow(fireworks=[optimize_fw, dos_fw], name=workflow_name)
def get_wf_quotas(bulk, slab_list, directory, functional=("pbe", {}),
                  k_resolution=0.05, is_metal=False,
                  in_custodian=False, number_nodes=None):
    """
    Generate a full QUOTAS worfklow, i.e. one that:
    1. Optimizes the bulk and calculates the optical properties.
    2. Optimizes the slabs in 'slab_list' and then calculates the DOS and work
    function.
    Args:
        bulk (Structure): Input bulk geometry.
        slab_list (list): A list of dictionaries that specify the slabs to be
            included, as well as the settings to be used for each slab. Here is
            an overview of the mandatory keys:
                "slab" - Can be either a QSlab or a list/str that specifies the
                    miller indices of the slab surface. In case only the miller
                    indices are provided, the user must also supply the
                    "min_slab_size" and "min_vacuum_size", which specify the
                    minimum thickness of the slab and vacuum layer in angstrom.
                "user_slab_settings" (dict) - Settings that will be passed to the
                    slab optimization firework. The most important key of this
                    dict is "free_layers", which specifies the number of
                    surface layers to optimize.
        directory (str): Directory in which the workflow should be set up.
        functional (tuple): Tuple with the functional details. The first element
            contains a string that indicates the functional used
            ("pbe", "hse06", ...), whereas the second element contains a
            dictionary that allows the user to specify the various functional
            INCAR tags.
        k_resolution (float): Resolution of the k-mesh, i.e. distance
            between two k-points along each reciprocal lattice vector. Note that
            for a slab calculation we always only consider one point in the
            c-direction.
        is_metal (bool): Flag that indicates the material being studied is a
            metal, which changes the smearing from Gaussian (0.05 eV) to second
            order Methfessel-Paxton of 0.2 eV; the optics calculation will use a
            generous gaussian smearing of 0.3 eV instead of the tetrahedron method.
        in_custodian (bool): Flag that indicates whether the calculation should be
            run inside a Custodian.
        number_nodes (int): Number of nodes that should be used for the calculations.
            Is required to add the proper `_fworker` to the Firework spec, so
            it is picked up by a Fireworker running in a job with the specified
            number of nodes.
    Raises:
        ValueError: When a slab is given by its miller indices but
            "min_slab_size" or "min_vacuum_size" is missing from its dict.
    Returns:
        Workflow: Combined workflow with the bulk optics fireworks and one
        optimization + DOS pair per slab (termination).
    """
    fireworks = []
    # Set up the directory for the bulk calculations
    bulk_dir = os.path.join(directory, "bulk")
    fireworks.extend(get_wf_optics(
        directory=bulk_dir, structure=bulk, functional=functional,
        k_resolution=k_resolution, is_metal=is_metal,
        in_custodian=in_custodian, number_nodes=number_nodes
    ).fws)
    for slab_dict in slab_list:
        if isinstance(slab_dict["slab"], Slab):
            slab = slab_dict["slab"]
            # Set up the directory for the slab calculations, e.g. "100"
            slab_dir = "".join([str(c) for c in slab.miller_index])
            slab_dir = os.path.join(directory, slab_dir)
            fireworks.extend(get_wf_slab_dos(
                slab=slab, directory=slab_dir, functional=functional,
                k_resolution=k_resolution,
                user_slab_settings=slab_dict["user_slab_settings"],
                calculate_locpot=True, is_metal=is_metal, in_custodian=in_custodian,
                number_nodes=number_nodes
            ).fws)
        elif isinstance(slab_dict["slab"], list) \
                or isinstance(slab_dict["slab"], str):
            # Only the miller indices were provided; the slab(s) have to be
            # generated from the bulk structure.
            miller_index = [int(c) for c in slab_dict["slab"]]
            try:
                slabgen = SlabGenerator(
                    initial_structure=bulk,
                    miller_index=miller_index,
                    min_slab_size=slab_dict["min_slab_size"],
                    min_vacuum_size=slab_dict["min_vacuum_size"]
                )
            except KeyError:
                # str() is required here: slab_dict["slab"] may be a list,
                # which cannot be concatenated to a str directly.
                raise ValueError("Either min_slab_size or min_vacuum_size were not "
                                 "defined in the slab dictionary for " +
                                 str(slab_dict["slab"]) + ".")
            slab_terminations = slabgen.get_slabs()
            if len(slab_terminations) == 1:
                slab = QSlab.from_slab(slab_terminations[0])
                # Set up the directory for the slab calculations
                slab_dir = "".join([str(c) for c in miller_index])
                slab_dir = os.path.join(directory, slab_dir)
                fireworks.extend(get_wf_slab_dos(
                    slab=slab, directory=slab_dir, functional=functional,
                    k_resolution=k_resolution,
                    user_slab_settings=slab_dict["user_slab_settings"],
                    calculate_locpot=True, is_metal=is_metal,
                    in_custodian=in_custodian,
                    number_nodes=number_nodes
                ).fws)
            elif len(slab_terminations) > 1:
                print("Multiple slab terminations found. Adding workflow for each "
                      "termination...")
                # Suffix each termination's directory with a letter: "100_a", ...
                for slab, letter in zip(slab_terminations, ascii_lowercase):
                    slab = QSlab.from_slab(slab)
                    # Set up the directory for the slab calculations
                    slab_dir = "".join([str(c) for c in miller_index]) + "_" + letter
                    slab_dir = os.path.join(directory, slab_dir)
                    fireworks.extend(get_wf_slab_dos(
                        slab=slab, directory=slab_dir, functional=functional,
                        k_resolution=k_resolution,
                        user_slab_settings=slab_dict["user_slab_settings"],
                        calculate_locpot=True, is_metal=is_metal,
                        in_custodian=in_custodian,
                        number_nodes=number_nodes
                    ).fws)
    # Set up a clear name for the workflow
    workflow_name = str(bulk.composition.reduced_formula).replace(" ", "")
    workflow_name += " - QUOTAS - "
    workflow_name += " " + str(functional)
    return Workflow(fireworks=fireworks, name=workflow_name)
def get_wf_parallel(structure, directory, nodes, nbands=None,
                    functional=("pbe", {}), user_kpoints_settings=None,
                    user_incar_settings=None, handlers=None, cores_per_node=28,
                    kpar_range=None, min_npar=1):
    """
    Set up a workflow of static calculations that scans KPAR/NPAR
    parallelization settings for a fixed number of nodes.

    One StaticFW is generated for every (KPAR, NPAR) combination where
    KPAR divides the total number of cores and NPAR divides the number of
    cores per k-point group (and, if ``nbands`` is given, also divides
    the number of bands).

    Args:
        structure: Input structure for the static calculations.
        directory (str): Base directory in which the per-setting launch
            directories ("<nodes>nodes/<kpar>kpar/<npar>npar") are created.
        nodes (int): Number of nodes to test; also selects the fireworker.
        nbands (int): Number of bands; restricts the NPAR choices.
        functional (tuple): (functional_name, settings_dict) pair.
        user_kpoints_settings (dict): K-point settings override. Defaults
            to {"reciprocal_density": 300}.
        user_incar_settings (dict): Extra INCAR overrides applied to every
            calculation.
        handlers (list): Custodian handlers passed to each StaticFW.
        cores_per_node (int): Cores available on each node.
        kpar_range (list): [min, max] range of KPAR values to consider.
            Defaults to [1, total number of cores].
        min_npar (int): Smallest NPAR value to consider.

    Returns:
        Workflow: Workflow with one StaticFW per parallelization setting.
    """
    # Set defaults
    user_kpoints_settings = user_kpoints_settings or {"reciprocal_density": 300}
    user_incar_settings = user_incar_settings or {}
    handlers = handlers or []

    n_cores = int(cores_per_node * nodes)
    kpar_range = kpar_range or [1, n_cores]

    fw_list = []

    # KPAR must divide the total number of cores. Plain lists of Python
    # ints (instead of np.array) so str(kpar)/INCAR values are native ints.
    suitable_kpars = [kpar for kpar in range(kpar_range[0], kpar_range[1] + 1)
                      if n_cores % kpar == 0]

    for kpar in suitable_kpars:
        # Exact integer division: kpar divides n_cores by construction.
        # (The original used float division and a float modulo test.)
        cores_per_k = n_cores // kpar
        suitable_npars = [npar for npar in range(min_npar, cores_per_k + 1)
                          if cores_per_k % npar == 0]
        if nbands is not None:
            # NPAR should also divide the number of bands.
            suitable_npars = [npar for npar in suitable_npars
                              if nbands % npar == 0]

        for npar in suitable_npars:
            spec = {}
            # Launch directory and fireworker for this (kpar, npar) setting
            spec.update({"_launch_dir": os.path.join(
                directory, str(nodes) + "nodes", str(kpar) + "kpar",
                (str(npar) + "npar")
            )})
            spec.update({"_fworker": str(nodes) + "nodes"})

            vasp_input_params = _set_up_functional_params(functional)
            vasp_input_params["user_kpoints_settings"] = user_kpoints_settings
            vasp_input_params["user_incar_settings"].update(user_incar_settings)
            vasp_input_params["user_incar_settings"].update(
                {"KPAR": kpar, "NPAR": npar})
            vasp_input_params["force_gamma"] = True

            # Set up the Firework and add it to the list
            fw_list.append(
                StaticFW(structure=structure,
                         vasp_input_params=vasp_input_params,
                         spec=spec,
                         custodian=handlers)
            )

    workflow_name = ("Parallel-Test: " + structure.composition.reduced_formula
                     + " " + str(nodes) + "nodes.")

    # Create the workflow
    return Workflow(fireworks=fw_list, name=workflow_name)
| 43.115727
| 85
| 0.629732
| 3,500
| 29,060
| 5.04
| 0.103429
| 0.016156
| 0.040816
| 0.019388
| 0.772392
| 0.742177
| 0.714569
| 0.704252
| 0.694388
| 0.688889
| 0
| 0.005784
| 0.297935
| 29,060
| 673
| 86
| 43.179792
| 0.858837
| 0.426325
| 0
| 0.509434
| 0
| 0
| 0.078613
| 0.005633
| 0.003145
| 0
| 0
| 0
| 0
| 1
| 0.031447
| false
| 0.003145
| 0.025157
| 0
| 0.08805
| 0.003145
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
082364d42cde77f34692a39b3f9d58f88475a8f2
| 210
|
py
|
Python
|
zestipy/__init__.py
|
akremin/M2FSreduce
|
42092f18aa1e5d7ad6f6528a395ee93e89165b30
|
[
"BSD-3-Clause"
] | null | null | null |
zestipy/__init__.py
|
akremin/M2FSreduce
|
42092f18aa1e5d7ad6f6528a395ee93e89165b30
|
[
"BSD-3-Clause"
] | null | null | null |
zestipy/__init__.py
|
akremin/M2FSreduce
|
42092f18aa1e5d7ad6f6528a395ee93e89165b30
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 11:15:36 2015
@author: kremin
"""
from . import io
from . import plotting_tools
from . import workflow_funcs
from . import z_est
from . import data_structures
| 17.5
| 35
| 0.709524
| 33
| 210
| 4.393939
| 0.757576
| 0.344828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075145
| 0.17619
| 210
| 12
| 36
| 17.5
| 0.763006
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
084a5ce782bef776e1ab0618cc0ac296ea2c360a
| 213
|
py
|
Python
|
sessao03/05_57-FuncoesPart4/aula57.py
|
Ruteski/CursoPythonOM
|
afe5ad5e50f903dab370be2b76966a81f07e51dd
|
[
"MIT"
] | null | null | null |
sessao03/05_57-FuncoesPart4/aula57.py
|
Ruteski/CursoPythonOM
|
afe5ad5e50f903dab370be2b76966a81f07e51dd
|
[
"MIT"
] | null | null | null |
sessao03/05_57-FuncoesPart4/aula57.py
|
Ruteski/CursoPythonOM
|
afe5ad5e50f903dab370be2b76966a81f07e51dd
|
[
"MIT"
] | null | null | null |
"""
escopo
"""
variavel = 'valor'
def func():
print(variavel)
def func2():
global variavel
variavel = 'outro valor'
print(variavel)
def func3():
print(variavel)
func()
func2()
func3()
| 8.52
| 28
| 0.596244
| 23
| 213
| 5.521739
| 0.434783
| 0.307087
| 0.251969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025
| 0.248826
| 213
| 24
| 29
| 8.875
| 0.76875
| 0.028169
| 0
| 0.25
| 0
| 0
| 0.080402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f24994f3d3e6ec8a1a157412a4393a209bc38f6d
| 19
|
py
|
Python
|
exercises/en/exc_04_12.py
|
Lavendulaa/programming-in-python-for-data-science
|
bc41da8afacf4c180ae0ff9c6dc26a7e6292252f
|
[
"MIT"
] | 1
|
2020-06-26T20:15:44.000Z
|
2020-06-26T20:15:44.000Z
|
exercises/en/exc_04_12.py
|
Lavendulaa/programming-in-python-for-data-science
|
bc41da8afacf4c180ae0ff9c6dc26a7e6292252f
|
[
"MIT"
] | 20
|
2020-06-15T23:05:20.000Z
|
2020-09-01T22:07:45.000Z
|
exercises/en/exc_04_12.py
|
UBC-MDS/MCL-programming-in-python
|
22836d9013d3e3d1b1074678ba7dc3ee2e66f398
|
[
"MIT"
] | 1
|
2020-06-25T20:53:13.000Z
|
2020-06-25T20:53:13.000Z
|
# Exercise stub: replace each blank (____) with the appropriate code.
____ = ____
# ____
| 6.333333
| 11
| 0.631579
| 0
| 19
| null | null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263158
| 19
| 3
| 12
| 6.333333
| 0
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f2a21b477e667dfda44e7f989d0c8fa3fa3aed8d
| 48,305
|
py
|
Python
|
django_harmonization/ui/models.py
|
chrisroederucdenver/Kao-Harmonization-Release
|
1a90db58cd378244a8aba138e27f049376045729
|
[
"Apache-2.0"
] | null | null | null |
django_harmonization/ui/models.py
|
chrisroederucdenver/Kao-Harmonization-Release
|
1a90db58cd378244a8aba138e27f049376045729
|
[
"Apache-2.0"
] | null | null | null |
django_harmonization/ui/models.py
|
chrisroederucdenver/Kao-Harmonization-Release
|
1a90db58cd378244a8aba138e27f049376045729
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
# for api-token-auth
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
# Most tables are NOT managed, TableColumn is
#class AttributeDefinition(models.Model):
# attribute_definition_id = models.IntegerField(primary_key=True)
# attribute_name = models.CharField(max_length=255)
# attribute_description = models.TextField(blank=True, null=True)
# attribute_type_concept_id = models.IntegerField()
# attribute_syntax = models.TextField(blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'attribute_definition'
#
#class CareSite(models.Model):
# care_site_id = models.IntegerField(primary_key=True)
# care_site_name = models.CharField(max_length=255, blank=True, null=True)
# place_of_service_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# location = models.ForeignKey('Location', models.DO_NOTHING, blank=True, null=True)
# care_site_source_value = models.CharField(max_length=50, blank=True, null=True)
# place_of_service_source_value = models.CharField(max_length=50, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'care_site'
class CategorizationFunctions(models.Model):
    """Unmanaged model for the 'categorization_functions' lookup table.

    Each row is a categorization function name, unique across the table.
    """
    name = models.CharField(max_length=100)

    class Meta:
        managed = False
        db_table = 'categorization_functions'
        # Bug fix: the original read (('name'),) — the inner parentheses
        # were not a tuple (missing comma). Django happened to accept the
        # flat form; spell the group explicitly as a tuple of tuples.
        unique_together = (('name',),)
class MappingFunctions(models.Model):
    """Unmanaged model for the 'mapping_function' table.

    A mapping function name, optionally scoped to a study id.
    """
    name = models.CharField(max_length=100)
    study_id = models.IntegerField(blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'mapping_function'
        # Bug fix: the original read (('name'),) — the inner parentheses
        # were not a tuple (missing comma). Django happened to accept the
        # flat form; spell the group explicitly as a tuple of tuples.
        unique_together = (('name',),)
class CategorizationFunctionMetadata(models.Model):
    """Unmanaged model for 'categorization_function_metadata'.

    Names, rule id and source-concept details of a categorization
    function used within an extract study.

    NOTE(review): the composite key is emulated with primary_key on the
    FK plus unique_together; Django has no true composite primary keys.
    """
    extract_study = models.ForeignKey('ExtractStudy', models.DO_NOTHING, primary_key=True)
    function_name = models.CharField(max_length=100)
    long_name = models.CharField(max_length=100)
    rule_id = models.CharField(max_length=20)
    from_vocabulary_id = models.CharField(max_length=100)
    from_concept_code = models.CharField(max_length=100)
    comment = models.CharField(max_length=256, blank=True, null=True)
    from_table = models.CharField(max_length=20, blank=True, null=True)
    short_name = models.CharField(max_length=6, blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'categorization_function_metadata'
        # Bug fix: the original listed the identical field group twice;
        # a single copy expresses the same constraint.
        unique_together = (('extract_study', 'function_name', 'long_name', 'rule_id'),)
class CategorizationFunctionParameters(models.Model):
    """Unmanaged model for 'categorization_function_parameters'.

    Ordered (by 'rank') parameter values for a categorization function
    rule within an extract study.

    NOTE(review): composite key emulated via primary_key on the FK plus
    unique_together — Django has no true composite primary keys.
    """
    extract_study = models.ForeignKey('ExtractStudy', models.DO_NOTHING, primary_key=True)
    function_name = models.CharField(max_length=100)
    long_name = models.CharField(max_length=100)
    rule_id = models.CharField(max_length=20)
    value_limit = models.FloatField(blank=True, null=True)
    rank = models.IntegerField()
    from_string = models.CharField(max_length=20, blank=True, null=True)
    from_concept_id = models.IntegerField(blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'categorization_function_parameters'
        unique_together = (('extract_study', 'function_name', 'long_name', 'rule_id', 'rank'),)
class CategorizationFunctionQualifiers(models.Model):
    """Unmanaged model for 'categorization_function_qualifiers'.

    Qualifier concepts (vocabulary id + concept code, with an optional
    value as string/number/concept) attached to a categorization
    function rule within an extract study.
    """
    extract_study = models.ForeignKey('ExtractStudy', models.DO_NOTHING, primary_key=True)
    function_name = models.CharField(max_length=100)
    long_name = models.CharField(max_length=100)
    rule_id = models.CharField(max_length=20)
    vocabulary_id = models.CharField(max_length=100)
    concept_code = models.CharField(max_length=100)
    value_vocabulary_id = models.CharField(max_length=100, blank=True, null=True)
    value_as_string = models.CharField(max_length=100, blank=True, null=True)
    value_as_number = models.IntegerField(blank=True, null=True)
    value_as_concept_id = models.CharField(max_length=100, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'categorization_function_qualifiers'
        unique_together = (('extract_study', 'function_name', 'long_name', 'rule_id', 'vocabulary_id', 'concept_code'),)
class CategorizationFunctionTable(models.Model):
    """Unmanaged model for 'categorization_function_table'.

    Source table/column and vocabulary concept that a categorization
    function reads from, per extract study.
    """
    extract_study = models.ForeignKey('ExtractStudy', models.DO_NOTHING, primary_key=True)
    function_name = models.CharField(max_length=100)
    long_name = models.CharField(max_length=100)
    from_table = models.ForeignKey('TableColumn', models.DO_NOTHING, db_column='from_table', blank=True, null=True)
    from_column = models.CharField(max_length=100, blank=True, null=True)
    from_vocabulary = models.ForeignKey('VocabularyConcept', models.DO_NOTHING, blank=True, null=True)
    from_concept_code = models.CharField(max_length=100, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'categorization_function_table'
        unique_together = (('extract_study', 'function_name', 'long_name'),)
#class CdmSource(models.Model):
# cdm_source_name = models.CharField(max_length=255)
# cdm_source_abbreviation = models.CharField(max_length=25, blank=True, null=True)
# cdm_holder = models.CharField(max_length=255, blank=True, null=True)
# source_description = models.TextField(blank=True, null=True)
# source_documentation_reference = models.CharField(max_length=255, blank=True, null=True)
# cdm_etl_reference = models.CharField(max_length=255, blank=True, null=True)
# source_release_date = models.DateField(blank=True, null=True)
# cdm_release_date = models.DateField(blank=True, null=True)
# cdm_version = models.CharField(max_length=10, blank=True, null=True)
# vocabulary_version = models.CharField(max_length=20, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'cdm_source'
#class Cohort(models.Model):
# cohort_definition = models.ForeignKey('CohortDefinition', models.DO_NOTHING, primary_key=True)
# subject_id = models.IntegerField()
# cohort_start_date = models.DateField()
# cohort_end_date = models.DateField()
#
# class Meta:
# managed = False
# db_table = 'cohort'
# unique_together = (('cohort_definition', 'subject_id', 'cohort_start_date', 'cohort_end_date'),)
#class CohortAttribute(models.Model):
# cohort_definition = models.ForeignKey('CohortDefinition', models.DO_NOTHING, primary_key=True)
# cohort_start_date = models.DateField()
# cohort_end_date = models.DateField()
# subject_id = models.IntegerField()
# attribute_definition = models.ForeignKey(AttributeDefinition, models.DO_NOTHING)
# value_as_number = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# value_as_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'cohort_attribute'
# unique_together = (('cohort_definition', 'subject_id', 'cohort_start_date', 'cohort_end_date', 'attribute_definition'),)
#class CohortDefinition(models.Model):
# cohort_definition_id = models.IntegerField(primary_key=True)
# cohort_definition_name = models.CharField(max_length=255)
# cohort_definition_description = models.TextField(blank=True, null=True)
# definition_type_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# cohort_definition_syntax = models.TextField(blank=True, null=True)
# subject_concept_id = models.IntegerField()
# cohort_initiation_date = models.DateField(blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'cohort_definition'
class Concept(models.Model):
    """Unmanaged model for the 'concept' vocabulary table.

    Concept id/name/code with links to its domain, vocabulary and
    concept class, plus validity dates. Presumably follows the OMOP CDM
    vocabulary schema — TODO confirm.
    """
    concept_id = models.IntegerField(primary_key=True)
    concept_name = models.CharField(max_length=255)
    domain = models.ForeignKey('Domain', models.DO_NOTHING)
    vocabulary = models.ForeignKey('Vocabulary', models.DO_NOTHING)
    concept_class = models.ForeignKey('ConceptClass', models.DO_NOTHING)
    standard_concept = models.CharField(max_length=1, blank=True, null=True)
    concept_code = models.CharField(max_length=50)
    valid_start_date = models.DateField()
    valid_end_date = models.DateField()
    invalid_reason = models.CharField(max_length=1, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'concept'
class ConceptAncestor(models.Model):
    """Unmanaged model for 'concept_ancestor'.

    Transitive ancestor/descendant pairs between concepts with the
    min/max number of hierarchy levels separating them.

    NOTE(review): composite key emulated via primary_key on
    ancestor_concept_id plus unique_together.
    """
    ancestor_concept_id = models.IntegerField(primary_key=True)
    descendant_concept_id = models.IntegerField()
    min_levels_of_separation = models.IntegerField()
    max_levels_of_separation = models.IntegerField()
    class Meta:
        managed = False
        db_table = 'concept_ancestor'
        unique_together = (('ancestor_concept_id', 'descendant_concept_id'),)
class ConceptClass(models.Model):
    """Unmanaged model for 'concept_class'.

    Concept-class lookup: string id, display name and the concept that
    defines the class.
    """
    concept_class_id = models.CharField(primary_key=True, max_length=20)
    concept_class_name = models.CharField(max_length=255)
    concept_class_concept = models.ForeignKey(Concept, models.DO_NOTHING)
    class Meta:
        managed = False
        db_table = 'concept_class'
class ConceptRelationship(models.Model):
    """Unmanaged model for 'concept_relationship'.

    A typed relationship between two concept ids with validity dates.

    NOTE(review): composite key emulated via primary_key on
    concept_id_1 plus unique_together.
    """
    concept_id_1 = models.IntegerField(primary_key=True)
    concept_id_2 = models.IntegerField()
    relationship = models.ForeignKey('Relationship', models.DO_NOTHING)
    valid_start_date = models.DateField()
    valid_end_date = models.DateField()
    invalid_reason = models.CharField(max_length=1, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'concept_relationship'
        unique_together = (('concept_id_1', 'concept_id_2', 'relationship'),)
class ConceptSynonym(models.Model):
    """Unmanaged model for 'concept_synonym'.

    Alternative names for a concept, tagged with a language concept id.
    """
    concept = models.ForeignKey(Concept, models.DO_NOTHING)
    concept_synonym_name = models.CharField(max_length=1000)
    language_concept_id = models.IntegerField()
    class Meta:
        managed = False
        db_table = 'concept_synonym'
#
#class ConditionEra(models.Model):
# condition_era_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person', models.DO_NOTHING)
# condition_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# condition_era_start_date = models.DateField()
# condition_era_end_date = models.DateField()
# condition_occurrence_count = models.IntegerField(blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'condition_era'
#class ConditionOccurrence(models.Model):
# condition_occurrence_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person', models.DO_NOTHING)
# condition_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# condition_start_date = models.DateField()
# condition_start_datetime = models.DateTimeField()
# condition_end_date = models.DateField(blank=True, null=True)
# condition_end_datetime = models.DateTimeField(blank=True, null=True)
# condition_type_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# stop_reason = models.CharField(max_length=20, blank=True, null=True)
# provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
# visit_occurrence = models.ForeignKey('VisitOccurrence', models.DO_NOTHING, blank=True, null=True)
# condition_source_value = models.CharField(max_length=50, blank=True, null=True)
# condition_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# condition_status_source_value = models.CharField(max_length=50, blank=True, null=True)
# condition_status_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'condition_occurrence'
#
#class Cost(models.Model):
# cost_id = models.IntegerField(primary_key=True)
# cost_event_id = models.IntegerField()
# cost_domain_id = models.CharField(max_length=20)
# cost_type_concept_id = models.IntegerField()
# currency_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# total_charge = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# total_cost = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# total_paid = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# paid_by_payer = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# paid_by_patient = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# paid_patient_copay = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# paid_patient_coinsurance = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# paid_patient_deductible = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# paid_by_primary = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# paid_ingredient_cost = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# paid_dispensing_fee = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# payer_plan_period = models.ForeignKey('PayerPlanPeriod', models.DO_NOTHING, blank=True, null=True)
# amount_allowed = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# revenue_code_concept_id = models.IntegerField(blank=True, null=True)
# reveue_code_source_value = models.CharField(max_length=50, blank=True, null=True)
# drg_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# drg_source_value = models.CharField(max_length=3, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'cost'
class Death(models.Model):
    """Unmanaged model for the 'death' table.

    One row per person: death date/datetime, death-type concept and
    cause-of-death concept/source fields. Distinct related_names keep
    the multiple Concept foreign keys from clashing.
    """
    person = models.ForeignKey('Person', models.DO_NOTHING, primary_key=True)
    death_date = models.DateField()
    death_datetime = models.DateTimeField(blank=True, null=True)
    death_type_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'death_type_concept')
    cause_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'cause_concept')
    cause_source_value = models.CharField(max_length=50, blank=True, null=True)
    cause_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'cause_source_concept')
    class Meta:
        managed = False
        db_table = 'death'
#class DeviceExposure(models.Model):
# device_exposure_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person', models.DO_NOTHING)
# device_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# device_exposure_start_date = models.DateField()
# device_exposure_start_datetime = models.DateTimeField()
# device_exposure_end_date = models.DateField(blank=True, null=True)
# device_exposure_end_datetime = models.DateTimeField(blank=True, null=True)
# device_type_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# unique_device_id = models.CharField(max_length=50, blank=True, null=True)
# quantity = models.IntegerField(blank=True, null=True)
# provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
# visit_occurrence = models.ForeignKey('VisitOccurrence', models.DO_NOTHING, blank=True, null=True)
# device_source_value = models.CharField(max_length=100, blank=True, null=True)
# device_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'device_exposure'
class Domain(models.Model):
    """Unmanaged model for the 'domain' table.

    Domain lookup: string id, display name and its defining concept.
    """
    domain_id = models.CharField(primary_key=True, max_length=20)
    domain_name = models.CharField(max_length=255)
    domain_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'domain_concept')
    class Meta:
        managed = False
        db_table = 'domain'
#class DoseEra(models.Model):
# dose_era_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person', models.DO_NOTHING)
# drug_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# unit_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# dose_value = models.DecimalField(max_digits=65535, decimal_places=65535)
# dose_era_start_date = models.DateField()
# dose_era_end_date = models.DateField()
#
# class Meta:
# managed = False
# db_table = 'dose_era'
#
#
#class DrugEra(models.Model):
# drug_era_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person', models.DO_NOTHING)
# drug_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# drug_era_start_date = models.DateField()
# drug_era_end_date = models.DateField()
# drug_exposure_count = models.IntegerField(blank=True, null=True)
# gap_days = models.IntegerField(blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'drug_era'
#
#
#class DrugExposure(models.Model):
# drug_exposure_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person', models.DO_NOTHING)
# drug_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# drug_exposure_start_date = models.DateField()
# drug_exposure_start_datetime = models.DateTimeField()
# drug_exposure_end_date = models.DateField()
# drug_exposure_end_datetime = models.DateTimeField(blank=True, null=True)
# verbatim_end_date = models.DateField(blank=True, null=True)
# drug_type_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# stop_reason = models.CharField(max_length=20, blank=True, null=True)
# refills = models.IntegerField(blank=True, null=True)
# quantity = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# days_supply = models.IntegerField(blank=True, null=True)
# sig = models.TextField(blank=True, null=True)
# route_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# lot_number = models.CharField(max_length=50, blank=True, null=True)
# provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
# visit_occurrence = models.ForeignKey('VisitOccurrence', models.DO_NOTHING, blank=True, null=True)
# drug_source_value = models.CharField(max_length=50, blank=True, null=True)
# drug_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# route_source_value = models.CharField(max_length=50, blank=True, null=True)
# dose_unit_source_value = models.CharField(max_length=50, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'drug_exposure'
#
#
#class DrugStrength(models.Model):
# drug_concept = models.ForeignKey(Concept, models.DO_NOTHING, primary_key=True)
# ingredient_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# amount_value = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# amount_unit_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# numerator_value = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# numerator_unit_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# denominator_value = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# denominator_unit_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# box_size = models.IntegerField(blank=True, null=True)
# valid_start_date = models.DateField()
# valid_end_date = models.DateField()
# invalid_reason = models.CharField(max_length=1, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'drug_strength'
# unique_together = (('drug_concept', 'ingredient_concept'),)
#
class EventsMapping(models.Model):
    """Unmanaged model for 'events_mapping'.

    Per-study mapping of a source (table, column) to a target
    (table, column), with optional value concept-code, extra
    column/value, source date column and a SQL where-clause.
    """
    study = models.ForeignKey('Study', models.DO_NOTHING, primary_key=True)
    from_table = models.ForeignKey('TableColumn', models.DO_NOTHING, db_column='from_table', related_name = 'from_table')
    from_column = models.CharField(max_length=50)
    to_table = models.ForeignKey('TableColumn', models.DO_NOTHING, db_column='to_table', related_name = 'to_table')
    to_column = models.CharField(max_length=50)
    value_vocabulary_id = models.CharField(max_length=50, blank=True, null=True)
    value_concept_code = models.CharField(max_length=50, blank=True, null=True)
    addl_column = models.CharField(max_length=50, blank=True, null=True)
    addl_value = models.CharField(max_length=50, blank=True, null=True)
    from_date_column = models.CharField(max_length=50, blank=True, null=True)
    where_clause = models.CharField(max_length=256)
    comment = models.CharField(max_length=256, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'events_mapping'
        unique_together = (('study', 'from_table', 'from_column', 'to_table', 'to_column', 'where_clause'),)
class ExtractStudy(models.Model):
    """Unmanaged model for 'extract_study'.

    An extract study: its id, an optional parent study id, name and
    free-text comment.
    """
    extract_study_id = models.IntegerField(primary_key=True)
    study_id = models.IntegerField(blank=True, null=True)
    name = models.CharField(max_length=100, blank=True, null=True)
    comment = models.CharField(max_length=1000, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'extract_study'
#class FactRelationship(models.Model):
# domain_concept_id_1 = models.ForeignKey(Concept, models.DO_NOTHING, db_column='domain_concept_id_1')
# fact_id_1 = models.IntegerField()
# domain_concept_id_2 = models.ForeignKey(Concept, models.DO_NOTHING, db_column='domain_concept_id_2')
# fact_id_2 = models.IntegerField()
# relationship_concept = models.ForeignKey(Concept, models.DO_NOTHING)
#
# class Meta:
# managed = False
# db_table = 'fact_relationship'
class Location(models.Model):
    """Unmanaged model for the 'location' table.

    Postal address fields (all optional) keyed by location_id.
    Column widths (state 2, zip 9) suggest US-style addresses —
    TODO confirm.
    """
    location_id = models.IntegerField(primary_key=True)
    address_1 = models.CharField(max_length=50, blank=True, null=True)
    address_2 = models.CharField(max_length=50, blank=True, null=True)
    city = models.CharField(max_length=50, blank=True, null=True)
    state = models.CharField(max_length=2, blank=True, null=True)
    zip = models.CharField(max_length=9, blank=True, null=True)
    county = models.CharField(max_length=20, blank=True, null=True)
    location_source_value = models.CharField(max_length=50, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'location'
class Measurement(models.Model):
    """Unmanaged model for the 'measurement' table.

    A measurement event for a person: concept, date(s), value (number
    and/or concept), unit, range and source fields. 'provider' is kept
    as a plain integer; the ForeignKey to 'Provider' is commented out —
    presumably the provider table is absent, TODO confirm.
    """
    measurement_id = models.IntegerField(primary_key=True)
    person = models.ForeignKey('Person', models.DO_NOTHING)
    measurement_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'measurement_concept')
    measurement_date = models.DateField()
    measurement_datetime = models.DateTimeField(blank=True, null=True)
    measurement_type_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'measurement_type_concept')
    operator_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'operator_concept')
    value_as_number = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
    value_as_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'measurement_value_as_concept')
    unit_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'measurement_unit_concept')
    range_low = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
    range_high = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
    #provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
    provider = models.IntegerField(blank=True, null=True)
    visit_occurrence = models.ForeignKey('VisitOccurrence', models.DO_NOTHING, blank=True, null=True)
    measurement_source_value = models.CharField(max_length=50, blank=True, null=True)
    measurement_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'measurement_source_concept')
    unit_source_value = models.CharField(max_length=50, blank=True, null=True)
    value_source_value = models.CharField(max_length=50, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'measurement'
class Note(models.Model):
    """Unmanaged model for the 'note' table.

    A clinical note for a person: date(s), type/class concepts, title,
    full text, encoding and language concepts. 'provider' is a plain
    integer; the ForeignKey to 'Provider' is commented out — presumably
    the provider table is absent, TODO confirm.
    """
    note_id = models.IntegerField(primary_key=True)
    person = models.ForeignKey('Person', models.DO_NOTHING)
    note_date = models.DateField()
    note_datetime = models.DateTimeField(blank=True, null=True)
    note_type_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'note_type_concept')
    note_class_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'note_class_concept')
    note_title = models.CharField(max_length=250, blank=True, null=True)
    note_text = models.TextField()
    encoding_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'encoding_concept')
    language_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'language_concept')
    #provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
    provider = models.IntegerField(blank=True, null=True)
    visit_occurrence = models.ForeignKey('VisitOccurrence', models.DO_NOTHING, blank=True, null=True)
    note_source_value = models.CharField(max_length=50, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'note'
#class NoteNlp(models.Model):
# note_nlp_id = models.BigIntegerField(primary_key=True)
# note = models.ForeignKey(Note, models.DO_NOTHING)
# section_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# snippet = models.CharField(max_length=250, blank=True, null=True)
# offset = models.CharField(max_length=250, blank=True, null=True)
# lexical_variant = models.CharField(max_length=250)
# note_nlp_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# note_nlp_source_concept_id = models.IntegerField(blank=True, null=True)
# nlp_system = models.CharField(max_length=250, blank=True, null=True)
# nlp_date = models.DateField()
# nlp_datetime = models.DateTimeField(blank=True, null=True)
# term_exists = models.CharField(max_length=1, blank=True, null=True)
# term_temporal = models.CharField(max_length=50, blank=True, null=True)
# term_modifiers = models.CharField(max_length=2000, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'note_nlp'
class Observation(models.Model):
    """Unmanaged model for the 'observation' table.

    An observation event for a person: concept, date(s), value (number,
    string or concept), qualifier/unit concepts and source fields.
    'provider' is a plain integer; the ForeignKey to 'Provider' is
    commented out — presumably the provider table is absent, TODO
    confirm.
    """
    observation_id = models.IntegerField(primary_key=True)
    person = models.ForeignKey('Person', models.DO_NOTHING)
    observation_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'observation_concept')
    observation_date = models.DateField()
    observation_datetime = models.DateTimeField(blank=True, null=True)
    observation_type_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'observation_type_concept')
    value_as_number = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
    value_as_string = models.CharField(max_length=60, blank=True, null=True)
    value_as_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'observation_value_as_concept')
    # NOTE(review): only Concept FK here without an explicit related_name;
    # its default reverse accessor is derived from the model name.
    qualifier_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
    unit_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'observation_unit_concept')
    #provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
    provider = models.IntegerField(blank=True, null=True)
    visit_occurrence = models.ForeignKey('VisitOccurrence', models.DO_NOTHING, blank=True, null=True)
    observation_source_value = models.CharField(max_length=50, blank=True, null=True)
    observation_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'observation_source_concept')
    unit_source_value = models.CharField(max_length=50, blank=True, null=True)
    qualifier_source_value = models.CharField(max_length=50, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'observation'
#class ObservationPeriod(models.Model):
# observation_period_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person', models.DO_NOTHING)
# observation_period_start_date = models.DateField()
# observation_period_start_datetime = models.DateTimeField()
# observation_period_end_date = models.DateField()
# observation_period_end_datetime = models.DateTimeField()
# period_type_concept = models.ForeignKey(Concept, models.DO_NOTHING)
#
# class Meta:
# managed = False
# db_table = 'observation_period'
class OhdsiCalculationArgument(models.Model):
    """One argument of a per-study OHDSI calculation function (unmanaged
    legacy table).  The underlying table has a composite key; Django has
    no composite primary keys, so ``study`` carries ``primary_key=True``
    as a stand-in and the true key lives in ``unique_together``.
    """
    vocabulary = models.ForeignKey('VocabularyConcept', models.DO_NOTHING, related_name = 'vocabulary_concept')
    concept_code = models.CharField(max_length=100)
    # primary_key=True is only a placeholder for the composite key below.
    study = models.ForeignKey('Study', models.DO_NOTHING, primary_key=True)
    function_name = models.CharField(max_length=100)
    argument_order = models.IntegerField(blank=True, null=True)
    argument_name = models.CharField(max_length=30, blank=True, null=True)
    value_field = models.CharField(max_length=20, blank=True, null=True)
    to_concept_code = models.CharField(max_length=100)
    to_vocabulary = models.ForeignKey('VocabularyConcept', models.DO_NOTHING, related_name = 'to_vocabulary_concept')
    from_table = models.CharField(max_length=100, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'ohdsi_calculation_argument'
        unique_together = (('study', 'function_name', 'to_concept_code', 'to_vocabulary', 'vocabulary', 'concept_code'),)
class OhdsiCalculationFunction(models.Model):
    """A named calculation function attached to a study, targeting a
    vocabulary concept and optionally a destination table/column
    (unmanaged legacy table).  As with OhdsiCalculationArgument, the
    ``primary_key=True`` on ``study`` is a stand-in for the composite
    key declared in ``unique_together``.
    """
    study = models.ForeignKey('Study', models.DO_NOTHING, primary_key=True)
    function_name = models.CharField(max_length=100)
    to_vocabulary = models.ForeignKey('VocabularyConcept', models.DO_NOTHING)
    to_concept_code = models.CharField(max_length=100)
    to_table = models.CharField(max_length=100, blank=True, null=True)
    to_column = models.CharField(max_length=100, blank=True, null=True)
    # Ordering of functions within a study; nullable in the legacy schema.
    function_order = models.IntegerField(blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'ohdsi_calculation_function'
        unique_together = (('study', 'function_name', 'to_concept_code', 'to_vocabulary'),)
#class PayerPlanPeriod(models.Model):
# payer_plan_period_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person', models.DO_NOTHING)
# payer_plan_period_start_date = models.DateField()
# payer_plan_period_end_date = models.DateField()
# payer_source_value = models.CharField(max_length=50, blank=True, null=True)
# plan_source_value = models.CharField(max_length=50, blank=True, null=True)
# family_source_value = models.CharField(max_length=50, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'payer_plan_period'
class Person(models.Model):
    """OMOP CDM ``person`` table: one row per individual, with
    demographics expressed as Concept foreign keys plus the original
    source values (unmanaged legacy table).
    """
    person_id = models.IntegerField(primary_key=True)
    gender_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'gender_concept')
    year_of_birth = models.IntegerField()
    month_of_birth = models.IntegerField(blank=True, null=True)
    day_of_birth = models.IntegerField(blank=True, null=True)
    birth_datetime = models.DateTimeField(blank=True, null=True)
    race_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'race_concept')
    ethnicity_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'ethnicity_concept')
    location = models.ForeignKey(Location, models.DO_NOTHING, blank=True, null=True)
    # provider/care_site kept as plain integers: the Provider and CareSite
    # FK targets are commented out elsewhere in this file.
    #provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
    provider = models.IntegerField(blank=True, null=True)
    #care_site = models.ForeignKey(CareSite, models.DO_NOTHING, blank=True, null=True)
    care_site = models.IntegerField(blank=True, null=True)
    person_source_value = models.CharField(max_length=50, blank=True, null=True)
    gender_source_value = models.CharField(max_length=50, blank=True, null=True)
    gender_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'gender_source_concept')
    race_source_value = models.CharField(max_length=50, blank=True, null=True)
    race_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'race_source_concept')
    ethnicity_source_value = models.CharField(max_length=50, blank=True, null=True)
    ethnicity_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'ethnicity_source_concept')
    class Meta:
        managed = False
        db_table = 'person'
class ProcedureOccurrence(models.Model):
    """OMOP CDM ``procedure_occurrence`` table: a procedure performed on
    a person, with type/modifier concepts and the original source codes
    (unmanaged legacy table).
    """
    procedure_occurrence_id = models.IntegerField(primary_key=True)
    person = models.ForeignKey(Person, models.DO_NOTHING)
    procedure_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'procedure_concept')
    procedure_date = models.DateField()
    procedure_datetime = models.DateTimeField()
    procedure_type_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'procedure_type_concept')
    modifier_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'modifier_concept')
    quantity = models.IntegerField(blank=True, null=True)
    # Plain integer: the Provider FK target is commented out in this file.
    #provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
    provider = models.IntegerField(blank=True, null=True)
    visit_occurrence = models.ForeignKey('VisitOccurrence', models.DO_NOTHING, blank=True, null=True)
    procedure_source_value = models.CharField(max_length=50, blank=True, null=True)
    procedure_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'procedure_source_concept')
    qualifier_source_value = models.CharField(max_length=50, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'procedure_occurrence'
#class Provider(models.Model):
# provider_id = models.IntegerField(primary_key=True)
# provider_name = models.CharField(max_length=255, blank=True, null=True)
# npi = models.CharField(max_length=20, blank=True, null=True)
# dea = models.CharField(max_length=20, blank=True, null=True)
# specialty_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# care_site = models.ForeignKey(CareSite, models.DO_NOTHING, blank=True, null=True)
# year_of_birth = models.IntegerField(blank=True, null=True)
# gender_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# provider_source_value = models.CharField(max_length=50, blank=True, null=True)
# specialty_source_value = models.CharField(max_length=50, blank=True, null=True)
# specialty_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# gender_source_value = models.CharField(max_length=50, blank=True, null=True)
# gender_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'provider'
class Relationship(models.Model):
    """OMOP vocabulary ``relationship`` table: the catalogue of concept
    relationship types, each paired with its reverse via a self-FK
    (unmanaged legacy table).
    """
    relationship_id = models.CharField(primary_key=True, max_length=20)
    relationship_name = models.CharField(max_length=255)
    # 'Y'/'N' style single-character flags from the source schema.
    is_hierarchical = models.CharField(max_length=1)
    defines_ancestry = models.CharField(max_length=1)
    # Self-referential FK: the inverse relationship type of this row.
    reverse_relationship = models.ForeignKey('self', models.DO_NOTHING)
    relationship_concept = models.ForeignKey(Concept, models.DO_NOTHING)
    class Meta:
        managed = False
        db_table = 'relationship'
class SourceToConceptMap(models.Model):
    """OMOP ``source_to_concept_map`` table: maps a source code within a
    source vocabulary to a standard target concept, with a validity
    window (unmanaged legacy table).  ``primary_key=True`` on
    ``source_vocabulary`` is a stand-in for the composite key recorded
    in ``unique_together``.
    """
    source_code = models.CharField(max_length=50)
    source_concept_id = models.IntegerField()
    source_vocabulary = models.ForeignKey('Vocabulary', models.DO_NOTHING, primary_key=True, related_name = 'source_vocabulary')
    source_code_description = models.CharField(max_length=255, blank=True, null=True)
    target_concept = models.ForeignKey(Concept, models.DO_NOTHING)
    target_vocabulary = models.ForeignKey('Vocabulary', models.DO_NOTHING, related_name = 'target_vocabulary')
    valid_start_date = models.DateField()
    valid_end_date = models.DateField()
    invalid_reason = models.CharField(max_length=1, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'source_to_concept_map'
        unique_together = (('source_vocabulary', 'target_concept', 'source_code', 'valid_end_date'),)
#
#class Specimen(models.Model):
# specimen_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey(Person, models.DO_NOTHING)
# specimen_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# specimen_type_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# specimen_date = models.DateField()
# specimen_datetime = models.DateTimeField(blank=True, null=True)
# quantity = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# unit_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# anatomic_site_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# disease_status_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# specimen_source_id = models.CharField(max_length=50, blank=True, null=True)
# specimen_source_value = models.CharField(max_length=50, blank=True, null=True)
# unit_source_value = models.CharField(max_length=50, blank=True, null=True)
# anatomic_site_source_value = models.CharField(max_length=50, blank=True, null=True)
# disease_status_source_value = models.CharField(max_length=50, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'specimen'
#
class Study(models.Model):
    """Control/metadata table describing a source study being loaded and
    migrated into the OHDSI schema: id ranges, ETL progress flags, and
    the table/column/function names used to extract sex and race
    (unmanaged legacy table).
    """
    study_id = models.IntegerField(primary_key=True)
    study_name = models.CharField(max_length=100, blank=True, null=True)
    person_id_range_start = models.IntegerField(blank=True, null=True)
    person_id_range_end = models.IntegerField(blank=True, null=True)
    observation_range_start = models.IntegerField(blank=True, null=True)
    observation_range_end = models.IntegerField(blank=True, null=True)
    # ETL progress flags: raw data loaded, migrated to OHDSI, derived
    # values calculated.
    loaded = models.BooleanField()
    migrated = models.BooleanField()
    calculated = models.BooleanField()
    id_field_name = models.CharField(max_length=10, blank=True, null=True)
    person_id_prefix = models.CharField(max_length=10, blank=True, null=True)
    person_id_select = models.CharField(max_length=100, blank=True, null=True)
    person_details_select = models.CharField(max_length=200, blank=True, null=True)
    study_class = models.CharField(max_length=50, blank=True, null=True)
    # Where/how to find each demographic attribute in the source study.
    sex_table_name = models.CharField(max_length=100, blank=True, null=True)
    sex_column_name = models.CharField(max_length=100, blank=True, null=True)
    sex_function_name = models.CharField(max_length=100, blank=True, null=True)
    race_table_name = models.CharField(max_length=100, blank=True, null=True)
    race_column_name = models.CharField(max_length=100, blank=True, null=True)
    race_function_name = models.CharField(max_length=100, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'study'
class TableColumn(models.Model):
    """Registry of (table, column) pairs known to a study's source
    schema; the pair is unique across the table (unmanaged legacy
    table).
    """
    table_column_id = models.IntegerField(primary_key=True)
    study = models.ForeignKey(Study, models.DO_NOTHING, blank=True, null=True)
    table_name = models.CharField(max_length=50)
    column_name = models.CharField(max_length=50)
    class Meta:
        managed = False
        db_table = 'table_column'
        unique_together = (('table_name', 'column_name'),)
#class StudyConceptType(models.Model):
# study_id = models.IntegerField(primary_key=True)
# study_concept_type_id = models.AutoField()
# name = models.CharField(max_length=50)
# filename = models.CharField(max_length=50)
# description = models.CharField(max_length=1000, blank=True, null=True)
# considered = models.NullBooleanField()
#
# class Meta:
# managed = False
# db_table = 'study_concept_type'
# unique_together = (('study_id', 'study_concept_type_id'),)
#
#
#class StudyConceptValue(models.Model):
# study = models.ForeignKey(StudyConceptType, models.DO_NOTHING, primary_key=True)
# study_concept_type_id = models.IntegerField()
# study_concept_value_id = models.AutoField()
# name = models.CharField(max_length=250)
# description = models.CharField(max_length=1000, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'study_concept_value'
# unique_together = (('study', 'study_concept_type_id', 'study_concept_value_id'),)
class StudyToOhdsiMapping(models.Model):
    """ETL mapping row: describes how a (table, column) in a source study
    maps to an OHDSI destination — via an optional transform function,
    a vocabulary/concept code, and optional where-clause filtering
    (unmanaged legacy table).
    """
    study = models.ForeignKey(Study, models.DO_NOTHING)
    # TODO: from_table should ideally be an FK into TableColumn (see the
    # commented alternative below); kept as a plain CharField for now.
    #from_table = models.ForeignKey('TableColumn', models.DO_NOTHING, db_column='from_table', related_name = 'study_from_table')
    from_table = models.CharField(max_length=100)
    from_column = models.CharField(max_length=100)
    function_name = models.CharField(max_length=100, blank=True, null=True)
    vocabulary_id = models.CharField(max_length=100)
    concept_code = models.CharField(max_length=100, blank=True, null=True)
    # (vocabulary_id, concept_code) should be a single composite foreign
    # key, but Django doesn't support composite keys natively.  TODO
    # https://pypi.python.org/pypi/django-composite-foreignkey
    #vocabulary_id = models.ForeignKey('Concept', models.DO_NOTHING, blank=True, null=True)
    #concept_code = models.ForeignKey('Concept', models.DO_NOTHING, blank=True, null=True)
    # TODO: the FK is kept in the database (so invalid concepts can't be
    # inserted) but not modelled here, which means it can't be traversed
    # from the ORM.
    #vocabulary = models.ForeignKey('VocabularyConcept', models.DO_NOTHING, blank=True, null=True)
    #concept_code = models.ForeignKey('VocabularyConcept', models.DO_NOTHING, blank=True, null=True)
    #to_table = models.ForeignKey('TableColumn', models.DO_NOTHING, db_column='to_table', blank=True, null=True, related_name = 'ohdsi_to_table')
    to_table = models.CharField(max_length=100, blank=True, null=True)
    to_column = models.CharField(max_length=100, blank=True, null=True)
    addl_value_1 = models.CharField(max_length=20, blank=True, null=True)
    addl_column_1 = models.CharField(max_length=20, blank=True, null=True)
    from_where_clause = models.CharField(max_length=100, blank=True, null=True)
    comment = models.CharField(max_length=250, blank=True, null=True)
    from_where_column = models.CharField(max_length=100, blank=True, null=True)
    units = models.CharField(max_length=20, blank=True, null=True)
    # NOTE(review): NullBooleanField is deprecated (removed in Django 4.0);
    # BooleanField(blank=True, null=True) is the modern equivalent —
    # confirm the project's Django version before changing.
    has_date = models.NullBooleanField()
    class Meta:
        managed = False
        db_table = 'study_to_ohdsi_mapping'
    def __str__(self):
        # Debug representation; the two 'where:' slots are
        # from_where_column then from_where_clause, in that order.
        return "table:{} coln:{} fun:{} v:{} c:{} table:{} column:{} where:{} where:{} has_date:{}".format(
            self.from_table, self.from_column, self.function_name, self.vocabulary_id,
            self.concept_code, self.to_table, self.to_column, self.from_where_column, self.from_where_clause, self.has_date)
class VisitOccurrence(models.Model):
    """OMOP CDM ``visit_occurrence`` table: one healthcare encounter for
    a person, with visit type, admit/discharge concepts, and source
    values (unmanaged legacy table).
    """
    visit_occurrence_id = models.IntegerField(primary_key=True)
    person = models.ForeignKey(Person, models.DO_NOTHING)
    visit_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'visit_concept')
    visit_start_date = models.DateField()
    visit_start_datetime = models.DateTimeField(blank=True, null=True)
    visit_end_date = models.DateField()
    visit_end_datetime = models.DateTimeField(blank=True, null=True)
    visit_type_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'visit_type_concept')
    # provider/care_site kept as plain integers: their FK target models
    # are commented out elsewhere in this file.
    #provider = models.ForeignKey(Provider, models.DO_NOTHING, blank=True, null=True)
    provider = models.IntegerField(blank=True, null=True)
    #care_site = models.ForeignKey(CareSite, models.DO_NOTHING, blank=True, null=True)
    care_site = models.IntegerField(blank=True, null=True)
    visit_source_value = models.CharField(max_length=50, blank=True, null=True)
    visit_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'visit_source_concept')
    admitting_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'admitting_source_concept')
    admitting_source_value = models.CharField(max_length=50, blank=True, null=True)
    discharge_to_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'discharge_to_source_concept')
    discharge_to_source_value = models.CharField(max_length=50, blank=True, null=True)
    # Plain integer rather than a self-FK in this mapping.
    preceding_visit_occurrence_id = models.IntegerField(blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'visit_occurrence'
class Vocabulary(models.Model):
    """OMOP ``vocabulary`` table: the catalogue of vocabularies (SNOMED,
    RxNorm, ...), each represented by a concept of its own (unmanaged
    legacy table).
    """
    vocabulary_id = models.CharField(primary_key=True, max_length=20)
    vocabulary_name = models.CharField(max_length=255)
    vocabulary_reference = models.CharField(max_length=255, blank=True, null=True)
    vocabulary_version = models.CharField(max_length=255, blank=True, null=True)
    vocabulary_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'vocabulary_concept')
    class Meta:
        managed = False
        db_table = 'vocabulary'
class VocabularyConcept(models.Model):
    """Lookup of (vocabulary_id, concept_code) pairs; the pair is unique
    and is what other mapping tables reference (unmanaged legacy table).
    NOTE(review): no explicit primary key is declared, so Django assumes
    an ``id`` column exists on ``vocabulary_concept`` — confirm.
    """
    vocabulary_id = models.CharField(max_length=20)
    concept_code = models.CharField(max_length=50)
    class Meta:
        managed = False
        db_table = 'vocabulary_concept'
        unique_together = (('vocabulary_id', 'concept_code'),)
class StudyMappingArguments(models.Model):
    """Per-mapping argument values for a study's ETL functions: which
    source (table, column, function, where-clause) a value comes from,
    the mapped string/number/concept, and optional linear transform
    (factor/shift) into a target concept (unmanaged legacy table).
    NOTE(review): no explicit primary key is declared, so Django assumes
    an ``id`` column exists on ``study_mapping_arguments`` — confirm.
    """
    study_id = models.IntegerField(blank=True, null=True)
    from_table = models.CharField(max_length=100, blank=True, null=True)
    from_column = models.CharField(max_length=100, blank=True, null=True)
    function_name = models.CharField(max_length=100, blank=True, null=True)
    from_where_clause = models.CharField(max_length=100, blank=True, null=True)
    from_where_column = models.CharField(max_length=100, blank=True, null=True)
    mapped_string = models.CharField(max_length=100, blank=True, null=True)
    mapped_number = models.IntegerField(blank=True, null=True)
    mapped_concept_vocabulary_id = models.CharField(max_length=20, blank=True, null=True)
    mapped_concept_code = models.CharField(max_length=50, blank=True, null=True)
    # Linear transform applied to the mapped value: value * factor + shift.
    # (Presumed from the field names — confirm against the ETL code.)
    transform_factor = models.FloatField(blank=True, null=True)
    transform_shift = models.FloatField(blank=True, null=True)
    to_concept_vocabulary_id = models.CharField(max_length=20, blank=True, null=True)
    to_concept_code = models.CharField(max_length=50, blank=True, null=True)
    def __str__(self):
        # Exhaustive debug representation: every field, labelled.
        return "study_id:{} from_table:{} from_column:{} function_name:{} from_where_clause:{} from_where_column:{} mapped_string:{} mapped_number:{} mapped_concept_vocabulary_id:{} mapped_concept_code:{} transform_factor:{} transform_shift:{} to_concept_vocabulary_id:{} to_concept_code:{}".format(
            self.study_id,
            self.from_table,
            self.from_column,
            self.function_name,
            self.from_where_clause,
            self.from_where_column,
            self.mapped_string,
            self.mapped_number,
            self.mapped_concept_vocabulary_id,
            self.mapped_concept_code,
            self.transform_factor,
            self.transform_shift,
            self.to_concept_vocabulary_id,
            self.to_concept_code)
    class Meta:
        managed = False
        db_table = 'study_mapping_arguments'
| 50.740546
| 300
| 0.744498
| 6,131
| 48,305
| 5.613277
| 0.058392
| 0.074008
| 0.106901
| 0.139794
| 0.812262
| 0.768415
| 0.732471
| 0.675432
| 0.628621
| 0.569345
| 0
| 0.01671
| 0.14394
| 48,305
| 951
| 301
| 50.793901
| 0.815535
| 0.411345
| 0
| 0.330377
| 0
| 0.002217
| 0.092635
| 0.02633
| 0
| 0
| 0
| 0.001052
| 0
| 1
| 0.004435
| false
| 0
| 0.008869
| 0.004435
| 0.791574
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
f2c3001f68fb60667771dffa13aaa385ab3d02ef
| 87,668
|
py
|
Python
|
pysnmp_mibs/POLICY-BASED-MANAGEMENT-MIB.py
|
jackjack821/pysnmp-mibs
|
9835ea0bb2420715caf4ee9aaa07d59bb263acd6
|
[
"BSD-2-Clause"
] | 6
|
2017-04-21T13:48:08.000Z
|
2022-01-06T19:42:52.000Z
|
pysnmp_mibs/POLICY-BASED-MANAGEMENT-MIB.py
|
jackjack821/pysnmp-mibs
|
9835ea0bb2420715caf4ee9aaa07d59bb263acd6
|
[
"BSD-2-Clause"
] | 1
|
2020-05-05T16:42:25.000Z
|
2020-05-05T16:42:25.000Z
|
pysnmp_mibs/POLICY-BASED-MANAGEMENT-MIB.py
|
jackjack821/pysnmp-mibs
|
9835ea0bb2420715caf4ee9aaa07d59bb263acd6
|
[
"BSD-2-Clause"
] | 6
|
2020-02-08T20:28:49.000Z
|
2021-09-14T13:36:46.000Z
|
#
# PySNMP MIB module POLICY-BASED-MANAGEMENT-MIB (http://pysnmp.sf.net)
# ASN.1 source http://mibs.snmplabs.com:80/asn1/POLICY-BASED-MANAGEMENT-MIB
# Produced by pysmi-0.0.7 at Sun Feb 14 00:23:59 2016
# On host bldfarm platform Linux version 4.1.13-100.fc21.x86_64 by user goose
# Using Python version 3.5.0 (default, Jan 5 2016, 17:11:52)
#
( OctetString, Integer, ObjectIdentifier, ) = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
( SnmpAdminString, ) = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
( NotificationGroup, ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
( Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, IpAddress, ObjectIdentity, ModuleIdentity, mib_2, TimeTicks, Counter64, iso, Unsigned32, Bits, MibIdentifier, Counter32, NotificationType, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "IpAddress", "ObjectIdentity", "ModuleIdentity", "mib-2", "TimeTicks", "Counter64", "iso", "Unsigned32", "Bits", "MibIdentifier", "Counter32", "NotificationType")
( DisplayString, RowStatus, TextualConvention, DateAndTime, RowPointer, StorageType, ) = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention", "DateAndTime", "RowPointer", "StorageType")
pmMib = ModuleIdentity((1, 3, 6, 1, 2, 1, 124)).setRevisions(("2005-02-07 00:00",))
if mibBuilder.loadTexts: pmMib.setLastUpdated('200502070000Z')
if mibBuilder.loadTexts: pmMib.setOrganization('IETF SNMP Configuration Working Group')
if mibBuilder.loadTexts: pmMib.setContactInfo('\n\n\n\n\n Steve Waldbusser\n Phone: +1-650-948-6500\n Fax: +1-650-745-0671\n Email: waldbusser@nextbeacon.com\n\n Jon Saperia (WG Co-chair)\n JDS Consulting, Inc.\n 84 Kettell Plain Road.\n Stow MA 01775\n USA\n Phone: +1-978-461-0249\n Fax: +1-617-249-0874\n Email: saperia@jdscons.com\n\n Thippanna Hongal\n Riverstone Networks, Inc.\n 5200 Great America Parkway\n Santa Clara, CA, 95054\n USA\n\n Phone: +1-408-878-6562\n Fax: +1-408-878-6501\n Email: hongal@riverstonenet.com\n\n David Partain (WG Co-chair)\n Postal: Ericsson AB\n P.O. Box 1248\n SE-581 12 Linkoping\n Sweden\n Tel: +46 13 28 41 44\n E-mail: David.Partain@ericsson.com\n\n Any questions or comments about this document can also be\n directed to the working group at snmpconf@snmp.com.')
if mibBuilder.loadTexts: pmMib.setDescription('The MIB module for policy-based configuration of SNMP\n infrastructures.\n\n Copyright (C) The Internet Society (2005). This version of\n this MIB module is part of RFC 4011; see the RFC itself for\n full legal notices.')
class PmUTF8String(OctetString, TextualConvention):
    """Textual convention for a UTF-8 string of 0..65535 octets (RFC 4011).

    Implemented as an OctetString with a tightened size constraint; used by
    the pmPolicy* and pmRole* columns below for human-readable values.
    """
    subtypeSpec = OctetString.subtypeSpec+ValueSizeConstraint(0,65535)
# ---------------------------------------------------------------------------
# pmPolicyTable (mib-2 124.1)
# One row per policy: a condition script paired with an action script, plus
# scheduling, precedence, latency, debugging, admin-status and row-management
# columns.  The index columns (pmPolicyAdminGroup, pmPolicyIndex) carry no
# setMaxAccess() call, i.e. they are not-accessible per SMIv2 convention.
# ---------------------------------------------------------------------------
pmPolicyTable = MibTable((1, 3, 6, 1, 2, 1, 124, 1), )
if mibBuilder.loadTexts: pmPolicyTable.setDescription('The policy table. A policy is a pairing of a\n policyCondition and a policyAction that is used to apply the\n action to a selected set of elements.')
pmPolicyEntry = MibTableRow((1, 3, 6, 1, 2, 1, 124, 1, 1), ).setIndexNames((0, "POLICY-BASED-MANAGEMENT-MIB", "pmPolicyAdminGroup"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmPolicyIndex"))
if mibBuilder.loadTexts: pmPolicyEntry.setDescription('An entry in the policy table representing one policy.')
pmPolicyAdminGroup = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 1), PmUTF8String().subtype(subtypeSpec=ValueSizeConstraint(0,32)))
if mibBuilder.loadTexts: pmPolicyAdminGroup.setDescription('An administratively assigned string that can be used to group\n policies for convenience, for readability, or to simplify\n configuration of access control.\n\n The value of this string does not affect policy processing in\n any way. If grouping is not desired or necessary, this object\n may be set to a zero-length string.')
pmPolicyIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1,4294967295)))
if mibBuilder.loadTexts: pmPolicyIndex.setDescription('A unique index for this policy entry, unique among all\n policies regardless of administrative group.')
pmPolicyPrecedenceGroup = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 3), PmUTF8String().subtype(subtypeSpec=ValueSizeConstraint(0,32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyPrecedenceGroup.setDescription('An administratively assigned string that is used to group\n policies. For each element, only one policy in the same\n precedence group may be active on that element. If multiple\n policies would be active on an element (because their\n conditions return non-zero), the execution environment will\n only allow the policy with the highest value of\n pmPolicyPrecedence to be active.\n\n All values of this object must have been successfully\n transformed by Stringprep RFC 3454. Management stations\n must perform this translation and must only set this object to\n string values that have been transformed.')
pmPolicyPrecedence = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0,65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyPrecedence.setDescription("If, while checking to see which policy conditions match an\n element, 2 or more ready policies in the same precedence group\n match the same element, the pmPolicyPrecedence object provides\n the rule to arbitrate which single policy will be active on\n 'this element'. Of policies in the same precedence group, only\n the ready and matching policy with the highest precedence\n value (e.g., 2 is higher than 1) will have its policy action\n periodically executed on 'this element'.\n\n When a policy is active on an element but the condition ceases\n to match the element, its action (if currently running) will\n be allowed to finish and then the condition-matching ready\n policy with the next-highest precedence will immediately\n become active (and have its action run immediately). If the\n condition of a higher-precedence ready policy suddenly begins\n matching an element, the previously-active policy's action (if\n currently running) will be allowed to finish and then the\n higher precedence policy will immediately become active. Its\n action will run immediately, and any lower-precedence matching\n policy will not be active anymore.\n\n In the case where multiple ready policies share the highest\n value, it is an implementation-dependent matter as to which\n single policy action will be chosen.\n\n Note that if it is necessary to take certain actions after a\n policy is no longer active on an element, these actions should\n be included in a lower-precedence policy that is in the same\n precedence group.")
pmPolicySchedule = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1,4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicySchedule.setDescription("This policy will be ready if any of the associated schedule\n entries are active.\n\n If the value of this object is 0, this policy is always\n ready.\n\n If the value of this object is non-zero but doesn't\n refer to a schedule group that includes an active schedule,\n then the policy will not be ready, even if this is due to a\n misconfiguration of this object or the pmSchedTable.")
pmPolicyElementTypeFilter = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 6), PmUTF8String().subtype(subtypeSpec=ValueSizeConstraint(0,128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyElementTypeFilter.setDescription("This object specifies the element types for which this policy\n can be executed.\n\n The format of this object will be a sequence of\n pmElementTypeRegOIDPrefix values, encoded in the following\n BNF form:\n\n elementTypeFilter: oid [ ';' oid ]*\n oid: subid [ '.' subid ]*\n subid: '0' | decimal_constant\n\n For example, to register for the policy to be run on all\n interface elements, the 'ifEntry' element type will be\n registered as '1.3.6.1.2.1.2.2.1'.\n\n If a value is included that does not represent a registered\n pmElementTypeRegOIDPrefix, then that value will be ignored.")
# NOTE: the two ScriptIndex columns are read-only — their values are assigned
# by the agent at row creation, not written by the manager.
pmPolicyConditionScriptIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1,4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: pmPolicyConditionScriptIndex.setDescription("A pointer to the row or rows in the pmPolicyCodeTable that\n contain the condition code for this policy. When a policy\n entry is created, a pmPolicyCodeIndex value unused by this\n policy's adminGroup will be assigned to this object.\n\n A policy condition is one or more PolicyScript statements\n that result(s) in a boolean value that represents whether\n an element is a member of a set of elements upon which an\n action is to be performed. If a policy is ready and the\n condition returns true for an element of a proper element\n type, and if no higher-precedence policy should be active,\n then the policy is active on that element.\n\n Condition evaluation stops immediately when any run-time\n exception is detected, and the policyAction is not executed.\n\n The policyCondition is evaluated for various elements. Any\n element for which the policyCondition returns any nonzero value\n will match the condition and will have the associated\n\n\n\n policyAction executed on that element unless a\n higher-precedence policy in the same precedence group also\n matches 'this element'.\n\n If the condition object is empty (contains no code) or\n otherwise does not return a value, the element will not be\n matched.\n\n When this condition is executed, if SNMP requests are made to\n the local system and secModel/secName/secLevel aren't\n specified, access to objects is under the security\n credentials of the requester who most recently modified the\n associated pmPolicyAdminStatus object. If SNMP requests are\n made in which secModel/secName/secLevel are specified, then\n the specified credentials are retrieved from the local\n configuration datastore only if VACM is configured to\n allow access to the requester who most recently modified the\n associated pmPolicyAdminStatus object. See the Security\n Considerations section for more information.")
pmPolicyActionScriptIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1,4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: pmPolicyActionScriptIndex.setDescription("A pointer to the row or rows in the pmPolicyCodeTable that\n contain the action code for this policy. When a policy entry\n is created, a pmPolicyCodeIndex value unused by this policy's\n adminGroup will be assigned to this object.\n\n A PolicyAction is an operation performed on a\n set of elements for which the policy is active.\n\n Action evaluation stops immediately when any run-time\n exception is detected.\n\n When this condition is executed, if SNMP requests are made to\n the local system and secModel/secName/secLevel aren't\n specified, access to objects is under the security\n credentials of the requester who most recently modified the\n associated pmPolicyAdminStatus object. If SNMP requests are\n made in which secModel/secName/secLevel are specified, then\n the specified credentials are retrieved from the local\n configuration datastore only if VACM is configured to\n allow access to the requester who most recently modified the\n associated pmPolicyAdminStatus object. See the Security\n Considerations section for more information.")
pmPolicyParameters = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0,65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyParameters.setDescription('From time to time, policy scripts may seek one or more\n parameters (e.g., site-specific constants). These parameters\n may be installed with the script in this object and are\n accessible to the script via the getParameters() function. If\n it is necessary for multiple parameters to be passed to the\n script, the script can choose whatever encoding/delimiting\n mechanism is most appropriate.')
pmPolicyConditionMaxLatency = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0,2147483647))).setUnits('milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyConditionMaxLatency.setDescription('Every element under the control of this agent is\n re-checked periodically to see whether it is under control\n of this policy by re-running the condition for this policy.\n This object lets the manager control the maximum amount of\n time that may pass before an element is re-checked.\n\n In other words, in any given interval of this duration, all\n elements must be re-checked. Note that how the policy agent\n schedules the checking of various elements within this\n interval is an implementation-dependent matter.\n Implementations may wish to re-run a condition more\n quickly if they note a change to the role strings for an\n element.')
pmPolicyActionMaxLatency = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0,2147483647))).setUnits('milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyActionMaxLatency.setDescription("Every element that matches this policy's condition and is\n therefore under control of this policy will have this policy's\n action executed periodically to ensure that the element\n remains in the state dictated by the policy.\n This object lets the manager control the maximum amount of\n\n\n\n time that may pass before an element has the action run on\n it.\n\n In other words, in any given interval of this duration, all\n elements under control of this policy must have the action run\n on them. Note that how the policy agent schedules the policy\n action on various elements within this interval is an\n implementation-dependent matter.")
pmPolicyMaxIterations = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 12), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyMaxIterations.setDescription("If a condition or action script iterates in loops too many\n times in one invocation, the execution environment may\n consider it in an infinite loop or otherwise not acting\n as intended and may be terminated by the execution\n environment. The execution environment will count the\n cumulative number of times all 'for' or 'while' loops iterated\n and will apply a threshold to determine when to terminate the\n script. What threshold the execution environment uses is an\n implementation-dependent manner, but the value of\n this object SHOULD be the basis for choosing the threshold for\n each script. The value of this object represents a\n policy-specific threshold and can be tuned for policies of\n varying workloads. If this value is zero, no\n threshold will be enforced except for any\n implementation-dependent maximum. Regardless of this value,\n the agent is allowed to terminate any script invocation that\n exceeds a local CPU or memory limitation.\n\n Note that the condition and action invocations are tracked\n separately.")
pmPolicyDescription = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 13), PmUTF8String()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyDescription.setDescription('A description of this rule and its significance, typically\n provided by a human.')
pmPolicyMatches = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 14), Gauge32()).setUnits('elements').setMaxAccess("readonly")
if mibBuilder.loadTexts: pmPolicyMatches.setDescription('The number of elements that, in their most recent execution\n of the associated condition, were matched by the condition.')
pmPolicyAbnormalTerminations = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 15), Gauge32()).setUnits('elements').setMaxAccess("readonly")
if mibBuilder.loadTexts: pmPolicyAbnormalTerminations.setDescription('The number of elements that, in their most recent execution\n of the associated condition or action, have experienced a\n run-time exception and terminated abnormally. Note that if a\n policy was experiencing a run-time exception while processing\n a particular element but runs normally on a subsequent\n invocation, this number can decline.')
pmPolicyExecutionErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 16), Counter32()).setUnits('errors').setMaxAccess("readonly")
if mibBuilder.loadTexts: pmPolicyExecutionErrors.setDescription("The total number of times that execution of this policy's\n condition or action has been terminated due to run-time\n exceptions.")
# Enumerated INTEGER columns: constraint restricts the wire values, the
# NamedValues clone attaches the labels, and a final .clone(...) sets the
# default (DEFVAL) where the MIB defines one.
pmPolicyDebugging = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("off", 1), ("on", 2),)).clone('off')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyDebugging.setDescription('The status of debugging for this policy. If this is turned\n on(2), log entries will be created in the pmDebuggingTable\n for each run-time exception that is experienced by this\n policy.')
pmPolicyAdminStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2), ("enabledAutoRemove", 3),))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyAdminStatus.setDescription('The administrative status of this policy.\n\n The policy will be valid only if the associated\n pmPolicyRowStatus is set to active(1) and this object is set\n to enabled(2) or enabledAutoRemove(3).\n\n If this object is set to enabledAutoRemove(3), the next time\n the associated schedule moves from the active state to the\n inactive state, this policy will immediately be deleted,\n including any associated entries in the pmPolicyCodeTable.\n\n The following related objects may not be changed unless this\n object is set to disabled(1):\n pmPolicyPrecedenceGroup, pmPolicyPrecedence,\n pmPolicySchedule, pmPolicyElementTypeFilter,\n pmPolicyConditionScriptIndex, pmPolicyActionScriptIndex,\n pmPolicyParameters, and any pmPolicyCodeTable row\n referenced by this policy.\n In order to change any of these parameters, the policy must\n be moved to the disabled(1) state, changed, and then\n re-enabled.\n\n When this policy moves to either enabled state from the\n disabled state, any cached values of policy condition must be\n erased, and any Policy or PolicyElement scratchpad values for\n this policy should be removed. Policy execution will begin by\n testing the policy condition on all appropriate elements.')
pmPolicyStorageType = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 19), StorageType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyStorageType.setDescription("This object defines whether this policy and any associated\n entries in the pmPolicyCodeTable are kept in volatile storage\n and lost upon reboot or if this row is backed up by\n non-volatile or permanent storage.\n\n\n\n\n If the value of this object is 'permanent', the values for\n the associated pmPolicyAdminStatus object must remain\n writable.")
pmPolicyRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 1, 1, 20), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyRowStatus.setDescription('The row status of this pmPolicyEntry.\n\n The status may not be set to active if any of the related\n entries in the pmPolicyCode table do not have a status of\n active or if any of the objects in this row are not set to\n valid values. Only the following objects may be modified\n while in the active state:\n pmPolicyParameters\n pmPolicyConditionMaxLatency\n pmPolicyActionMaxLatency\n pmPolicyDebugging\n pmPolicyAdminStatus\n\n If this row is deleted, any associated entries in the\n pmPolicyCodeTable will be deleted as well.')
# ---------------------------------------------------------------------------
# pmPolicyCodeTable (mib-2 124.2)
# Stores the script text for policy conditions and actions, segmented so a
# long script can span multiple rows.  Indexed by (pmPolicyAdminGroup,
# pmPolicyCodeScriptIndex, pmPolicyCodeSegment); the first index is reused
# from pmPolicyEntry above.
# ---------------------------------------------------------------------------
pmPolicyCodeTable = MibTable((1, 3, 6, 1, 2, 1, 124, 2), )
if mibBuilder.loadTexts: pmPolicyCodeTable.setDescription("The pmPolicyCodeTable stores the code for policy conditions and\n actions.\n\n An example of the relationships between the code table and the\n policy table follows:\n\n pmPolicyTable\n AdminGroup Index ConditionScriptIndex ActionScriptIndex\n A '' 1 1 2\n B 'oper' 1 1 2\n C 'oper' 2 3 4\n\n pmPolicyCodeTable\n AdminGroup ScriptIndex Segment Note\n\n\n\n '' 1 1 Filter for policy A\n '' 2 1 Action for policy A\n 'oper' 1 1 Filter for policy B\n 'oper' 2 1 Action 1/2 for policy B\n 'oper' 2 2 Action 2/2 for policy B\n 'oper' 3 1 Filter for policy C\n 'oper' 4 1 Action for policy C\n\n In this example, there are 3 policies: 1 in the '' adminGroup,\n and 2 in the 'oper' adminGroup. Policy A has been assigned\n script indexes 1 and 2 (these script indexes are assigned out of\n a separate pool per adminGroup), with 1 code segment each for\n the filter and the action. Policy B has been assigned script\n indexes 1 and 2 (out of the pool for the 'oper' adminGroup).\n While the filter has 1 segment, the action is longer and is\n loaded into 2 segments. Finally, Policy C has been assigned\n script indexes 3 and 4, with 1 code segment each for the filter\n and the action.")
pmPolicyCodeEntry = MibTableRow((1, 3, 6, 1, 2, 1, 124, 2, 1), ).setIndexNames((0, "POLICY-BASED-MANAGEMENT-MIB", "pmPolicyAdminGroup"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmPolicyCodeScriptIndex"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmPolicyCodeSegment"))
if mibBuilder.loadTexts: pmPolicyCodeEntry.setDescription('An entry in the policy code table representing one code\n segment. Entries that share a common AdminGroup/ScriptIndex\n pair make up a single script. Valid values of ScriptIndex are\n retrieved from pmPolicyConditionScriptIndex and\n pmPolicyActionScriptIndex after a pmPolicyEntry is\n created. Segments of code can then be written to this table\n with the learned ScriptIndex values.\n\n The StorageType of this entry is determined by the value of\n the associated pmPolicyStorageType.\n\n The pmPolicyAdminGroup element of the index represents the\n administrative group of the policy of which this code entry is\n a part.')
pmPolicyCodeScriptIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1,4294967295)))
if mibBuilder.loadTexts: pmPolicyCodeScriptIndex.setDescription('A unique index for each policy condition or action. The code\n for each such condition or action may be composed of multiple\n entries in this table if the code cannot fit in one entry.\n Values of pmPolicyCodeScriptIndex may not be used unless\n they have previously been assigned in the\n pmPolicyConditionScriptIndex or pmPolicyActionScriptIndex\n objects.')
pmPolicyCodeSegment = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 2, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1,4294967295)))
if mibBuilder.loadTexts: pmPolicyCodeSegment.setDescription('A unique index for each segment of a policy condition or\n action.\n\n When a policy condition or action spans multiple entries in\n this table, the code of that policy starts from the\n lowest-numbered segment and continues with increasing segment\n values until it ends with the highest-numbered segment.')
pmPolicyCodeText = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 2, 1, 3), PmUTF8String().subtype(subtypeSpec=ValueSizeConstraint(1,1024))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyCodeText.setDescription('A segment of policy code (condition or action). Lengthy\n Policy conditions or actions may be stored in multiple\n segments in this table that share the same value of\n pmPolicyCodeScriptIndex. When multiple segments are used, it\n is recommended that each segment be as large as is practical.\n\n Entries in this table are associated with policies by values\n of the pmPolicyConditionScriptIndex and\n pmPolicyActionScriptIndex objects. If the status of the\n related policy is active, then this object may not be\n modified.')
pmPolicyCodeStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmPolicyCodeStatus.setDescription('The status of this code entry.\n\n Entries in this table are associated with policies by values\n of the pmPolicyConditionScriptIndex and\n pmPolicyActionScriptIndex objects. If the status of the\n related policy is active, then this object can not be\n modified (i.e., deleted or set to notInService), nor may new\n entries be created.\n\n If the status of this object is active, no objects in this\n row may be modified.')
# ---------------------------------------------------------------------------
# pmElementTypeRegTable (mib-2 124.3)
# Registration table of element types (OID prefixes of MIB tables) whose rows
# the agent discovers and treats as policy elements.  Indexed by
# pmElementTypeRegOIDPrefix (not-accessible: no setMaxAccess() call).
#
# FIX(review): the pmElementTypeRegOIDPrefix description string was broken
# across two physical source lines, leaving a raw newline inside a
# double-quoted literal — a Python SyntaxError.  The two fragments are
# rejoined into one valid statement below; no text was added or removed.
# ---------------------------------------------------------------------------
pmElementTypeRegTable = MibTable((1, 3, 6, 1, 2, 1, 124, 3), )
if mibBuilder.loadTexts: pmElementTypeRegTable.setDescription("A registration table for element types managed by this\n system.\n\n The Element Type Registration table allows the manager to\n learn what element types are being managed by the system and\n to register new types, if necessary. An element type is\n registered by providing the OID of an SNMP object (i.e.,\n without the instance). Each SNMP instance that exists under\n that object is a distinct element. The index of the element is\n the index part of the discovered OID. This index will be\n supplied to policy conditions and actions so that this code\n can inspect and configure the element.\n\n For example, this table might contain the following entries.\n The first three are agent-installed, and the 4th was\n downloaded by a management station:\n\n OIDPrefix MaxLatency Description StorageType\n ifEntry 100 mS interfaces - builtin readOnly\n 0.0 100 mS system element - builtin readOnly\n frCircuitEntry 100 mS FR Circuits - builtin readOnly\n hrSWRunEntry 60 sec Running Processes volatile\n\n\n\n\n Note that agents may automatically configure elements in this\n table for frequently used element types (interfaces, circuits,\n etc.). In particular, it may configure elements for whom\n discovery is optimized in one or both of the following ways:\n\n 1. The agent may discover elements by scanning internal data\n structures as opposed to issuing local SNMP requests. It is\n possible to recreate the exact semantics described in this\n table even if local SNMP requests are not issued.\n\n 2. The agent may receive asynchronous notification of new\n elements (for example, 'card inserted') and use that\n information to instantly create elements rather than\n through polling. A similar feature might be available for\n the deletion of elements.\n\n Note that the disposition of agent-installed entries is\n described by the pmPolicyStorageType object.")
pmElementTypeRegEntry = MibTableRow((1, 3, 6, 1, 2, 1, 124, 3, 1), ).setIndexNames((0, "POLICY-BASED-MANAGEMENT-MIB", "pmElementTypeRegOIDPrefix"))
if mibBuilder.loadTexts: pmElementTypeRegEntry.setDescription("A registration of an element type.\n\n Note that some values of this table's index may result in an\n instance name that exceeds a length of 128 sub-identifiers,\n which exceeds the maximum for the SNMP protocol.\n Implementations should take care to avoid such values.")
pmElementTypeRegOIDPrefix = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 3, 1, 2), ObjectIdentifier())
if mibBuilder.loadTexts: pmElementTypeRegOIDPrefix.setDescription("This OBJECT IDENTIFIER value identifies a table in which all\n\n\n\n elements of this type will be found. Every row in the\n referenced table will be treated as an element for the\n period of time that it remains in the table. The agent will\n then execute policy conditions and actions as appropriate on\n each of these elements.\n\n This object identifier value is specified down to the 'entry'\n component (e.g., ifEntry) of the identifier.\n\n The index of each discovered row will be passed to each\n invocation of the policy condition and policy action.\n\n The actual mechanism by which instances are discovered is\n implementation dependent. Periodic walks of the table to\n discover the rows in the table is one such mechanism. This\n mechanism has the advantage that it can be performed by an\n agent with no knowledge of the names, syntax, or semantics\n of the MIB objects in the table. This mechanism also serves as\n the reference design. Other implementation-dependent\n mechanisms may be implemented that are more efficient (perhaps\n because they are hard coded) or that don't require polling.\n These mechanisms must discover the same elements as would the\n table-walking reference design.\n\n This object can contain a OBJECT IDENTIFIER, '0.0'.\n '0.0' represents the single instance of the system\n itself and provides an execution context for policies to\n operate on the 'system element' and on MIB objects\n modeled as scalars. For example, '0.0' gives an execution\n context for policy-based selection of the operating system\n code version (likely modeled as a scalar MIB object). The\n element type '0.0' always exists; as a consequence, no actual\n discovery will take place, and the pmElementTypeRegMaxLatency\n object will have no effect for the '0.0' element\n type. However, if the '0.0' element type is not registered in\n the table, policies will not be executed on the '0.0' element.\n\n When a policy is invoked on behalf of a '0.0' entry in this\n table, the element name will be '0.0', and there is no index\n of 'this element' (in other words, it has zero length).\n\n As this object is used in the index for the\n pmElementTypeRegTable, users of this table should be careful\n not to create entries that would result in instance names with\n more than 128 sub-identifiers.")
pmElementTypeRegMaxLatency = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 3, 1, 3), Unsigned32()).setUnits('milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmElementTypeRegMaxLatency.setDescription('The PM agent is responsible for discovering new elements of\n types that are registered. This object lets the manager\n control the maximum amount of time that may pass between the\n time an element is created and when it is discovered.\n\n In other words, in any given interval of this duration, all\n new elements must be discovered. Note that how the policy\n agent schedules the checking of various elements within this\n interval is an implementation-dependent matter.')
pmElementTypeRegDescription = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 3, 1, 4), PmUTF8String().subtype(subtypeSpec=ValueSizeConstraint(0,64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmElementTypeRegDescription.setDescription('A descriptive label for this registered type.')
pmElementTypeRegStorageType = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 3, 1, 5), StorageType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmElementTypeRegStorageType.setDescription("This object defines whether this row is kept\n in volatile storage and lost upon reboot or\n backed up by non-volatile or permanent storage.\n\n If the value of this object is 'permanent', no values in the\n associated row have to be writable.")
pmElementTypeRegRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 3, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmElementTypeRegRowStatus.setDescription('The status of this registration entry.\n\n If the value of this object is active, no objects in this row\n may be modified.')
# ---------------------------------------------------------------------------
# pmRoleTable (mib-2 124.4)
# Associates administratively assigned role strings with elements (possibly
# in other SNMP contexts or on remote engines).  Indexed by (pmRoleElement,
# pmRoleContextName, pmRoleContextEngineID, pmRoleString); only pmRoleStatus
# is accessible (read-create) — the four index columns carry no max-access.
# ---------------------------------------------------------------------------
pmRoleTable = MibTable((1, 3, 6, 1, 2, 1, 124, 4), )
if mibBuilder.loadTexts: pmRoleTable.setDescription("The pmRoleTable is a read-create table that organizes role\n strings sorted by element. This table is used to create and\n modify role strings and their associations, as well as to allow\n a management station to learn about the existence of roles and\n their associations.\n\n It is the responsibility of the agent to keep track of any\n re-indexing of the underlying SNMP elements and to continue to\n associate role strings with the element with which they were\n initially configured.\n\n Policy MIB agents that have elements in multiple local SNMP\n contexts have to allow some roles to be assigned to elements\n in particular contexts. This is particularly true when some\n elements have the same names in different contexts and the\n context is required to disambiguate them. In those situations,\n a value for the pmRoleContextName may be provided. When a\n pmRoleContextName value is not provided, the assignment is to\n the element in the default context.\n\n Policy MIB agents that discover elements on other systems and\n execute policies on their behalf need to have access to role\n information for these remote elements. In such situations,\n role assignments for other systems can be stored in this table\n by providing values for the pmRoleContextEngineID parameters.\n\n For example:\n Example:\n element role context ctxEngineID #comment\n ifindex.1 gold local, default context\n ifindex.2 gold local, default context\n repeaterid.1 foo rptr1 local, rptr1 context\n repeaterid.1 bar rptr2 local, rptr2 context\n ifindex.1 gold '' A different system\n ifindex.1 gold '' B different system\n\n The agent must store role string associations in non-volatile\n storage.")
pmRoleEntry = MibTableRow((1, 3, 6, 1, 2, 1, 124, 4, 1), ).setIndexNames((0, "POLICY-BASED-MANAGEMENT-MIB", "pmRoleElement"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmRoleContextName"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmRoleContextEngineID"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmRoleString"))
if mibBuilder.loadTexts: pmRoleEntry.setDescription('A role string entry associates a role string with an\n individual element.\n\n Note that some combinations of index values may result in an\n instance name that exceeds a length of 128 sub-identifiers,\n which exceeds the maximum for the SNMP\n protocol. Implementations should take care to avoid such\n combinations.')
pmRoleElement = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 4, 1, 1), RowPointer())
if mibBuilder.loadTexts: pmRoleElement.setDescription("The element with which this role string is associated.\n\n For example, if the element is interface 3, then this object\n will contain the OID for 'ifIndex.3'.\n\n If the agent assigns new indexes in the MIB table to\n represent the same underlying element (re-indexing), the\n agent will modify this value to contain the new index for the\n underlying element.\n\n As this object is used in the index for the pmRoleTable,\n users of this table should be careful not to create entries\n that would result in instance names with more than 128\n sub-identifiers.")
# NOTE(review): SnmpAdminString is not in this chunk's importSymbols lines —
# presumably imported from SNMP-FRAMEWORK-MIB earlier in the file; verify.
pmRoleContextName = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 4, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,32)))
if mibBuilder.loadTexts: pmRoleContextName.setDescription('If the associated element is not in the default SNMP context\n for the target system, this object is used to identify the\n context. If the element is in the default context, this object\n is equal to the empty string.')
pmRoleContextEngineID = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 4, 1, 3), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0,0),ValueSizeConstraint(5,32),)))
if mibBuilder.loadTexts: pmRoleContextEngineID.setDescription('If the associated element is on a remote system, this object\n is used to identify the remote system. This object contains\n the contextEngineID of the system for which this role string\n assignment is valid. If the element is on the local system\n this object will be the empty string.')
pmRoleString = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 4, 1, 4), PmUTF8String().subtype(subtypeSpec=ValueSizeConstraint(0,64)))
if mibBuilder.loadTexts: pmRoleString.setDescription('The role string that is associated with an element through\n this table. All role strings must have been successfully\n transformed by Stringprep RFC 3454. Management stations\n must perform this translation and must only set this object\n to string values that have been transformed.\n\n A role string is an administratively specified characteristic\n of a managed element (for example, an interface). It is a\n selector for policy rules, that determines the applicability of\n the rule to a particular managed element.')
pmRoleStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 4, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmRoleStatus.setDescription('The status of this role string.\n\n\n\n\n\n If the value of this object is active, no object in this row\n may be modified.')
# --- pmCapabilitiesTable (1.3.6.1.2.1.124.5): read-only, agent-populated registry of
# system capability OIDs (compliance statements, MIB module roots, vendor OIDs).
pmCapabilitiesTable = MibTable((1, 3, 6, 1, 2, 1, 124, 5), )
if mibBuilder.loadTexts: pmCapabilitiesTable.setDescription("The pmCapabilitiesTable contains a description of\n the inherent capabilities of the system so that\n management stations can learn of an agent's capabilities and\n differentially install policies based on the capabilities.\n\n Capabilities are expressed at the system level. There can be\n variation in how capabilities are realized from one vendor or\n model to the next. Management systems should consider these\n differences before selecting which policy to install in a\n system.")
# Conceptual row, indexed solely by the capability OID.
pmCapabilitiesEntry = MibTableRow((1, 3, 6, 1, 2, 1, 124, 5, 1), ).setIndexNames((0, "POLICY-BASED-MANAGEMENT-MIB", "pmCapabilitiesType"))
if mibBuilder.loadTexts: pmCapabilitiesEntry.setDescription("A capabilities entry holds an OID indicating support for a\n particular capability. Capabilities may include hardware and\n software functions and the implementation of MIB\n Modules. The semantics of the OID are defined in the\n description of pmCapabilitiesType.\n\n Entries appear in this table if any element in the system has\n a specific capability. A capability should appear in this\n table only once, regardless of the number of elements in the\n system with that capability. An entry is removed from this\n table when the last element in the system that has the\n capability is removed. In some cases, capabilities are\n dynamic and exist only in software. This table should have an\n entry for the capability even if there are no current\n instances. Examples include systems with database or WEB\n services. While the system has the ability to create new\n databases or WEB services, the entry should exist. In these\n cases, the ability to create these services could come from\n other processes that are running in the system, even though\n there are no currently open databases or WEB servers running.\n\n\n\n Capabilities may include the implementation of MIB Modules\n but need not be limited to those that represent MIB Modules\n with one or more configurable objects. It may also be\n valuable to include entries for capabilities that do not\n include configuration objects, as that information, in\n combination with other entries in this table, might be used\n by the management software to determine whether to\n install a policy.\n\n Vendor software may also add entries in this table to express\n capabilities from their private branch.\n\n Note that some values of this table's index may result in an\n instance name that exceeds a length of 128 sub-identifiers,\n which exceeds the maximum for the SNMP\n protocol. Implementations should take care to avoid such\n values.")
# Index column: the capability OID (also the row's only column); read-only.
pmCapabilitiesType = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 5, 1, 1), ObjectIdentifier()).setMaxAccess("readonly")
# Fix: the generated setDescription call had its double-quoted string literal broken
# across two physical lines (an unterminated string -> SyntaxError). The statement is
# rejoined into a single line; the text at the splice point ("...single OID. There is
# no restriction...") matches the DESCRIPTION clause of pmCapabilitiesType in RFC 4011.
if mibBuilder.loadTexts: pmCapabilitiesType.setDescription("There are three types of OIDs that may be present in the\n pmCapabilitiesType object:\n\n 1) The OID of a MODULE-COMPLIANCE macro that represents the\n highest level of compliance realized by the agent for that\n MIB Module. For example, an agent that implements the OSPF\n MIB Module at the highest level of compliance would have the\n value of '1.3.6.1.2.1.14.15.2' in the pmCapabilitiesType\n object. For software that realizes standard MIB\n Modules that do not have compliance statements, the base OID\n of the MIB Module should be used instead. If the OSPF MIB\n Module had not been created with a compliance statement, then\n the correct value of the pmCapabilitiesType would be\n '1.3.6.1.2.1.14'. In the cases where multiple compliance\n statements in a MIB Module are supported by the agent, and\n where one compliance statement does not by definition include\n the other, each of the compliance OIDs would have entries in\n this table.\n\n\n\n\n MIB Documents can contain more than one MIB Module. In the\n case of OSPF, there is a second MIB Module\n that describes notifications for the OSPF Version 2 Protocol.\n If the agent also realizes these functions, an entry will\n also exist for those capabilities in this table.\n\n 2) Vendors should install OIDs in this table that represent\n vendor-specific capabilities. These capabilities can be\n expressed just as those described above for MIB Modules on\n the standards track. In addition, vendors may install any\n OID they desire from their registered branch. The OIDs may be\n at any level of granularity, from the root of their entire\n branch to an instance of a single OID. There is no\n restriction on the number of registrations they may make,\n though care should be taken to avoid unnecessary entries.\n\n 3) OIDs that represent one capability or a collection of\n capabilities that could be any collection of MIB Objects or\n hardware or software functions may be created in working\n groups and registered in a MIB Module. Other entities (e.g.,\n vendors) may also make registrations. Software will register\n these standard capability OIDs, as well as vendor specific\n OIDs.\n\n If the OID for a known capability is not present in the\n table, then it should be assumed that the capability is not\n implemented.\n\n As this object is used in the index for the\n pmCapabilitiesTable, users of this table should be careful\n not to create entries that would result in instance names\n with more than 128 sub-identifiers.")
# --- pmCapabilitiesOverrideTable (1.3.6.1.2.1.124.6): manager-writable overrides that
# declare capability OIDs valid/invalid, taking precedence over pmCapabilitiesTable.
pmCapabilitiesOverrideTable = MibTable((1, 3, 6, 1, 2, 1, 124, 6), )
if mibBuilder.loadTexts: pmCapabilitiesOverrideTable.setDescription('The pmCapabilitiesOverrideTable allows management stations\n to override pmCapabilitiesTable entries that have been\n registered by the agent. This facility can be used to avoid\n situations in which managers in the network send policies to\n a system that has advertised a capability in the\n pmCapabilitiesTable but that should not be installed on this\n particular system. One example could be newly deployed\n\n\n\n equipment that is still in a trial state in a trial state or\n resources reserved for some other administrative reason.\n This table can also be used to override entries in the\n pmCapabilitiesTable through the use of the\n pmCapabilitiesOverrideState object. Capabilities can also be\n declared available in this table that were not registered in\n the pmCapabilitiesTable. A management application can make\n an entry in this table for any valid OID and declare the\n capability available by setting the\n pmCapabilitiesOverrideState for that row to valid(1).')
# Conceptual row, indexed by the overridden capability OID.
pmCapabilitiesOverrideEntry = MibTableRow((1, 3, 6, 1, 2, 1, 124, 6, 1), ).setIndexNames((0, "POLICY-BASED-MANAGEMENT-MIB", "pmCapabilitiesOverrideType"))
if mibBuilder.loadTexts: pmCapabilitiesOverrideEntry.setDescription("An entry in this table indicates whether a particular\n capability is valid or invalid.\n\n Note that some values of this table's index may result in an\n instance name that exceeds a length of 128 sub-identifiers,\n which exceeds the maximum for the SNMP\n protocol. Implementations should take care to avoid such\n values.")
# Index column: capability OID being overridden (not-accessible per SMIv2).
pmCapabilitiesOverrideType = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 6, 1, 1), ObjectIdentifier())
if mibBuilder.loadTexts: pmCapabilitiesOverrideType.setDescription('This is the OID of the capability that is declared valid or\n invalid by the pmCapabilitiesOverrideState value for this\n row. Any valid OID, as described in the pmCapabilitiesTable,\n is permitted in the pmCapabilitiesOverrideType object. This\n means that capabilities can be expressed at any level, from a\n specific instance of an object to a table or entire module.\n There are no restrictions on whether these objects are from\n standards track MIB documents or in the private branch of the\n MIB.\n\n\n\n If an entry exists in this table for which there is a\n corresponding entry in the pmCapabilitiesTable, then this entry\n shall have precedence over the entry in the\n pmCapabilitiesTable. All entries in this table must be\n preserved across reboots.\n\n As this object is used in the index for the\n pmCapabilitiesOverrideTable, users of this table should be\n careful not to create entries that would result in instance\n names with more than 128 sub-identifiers.')
# Enumerated state column: invalid(1) suppresses the capability, valid(2) asserts it.
pmCapabilitiesOverrideState = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("invalid", 1), ("valid", 2),))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmCapabilitiesOverrideState.setDescription('A pmCapabilitiesOverrideState of invalid indicates that\n management software should not send policies to this system\n for the capability identified in the\n pmCapabilitiesOverrideType for this row of the table. This\n behavior is the same whether the capability represented by\n the pmCapabilitiesOverrideType exists only in this table\n (that is, it was installed by an external management\n application) or exists in this table as well as the\n pmCapabilitiesTable. This would be the case when a manager\n wanted to disable a capability that the native management\n system found and registered in the pmCapabilitiesTable.\n\n An entry in this table that has a pmCapabilitiesOverrideState\n of valid should be treated as though it appeared in the\n pmCapabilitiesTable. If the entry also exists in the\n pmCapabilitiesTable in the pmCapabilitiesType object, and if\n the value of this object is valid, then the system shall\n operate as though this entry did not exist and policy\n installations and executions will continue in a normal\n fashion.')
# RowStatus column controlling row creation/deletion.
pmCapabilitiesOverrideRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 6, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmCapabilitiesOverrideRowStatus.setDescription('The row status of this pmCapabilitiesOverrideEntry.\n\n\n\n If the value of this object is active, no object in this row\n may be modified.')
# --- pmSchedLocalTime (1.3.6.1.2.1.124.7): read-only scalar; fixed 11-octet DateAndTime
# so the GMT offset bytes are always present.
pmSchedLocalTime = MibScalar((1, 3, 6, 1, 2, 1, 124, 7), DateAndTime().subtype(subtypeSpec=ValueSizeConstraint(11,11)).setFixedLength(11)).setMaxAccess("readonly")
if mibBuilder.loadTexts: pmSchedLocalTime.setDescription('The local time used by the scheduler. Schedules that\n refer to calendar time will use the local time indicated\n by this object. An implementation MUST return all 11 bytes\n of the DateAndTime textual-convention so that a manager\n may retrieve the offset from GMT time.')
# --- pmSchedTable (1.3.6.1.2.1.124.8): calendar schedules that gate policy activity.
pmSchedTable = MibTable((1, 3, 6, 1, 2, 1, 124, 8), )
if mibBuilder.loadTexts: pmSchedTable.setDescription('This table defines schedules for policies.')
# Conceptual row, indexed by pmSchedIndex; several columns stay writable while active.
pmSchedEntry = MibTableRow((1, 3, 6, 1, 2, 1, 124, 8, 1), ).setIndexNames((0, "POLICY-BASED-MANAGEMENT-MIB", "pmSchedIndex"))
if mibBuilder.loadTexts: pmSchedEntry.setDescription('An entry describing a particular schedule.\n\n Unless noted otherwise, writable objects of this row can be\n modified independently of the current value of pmSchedRowStatus,\n pmSchedAdminStatus and pmSchedOperStatus. In particular, it\n is legal to modify pmSchedWeekDay, pmSchedMonth, and\n pmSchedDay when pmSchedRowStatus is active.')
# Index column: administratively assigned schedule index (1..4294967295).
pmSchedIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 8, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1,4294967295)))
if mibBuilder.loadTexts: pmSchedIndex.setDescription('The locally unique, administratively assigned index for this\n scheduling entry.')
# Group index: schedules with the same value form a group; a policy is active when any
# schedule in its group is active.
pmSchedGroupIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 8, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1,4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmSchedGroupIndex.setDescription('The locally unique, administratively assigned index for the\n schedule group this scheduling entry belongs to.\n\n To assign multiple schedule entries to the same group, the\n pmSchedGroupIndex of each entry in the group will be set to\n the same value. This pmSchedGroupIndex value must be equal to\n the pmSchedIndex of one of the entries in the group. If the\n entry whose pmSchedIndex equals the pmSchedGroupIndex\n for the group is deleted, the agent will assign a new\n pmSchedGroupIndex to all remaining members of the group.\n\n If an entry is not a member of a group, its pmSchedGroupIndex\n must be assigned to the value of its pmSchedIndex.\n\n Policies that are controlled by a group of schedule entries\n are active when any schedule in the group is active.')
# Human-readable description; DEFVAL is the empty string (hexValue="").
pmSchedDescr = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 8, 1, 3), PmUTF8String().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmSchedDescr.setDescription('The human-readable description of the purpose of this\n scheduling entry.')
# Overall active period, RFC 2445 'period-explicit'-based string (max 31 chars).
pmSchedTimePeriod = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 8, 1, 4), PmUTF8String().subtype(subtypeSpec=ValueSizeConstraint(0,31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmSchedTimePeriod.setDescription("The overall range of calendar dates and times over which this\n schedule is active. It is stored in a slightly extended version\n of the format for a 'period-explicit' defined in RFC 2445.\n This format is expressed as a string representing the\n starting date and time, in which the character 'T' indicates\n the beginning of the time portion, followed by the solidus\n character, '/', followed by a similar string representing an\n end date and time. The start of the period MUST be before the\n end of the period. Date-Time values are expressed as\n substrings of the form 'yyyymmddThhmmss'. For example:\n\n 20000101T080000/20000131T130000\n\n January 1, 2000, 0800 through January 31, 2000, 1PM\n\n The 'Date with UTC time' format defined in RFC 2445 in which\n the Date-Time string ends with the character 'Z' is not\n allowed.\n\n This 'period-explicit' format is also extended to allow two\n special cases in which one of the Date-Time strings is\n replaced with a special string defined in RFC 2445:\n\n 1. If the first Date-Time value is replaced with the string\n 'THISANDPRIOR', then the value indicates that the schedule\n is active at any time prior to the Date-Time that appears\n after the '/'.\n\n 2. If the second Date-Time is replaced with the string\n 'THISANDFUTURE', then the value indicates that the schedule\n is active at any time after the Date-Time that appears\n before the '/'.\n\n\n\n\n Note that although RFC 2445 defines these two strings, they are\n not specified for use in the 'period-explicit' format. The use\n of these strings represents an extension to the\n 'period-explicit' format.")
# BITS month mask: within pmSchedTimePeriod, selects the active months; all bits set
# means month-independent.
# Fix: the generator emitted .clone(namedValues=...) twice with identical NamedValues;
# the redundant second clone (a no-op re-copy of the same enumeration) is removed.
pmSchedMonth = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 8, 1, 5), Bits().clone(namedValues=NamedValues(("january", 0), ("february", 1), ("march", 2), ("april", 3), ("may", 4), ("june", 5), ("july", 6), ("august", 7), ("september", 8), ("october", 9), ("november", 10), ("december", 11),))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmSchedMonth.setDescription('Within the overall time period specified in the\n pmSchedTimePeriod object, the value of this object specifies\n the specific months within that time period when the schedule\n is active. Setting all bits will cause the schedule to act\n independently of the month.')
# BITS day-of-month mask: d1..d31 count from the first day, r1..r31 count back from the
# last day; setting all d-bits or all r-bits makes the schedule day-independent.
# Fix: the generator emitted .clone(namedValues=...) twice with identical NamedValues;
# the redundant second clone (a no-op re-copy of the same enumeration) is removed.
pmSchedDay = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 8, 1, 6), Bits().clone(namedValues=NamedValues(("d1", 0), ("d2", 1), ("d3", 2), ("d4", 3), ("d5", 4), ("d6", 5), ("d7", 6), ("d8", 7), ("d9", 8), ("d10", 9), ("d11", 10), ("d12", 11), ("d13", 12), ("d14", 13), ("d15", 14), ("d16", 15), ("d17", 16), ("d18", 17), ("d19", 18), ("d20", 19), ("d21", 20), ("d22", 21), ("d23", 22), ("d24", 23), ("d25", 24), ("d26", 25), ("d27", 26), ("d28", 27), ("d29", 28), ("d30", 29), ("d31", 30), ("r1", 31), ("r2", 32), ("r3", 33), ("r4", 34), ("r5", 35), ("r6", 36), ("r7", 37), ("r8", 38), ("r9", 39), ("r10", 40), ("r11", 41), ("r12", 42), ("r13", 43), ("r14", 44), ("r15", 45), ("r16", 46), ("r17", 47), ("r18", 48), ("r19", 49), ("r20", 50), ("r21", 51), ("r22", 52), ("r23", 53), ("r24", 54), ("r25", 55), ("r26", 56), ("r27", 57), ("r28", 58), ("r29", 59), ("r30", 60), ("r31", 61),))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmSchedDay.setDescription("Within the overall time period specified in the\n pmSchedTimePeriod object, the value of this object specifies\n the specific days of the month within that time period when\n the schedule is active.\n\n There are two sets of bits one can use to define the day\n within a month:\n\n Enumerations starting with the letter 'd' indicate a\n day in a month relative to the first day of a month.\n The first day of the month can therefore be specified\n by setting the bit d1(0), and d31(30) means the last\n day of a month with 31 days.\n\n Enumerations starting with the letter 'r' indicate a\n day in a month in reverse order, relative to the last\n day of a month. The last day in the month can therefore\n be specified by setting the bit r1(31), and r31(61) means\n the first day of a month with 31 days.\n\n Setting multiple bits will include several days in the set\n of possible days for this schedule. Setting all bits starting\n with the letter 'd' or all bits starting with the letter 'r'\n will cause the schedule to act independently of the day of the\n month.")
# BITS weekday mask: selects active days of the week; all bits set means
# weekday-independent.
# Fix: the generator emitted .clone(namedValues=...) twice with identical NamedValues;
# the redundant second clone (a no-op re-copy of the same enumeration) is removed.
pmSchedWeekDay = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 8, 1, 7), Bits().clone(namedValues=NamedValues(("sunday", 0), ("monday", 1), ("tuesday", 2), ("wednesday", 3), ("thursday", 4), ("friday", 5), ("saturday", 6),))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmSchedWeekDay.setDescription('Within the overall time period specified in the\n pmSchedTimePeriod object, the value of this object specifies\n the specific days of the week within that time period when\n the schedule is active. Setting all bits will cause the\n schedule to act independently of the day of the week.')
# Daily active range 'Thhmmss/Thhmmss'; DEFVAL hex decodes to "T000000/T235959"
# (i.e. active all day). Ranges may span midnight.
pmSchedTimeOfDay = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 8, 1, 8), PmUTF8String().subtype(subtypeSpec=ValueSizeConstraint(0,15)).clone(hexValue="543030303030302F54323335393539")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmSchedTimeOfDay.setDescription("Within the overall time period specified in the\n pmSchedTimePeriod object, the value of this object specifies\n the range of times in a day when the schedule is active.\n\n This value is stored in a format based on the RFC 2445 format\n for 'time': The character 'T' followed by a 'time' string,\n followed by the solidus character, '/', followed by the\n character 'T', followed by a second time string. The first time\n indicates the beginning of the range, and the second time\n indicates the end. Thus, this value takes the following\n form:\n\n 'Thhmmss/Thhmmss'.\n\n The second substring always identifies a later time than the\n first substring. To allow for ranges that span midnight,\n however, the value of the second string may be smaller than\n the value of the first substring. Thus, 'T080000/T210000'\n identifies the range from 0800 until 2100, whereas\n 'T210000/T080000' identifies the range from 2100 until 0800 of\n the following day.\n\n When a range spans midnight, by definition it includes parts\n of two successive days. When one of these days is also\n selected by either the MonthOfYearMask, DayOfMonthMask, and/or\n DayOfWeekMask, but the other day is not, then the policy is\n active only during the portion of the range that falls on the\n selected day. For example, if the range extends from 2100\n\n\n\n until 0800, and the day of week mask selects Monday and\n Tuesday, then the policy is active during the following three\n intervals:\n\n From midnight Sunday until 0800 Monday\n From 2100 Monday until 0800 Tuesday\n From 2100 Tuesday until 23:59:59 Tuesday\n\n Setting this value to 'T000000/T235959' will cause the\n schedule to act independently of the time of day.")
# localTime(1) vs utcTime(2) interpretation of the period/mask values; DEFVAL utcTime.
pmSchedLocalOrUtc = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 8, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("localTime", 1), ("utcTime", 2),)).clone('utcTime')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmSchedLocalOrUtc.setDescription('This object indicates whether the times represented in the\n TimePeriod object and in the various Mask objects represent\n local times or UTC times.')
# StorageType TC; DEFVAL volatile.
pmSchedStorageType = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 8, 1, 10), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmSchedStorageType.setDescription("This object defines whether this schedule entry is kept\n in volatile storage and lost upon reboot or\n backed up by non-volatile or permanent storage.\n\n Conceptual rows having the value 'permanent' must allow write\n access to the columnar objects pmSchedDescr, pmSchedWeekDay,\n pmSchedMonth, and pmSchedDay.\n\n If the value of this object is 'permanent', no values in the\n associated row have to be writable.")
# RowStatus column controlling row creation/deletion.
pmSchedRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 8, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: pmSchedRowStatus.setDescription('The status of this schedule entry.\n\n If the value of this object is active, no object in this row\n may be modified.')
# --- pmTrackingPETable (1.3.6.1.2.1.124.9): policy->element tracking, indexed
# policy-first for fast "all elements under policy X" retrieval.
pmTrackingPETable = MibTable((1, 3, 6, 1, 2, 1, 124, 9), )
if mibBuilder.loadTexts: pmTrackingPETable.setDescription('The pmTrackingPETable describes what elements\n are active (under control of) a policy. This table is indexed\n in order to optimize retrieval of the entire status for a\n given policy.')
# Conceptual row; pmPolicyIndex (defined elsewhere in this module) leads the index.
pmTrackingPEEntry = MibTableRow((1, 3, 6, 1, 2, 1, 124, 9, 1), ).setIndexNames((0, "POLICY-BASED-MANAGEMENT-MIB", "pmPolicyIndex"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmTrackingPEElement"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmTrackingPEContextName"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmTrackingPEContextEngineID"))
if mibBuilder.loadTexts: pmTrackingPEEntry.setDescription('An entry in the pmTrackingPETable. The pmPolicyIndex in\n the index specifies the policy tracked by this entry.\n\n Note that some combinations of index values may result in an\n instance name that exceeds a length of 128 sub-identifiers,\n which exceeds the maximum for the SNMP\n protocol. Implementations should take care to avoid such\n combinations.')
# Index column: RowPointer to the element acted upon.
pmTrackingPEElement = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 9, 1, 1), RowPointer())
if mibBuilder.loadTexts: pmTrackingPEElement.setDescription('The element that is acted upon by the associated policy.\n\n As this object is used in the index for the\n pmTrackingPETable, users of this table should be careful not\n to create entries that would result in instance names with\n more than 128 sub-identifiers.')
# Index column: SNMP context name, empty string = default context.
pmTrackingPEContextName = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 9, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,32)))
if mibBuilder.loadTexts: pmTrackingPEContextName.setDescription('If the associated element is not in the default SNMP context\n for the target system, this object is used to identify the\n context. If the element is in the default context, this object\n is equal to the empty string.')
# Index column: remote contextEngineID; size 0 (local) or 5..32 octets.
pmTrackingPEContextEngineID = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 9, 1, 3), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0,0),ValueSizeConstraint(5,32),)))
if mibBuilder.loadTexts: pmTrackingPEContextEngineID.setDescription('If the associated element is on a remote system, this object\n is used to identify the remote system. This object contains\n the contextEngineID of the system on which the associated\n element resides. If the element is on the local system,\n this object will be the empty string.')
# Read-only BITS flags reporting last-execution outcomes (precedence skip, condition/
# action run-time exceptions, user signalError() calls).
pmTrackingPEInfo = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 9, 1, 4), Bits().clone(namedValues=NamedValues(("actionSkippedDueToPrecedence", 0), ("conditionRunTimeException", 1), ("conditionUserSignal", 2), ("actionRunTimeException", 3), ("actionUserSignal", 4),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: pmTrackingPEInfo.setDescription("This object returns information about the previous policy\n script executions.\n\n If the actionSkippedDueToPrecedence(1) bit is set, the last\n execution of the associated policy condition returned non-zero,\n but the action is not active, because it was trumped by a\n matching policy condition in the same precedence group with a\n higher precedence value.\n\n If the conditionRunTimeException(2) bit is set, the last\n execution of the associated policy condition encountered a\n run-time exception and aborted.\n\n If the conditionUserSignal(3) bit is set, the last\n execution of the associated policy condition called the\n signalError() function.\n\n If the actionRunTimeException(4) bit is set, the last\n execution of the associated policy action encountered a\n run-time exception and aborted.\n\n If the actionUserSignal(5) bit is set, the last\n execution of the associated policy action called the\n signalError() function.\n\n Entries will only exist in this table of one or more bits are\n set. In particular, if an entry does not exist for a\n particular policy/element combination, it can be assumed that\n the policy's condition did not match 'this element'.")
# --- pmTrackingEPTable (1.3.6.1.2.1.124.10): element->policy tracking, the inverse view
# of pmTrackingPETable, indexed element-first.
pmTrackingEPTable = MibTable((1, 3, 6, 1, 2, 1, 124, 10), )
if mibBuilder.loadTexts: pmTrackingEPTable.setDescription('The pmTrackingEPTable describes what policies\n are controlling an element. This table is indexed in\n order to optimize retrieval of the status of all policies\n active for a given element.')
# Conceptual row; element identification leads the index, pmPolicyIndex is last.
pmTrackingEPEntry = MibTableRow((1, 3, 6, 1, 2, 1, 124, 10, 1), ).setIndexNames((0, "POLICY-BASED-MANAGEMENT-MIB", "pmTrackingEPElement"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmTrackingEPContextName"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmTrackingEPContextEngineID"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmPolicyIndex"))
if mibBuilder.loadTexts: pmTrackingEPEntry.setDescription("An entry in the pmTrackingEPTable. Entries exist for all\n element/policy combinations for which the policy's condition\n matches and only if the schedule for the policy is active.\n\n The pmPolicyIndex in the index specifies the policy\n tracked by this entry.\n\n Note that some combinations of index values may result in an\n instance name that exceeds a length of 128 sub-identifiers,\n which exceeds the maximum for the SNMP protocol.\n Implementations should take care to avoid such combinations.")
# Index column: RowPointer to the element acted upon.
pmTrackingEPElement = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 10, 1, 1), RowPointer())
if mibBuilder.loadTexts: pmTrackingEPElement.setDescription('The element acted upon by the associated policy.\n\n As this object is used in the index for the\n pmTrackingEPTable, users of this table should be careful\n not to create entries that would result in instance names\n with more than 128 sub-identifiers.')
# Index column: SNMP context name, empty string = default context.
pmTrackingEPContextName = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 10, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,32)))
if mibBuilder.loadTexts: pmTrackingEPContextName.setDescription('If the associated element is not in the default SNMP context\n\n\n\n for the target system, this object is used to identify the\n context. If the element is in the default context, this object\n is equal to the empty string.')
# Index column: remote contextEngineID; size 0 (local) or 5..32 octets.
pmTrackingEPContextEngineID = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 10, 1, 3), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0,0),ValueSizeConstraint(5,32),)))
if mibBuilder.loadTexts: pmTrackingEPContextEngineID.setDescription('If the associated element is on a remote system, this object\n is used to identify the remote system. This object contains\n the contextEngineID of the system on which the associated\n element resides. If the element is on the local system,\n this object will be the empty string.')
# Read-write status: on(1) normal; forceOff(2) forcibly disables the policy on this
# element and persists across reboots until set back to on(1).
pmTrackingEPStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("on", 1), ("forceOff", 2),))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pmTrackingEPStatus.setDescription("This entry will only exist if the calendar for the policy is\n active and if the associated policyCondition returned 1 for\n 'this element'.\n\n A policy can be forcibly disabled on a particular element\n by setting this value to forceOff(2). The agent should then\n act as though the policyCondition failed for 'this element'.\n The forceOff(2) state will persist (even across reboots) until\n this value is set to on(1) by a management request. The\n forceOff(2) state may be set even if the entry does not\n previously exist so that future policy invocations can be\n avoided.\n\n Unless forcibly disabled, if this entry exists, its value\n will be on(1).")
# --- pmDebuggingTable (1.3.6.1.2.1.124.11): log of run-time exceptions for policies
# with debugging enabled; oldest entries discarded first, cleared on restart.
pmDebuggingTable = MibTable((1, 3, 6, 1, 2, 1, 124, 11), )
if mibBuilder.loadTexts: pmDebuggingTable.setDescription('Policies that have debugging turned on will generate a log\n entry in the policy debugging table for every runtime\n exception that occurs in either the condition or action\n code.\n\n The pmDebuggingTable logs debugging messages when\n policies experience run-time exceptions in either the condition\n or action code and the associated pmPolicyDebugging object\n has been turned on.\n\n The maximum number of debugging entries that will be stored\n and the maximum length of time an entry will be kept are an\n implementation-dependent manner. If entries must\n be discarded to make room for new entries, the oldest entries\n must be discarded first.\n\n If the system restarts, all debugging entries may be deleted.')
# Conceptual row; indexed by policy, element, context, and a per-combination log index.
pmDebuggingEntry = MibTableRow((1, 3, 6, 1, 2, 1, 124, 11, 1), ).setIndexNames((0, "POLICY-BASED-MANAGEMENT-MIB", "pmPolicyIndex"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmDebuggingElement"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmDebuggingContextName"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmDebuggingContextEngineID"), (0, "POLICY-BASED-MANAGEMENT-MIB", "pmDebuggingLogIndex"))
if mibBuilder.loadTexts: pmDebuggingEntry.setDescription('An entry in the pmDebuggingTable. The pmPolicyIndex in the\n index specifies the policy that encountered the exception\n that led to this log entry.\n\n Note that some combinations of index values may result in an\n instance name that exceeds a length of 128 sub-identifiers,\n which exceeds the maximum for the SNMP protocol.\n Implementations should take care to avoid such combinations.')
# Index column: RowPointer to the element being processed when the error occurred.
pmDebuggingElement = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 11, 1, 1), RowPointer())
if mibBuilder.loadTexts: pmDebuggingElement.setDescription("The element the policy was executing on when it encountered\n the error that led to this log entry.\n\n For example, if the element is interface 3, then this object\n will contain the OID for 'ifIndex.3'.\n\n As this object is used in the index for the\n pmDebuggingTable, users of this table should be careful\n not to create entries that would result in instance names\n with more than 128 sub-identifiers.")
# Index column: SNMP context name, empty string = default context.
pmDebuggingContextName = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 11, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,32)))
if mibBuilder.loadTexts: pmDebuggingContextName.setDescription('If the associated element is not in the default SNMP context\n for the target system, this object is used to identify the\n context. If the element is in the default context, this object\n is equal to the empty string.')
# Index column: remote contextEngineID; size 0 (local) or 5..32 octets.
pmDebuggingContextEngineID = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 11, 1, 3), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0,0),ValueSizeConstraint(5,32),)))
if mibBuilder.loadTexts: pmDebuggingContextEngineID.setDescription('If the associated element is on a remote system, this object\n is used to identify the remote system. This object contains\n the contextEngineID of the system on which the associated\n element resides. If the element is on the local system,\n this object will be the empty string.')
pmDebuggingLogIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 11, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1,4294967295)))
if mibBuilder.loadTexts: pmDebuggingLogIndex.setDescription('A unique index for this log entry among other log entries\n for this policy/element combination.')
pmDebuggingMessage = MibTableColumn((1, 3, 6, 1, 2, 1, 124, 11, 1, 5), PmUTF8String().subtype(subtypeSpec=ValueSizeConstraint(0,128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: pmDebuggingMessage.setDescription('An error message generated by the policy execution\n environment. It is recommended that this message include the\n time of day when the message was generated, if known.')
pmNotifications = MibIdentifier((1, 3, 6, 1, 2, 1, 124, 0))
pmNewRoleNotification = NotificationType((1, 3, 6, 1, 2, 1, 124, 0, 1)).setObjects(*(("POLICY-BASED-MANAGEMENT-MIB", "pmRoleStatus"),))
if mibBuilder.loadTexts: pmNewRoleNotification.setDescription('The pmNewRoleNotification is sent when an agent is configured\n with its first instance of a previously unused role string\n (not every time a new element is given a particular role).\n\n An instance of the pmRoleStatus object is sent containing\n the new roleString in its index. In the event that two or\n more elements are given the same role simultaneously, it is an\n implementation-dependent matter as to which pmRoleTable\n instance will be included in the notification.')
pmNewCapabilityNotification = NotificationType((1, 3, 6, 1, 2, 1, 124, 0, 2)).setObjects(*(("POLICY-BASED-MANAGEMENT-MIB", "pmCapabilitiesType"),))
if mibBuilder.loadTexts: pmNewCapabilityNotification.setDescription('The pmNewCapabilityNotification is sent when an agent\n gains a new capability that did not previously exist in any\n element on the system (not every time an element gains a\n particular capability).\n\n An instance of the pmCapabilitiesType object is sent containing\n the identity of the new capability. In the event that two or\n more elements gain the same capability simultaneously, it is an\n implementation-dependent matter as to which pmCapabilitiesType\n instance will be included in the notification.')
pmAbnormalTermNotification = NotificationType((1, 3, 6, 1, 2, 1, 124, 0, 3)).setObjects(*(("POLICY-BASED-MANAGEMENT-MIB", "pmTrackingPEInfo"),))
if mibBuilder.loadTexts: pmAbnormalTermNotification.setDescription("The pmAbnormalTermNotification is sent when a policy's\n pmPolicyAbnormalTerminations gauge value changes from zero to\n any value greater than zero and no such notification has been\n sent for that policy in the last 5 minutes.\n\n The notification contains an instance of the pmTrackingPEInfo\n object where the pmPolicyIndex component of the index\n identifies the associated policy and the rest of the index\n identifies an element on which the policy failed.")
pmConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 124, 12))
pmCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 124, 12, 1))
pmGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 124, 12, 2))
pmCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 124, 12, 1, 1)).setObjects(*(("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyManagementGroup"), ("POLICY-BASED-MANAGEMENT-MIB", "pmSchedGroup"), ("POLICY-BASED-MANAGEMENT-MIB", "pmNotificationGroup"),))
if mibBuilder.loadTexts: pmCompliance.setDescription('Describes the requirements for conformance to\n the Policy-Based Management MIB')
pmPolicyManagementGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 124, 12, 2, 1)).setObjects(*(("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyPrecedenceGroup"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyPrecedence"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicySchedule"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyElementTypeFilter"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyConditionScriptIndex"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyActionScriptIndex"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyParameters"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyConditionMaxLatency"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyActionMaxLatency"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyMaxIterations"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyDescription"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyMatches"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyAbnormalTerminations"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyExecutionErrors"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyDebugging"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyStorageType"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyAdminStatus"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyRowStatus"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyCodeText"), ("POLICY-BASED-MANAGEMENT-MIB", "pmPolicyCodeStatus"), ("POLICY-BASED-MANAGEMENT-MIB", "pmElementTypeRegMaxLatency"), ("POLICY-BASED-MANAGEMENT-MIB", "pmElementTypeRegDescription"), ("POLICY-BASED-MANAGEMENT-MIB", "pmElementTypeRegStorageType"), ("POLICY-BASED-MANAGEMENT-MIB", "pmElementTypeRegRowStatus"), ("POLICY-BASED-MANAGEMENT-MIB", "pmRoleStatus"), ("POLICY-BASED-MANAGEMENT-MIB", "pmCapabilitiesType"), ("POLICY-BASED-MANAGEMENT-MIB", "pmCapabilitiesOverrideState"), ("POLICY-BASED-MANAGEMENT-MIB", "pmCapabilitiesOverrideRowStatus"), ("POLICY-BASED-MANAGEMENT-MIB", "pmTrackingPEInfo"), ("POLICY-BASED-MANAGEMENT-MIB", "pmTrackingEPStatus"), ("POLICY-BASED-MANAGEMENT-MIB", "pmDebuggingMessage"),))
if mibBuilder.loadTexts: pmPolicyManagementGroup.setDescription('Objects that allow for the creation and management of\n configuration policies.')
pmSchedGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 124, 12, 2, 2)).setObjects(*(("POLICY-BASED-MANAGEMENT-MIB", "pmSchedLocalTime"), ("POLICY-BASED-MANAGEMENT-MIB", "pmSchedGroupIndex"), ("POLICY-BASED-MANAGEMENT-MIB", "pmSchedDescr"), ("POLICY-BASED-MANAGEMENT-MIB", "pmSchedTimePeriod"), ("POLICY-BASED-MANAGEMENT-MIB", "pmSchedMonth"), ("POLICY-BASED-MANAGEMENT-MIB", "pmSchedDay"), ("POLICY-BASED-MANAGEMENT-MIB", "pmSchedWeekDay"), ("POLICY-BASED-MANAGEMENT-MIB", "pmSchedTimeOfDay"), ("POLICY-BASED-MANAGEMENT-MIB", "pmSchedLocalOrUtc"), ("POLICY-BASED-MANAGEMENT-MIB", "pmSchedStorageType"), ("POLICY-BASED-MANAGEMENT-MIB", "pmSchedRowStatus"),))
if mibBuilder.loadTexts: pmSchedGroup.setDescription('Objects that allow for the scheduling of policies.')
pmNotificationGroup = NotificationGroup((1, 3, 6, 1, 2, 1, 124, 12, 2, 3)).setObjects(*(("POLICY-BASED-MANAGEMENT-MIB", "pmNewRoleNotification"), ("POLICY-BASED-MANAGEMENT-MIB", "pmNewCapabilityNotification"), ("POLICY-BASED-MANAGEMENT-MIB", "pmAbnormalTermNotification"),))
if mibBuilder.loadTexts: pmNotificationGroup.setDescription('Notifications sent by an Policy MIB agent.')
pmBaseFunctionLibrary = MibIdentifier((1, 3, 6, 1, 2, 1, 124, 12, 2, 4))
mibBuilder.exportSymbols("POLICY-BASED-MANAGEMENT-MIB", pmRoleString=pmRoleString, pmSchedIndex=pmSchedIndex, pmCapabilitiesOverrideEntry=pmCapabilitiesOverrideEntry, pmPolicyActionMaxLatency=pmPolicyActionMaxLatency, pmSchedTimeOfDay=pmSchedTimeOfDay, pmTrackingPEContextEngineID=pmTrackingPEContextEngineID, pmPolicyDescription=pmPolicyDescription, PmUTF8String=PmUTF8String, pmDebuggingMessage=pmDebuggingMessage, pmElementTypeRegRowStatus=pmElementTypeRegRowStatus, pmDebuggingContextName=pmDebuggingContextName, pmPolicyCodeSegment=pmPolicyCodeSegment, pmPolicyManagementGroup=pmPolicyManagementGroup, pmPolicyMaxIterations=pmPolicyMaxIterations, pmPolicyExecutionErrors=pmPolicyExecutionErrors, pmCompliance=pmCompliance, PYSNMP_MODULE_ID=pmMib, pmPolicyPrecedenceGroup=pmPolicyPrecedenceGroup, pmPolicyMatches=pmPolicyMatches, pmPolicySchedule=pmPolicySchedule, pmNewRoleNotification=pmNewRoleNotification, pmPolicyDebugging=pmPolicyDebugging, pmAbnormalTermNotification=pmAbnormalTermNotification, pmCapabilitiesOverrideState=pmCapabilitiesOverrideState, pmTrackingEPContextName=pmTrackingEPContextName, pmPolicyAdminStatus=pmPolicyAdminStatus, pmSchedEntry=pmSchedEntry, pmSchedLocalTime=pmSchedLocalTime, pmPolicyConditionScriptIndex=pmPolicyConditionScriptIndex, pmCapabilitiesOverrideRowStatus=pmCapabilitiesOverrideRowStatus, pmPolicyCodeStatus=pmPolicyCodeStatus, pmElementTypeRegTable=pmElementTypeRegTable, pmSchedTimePeriod=pmSchedTimePeriod, pmRoleContextName=pmRoleContextName, pmTrackingEPEntry=pmTrackingEPEntry, pmRoleContextEngineID=pmRoleContextEngineID, pmDebuggingElement=pmDebuggingElement, pmDebuggingContextEngineID=pmDebuggingContextEngineID, pmElementTypeRegMaxLatency=pmElementTypeRegMaxLatency, pmPolicyEntry=pmPolicyEntry, pmPolicyElementTypeFilter=pmPolicyElementTypeFilter, pmMib=pmMib, pmSchedDay=pmSchedDay, pmCapabilitiesTable=pmCapabilitiesTable, pmCompliances=pmCompliances, pmPolicyAdminGroup=pmPolicyAdminGroup, 
pmCapabilitiesOverrideTable=pmCapabilitiesOverrideTable, pmNotifications=pmNotifications, pmTrackingEPStatus=pmTrackingEPStatus, pmTrackingPEElement=pmTrackingPEElement, pmRoleTable=pmRoleTable, pmConformance=pmConformance, pmDebuggingTable=pmDebuggingTable, pmPolicyPrecedence=pmPolicyPrecedence, pmPolicyCodeEntry=pmPolicyCodeEntry, pmSchedTable=pmSchedTable, pmCapabilitiesType=pmCapabilitiesType, pmSchedGroup=pmSchedGroup, pmPolicyCodeText=pmPolicyCodeText, pmRoleStatus=pmRoleStatus, pmTrackingPETable=pmTrackingPETable, pmRoleElement=pmRoleElement, pmSchedWeekDay=pmSchedWeekDay, pmPolicyCodeTable=pmPolicyCodeTable, pmElementTypeRegEntry=pmElementTypeRegEntry, pmTrackingEPTable=pmTrackingEPTable, pmPolicyIndex=pmPolicyIndex, pmElementTypeRegOIDPrefix=pmElementTypeRegOIDPrefix, pmPolicyActionScriptIndex=pmPolicyActionScriptIndex, pmSchedGroupIndex=pmSchedGroupIndex, pmNewCapabilityNotification=pmNewCapabilityNotification, pmDebuggingEntry=pmDebuggingEntry, pmRoleEntry=pmRoleEntry, pmPolicyCodeScriptIndex=pmPolicyCodeScriptIndex, pmSchedStorageType=pmSchedStorageType, pmTrackingEPContextEngineID=pmTrackingEPContextEngineID, pmPolicyTable=pmPolicyTable, pmTrackingPEContextName=pmTrackingPEContextName, pmPolicyStorageType=pmPolicyStorageType, pmSchedLocalOrUtc=pmSchedLocalOrUtc, pmBaseFunctionLibrary=pmBaseFunctionLibrary, pmNotificationGroup=pmNotificationGroup, pmDebuggingLogIndex=pmDebuggingLogIndex, pmGroups=pmGroups, pmCapabilitiesEntry=pmCapabilitiesEntry, pmPolicyAbnormalTerminations=pmPolicyAbnormalTerminations, pmSchedMonth=pmSchedMonth, pmSchedDescr=pmSchedDescr, pmElementTypeRegDescription=pmElementTypeRegDescription, pmPolicyParameters=pmPolicyParameters, pmElementTypeRegStorageType=pmElementTypeRegStorageType, pmPolicyConditionMaxLatency=pmPolicyConditionMaxLatency, pmTrackingEPElement=pmTrackingEPElement, pmPolicyRowStatus=pmPolicyRowStatus, pmTrackingPEInfo=pmTrackingPEInfo, pmSchedRowStatus=pmSchedRowStatus, 
pmCapabilitiesOverrideType=pmCapabilitiesOverrideType, pmTrackingPEEntry=pmTrackingPEEntry)
| 419.464115
| 4,000
| 0.71935
| 12,101
| 87,668
| 5.211057
| 0.111644
| 0.005772
| 0.004757
| 0.00628
| 0.358791
| 0.312723
| 0.27035
| 0.252827
| 0.232893
| 0.22111
| 0
| 0.03601
| 0.195111
| 87,668
| 208
| 4,001
| 421.480769
| 0.857645
| 0.003764
| 0
| 0
| 0
| 0.4
| 0.71691
| 0.055503
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.025
| 0.035
| 0
| 0.045
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f2c7dab2890b7f29703291212dfc57406365fba6
| 112
|
py
|
Python
|
Exercise_4_8.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
Exercise_4_8.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
Exercise_4_8.py
|
kushrami/Python-Crash-Course-book-Excersice
|
7093181940a90d9f4bab5775ef56f57963450393
|
[
"Apache-2.0"
] | null | null | null |
# Cubes exercise: print the cubes of the integers 1 through 10.
# Fixed: the original stored the result in a variable named `list`,
# shadowing the builtin; renamed to `cubes` and built it with a
# comprehension instead of a manual append loop.
cubes = [number ** 3 for number in range(1, 11)]
for cube in cubes:
    print(cube)
| 11.2
| 26
| 0.633929
| 18
| 112
| 3.944444
| 0.611111
| 0.253521
| 0.309859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045977
| 0.223214
| 112
| 9
| 27
| 12.444444
| 0.770115
| 0.044643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4b3f0937b9be14faeaa713b3a310910118e6b68b
| 145
|
py
|
Python
|
config/conf.py
|
lllllaurel/bravo
|
2c25f5af1779e2662533a64ca8b2f4e40d96fd07
|
[
"Apache-2.0"
] | null | null | null |
config/conf.py
|
lllllaurel/bravo
|
2c25f5af1779e2662533a64ca8b2f4e40d96fd07
|
[
"Apache-2.0"
] | null | null | null |
config/conf.py
|
lllllaurel/bravo
|
2c25f5af1779e2662533a64ca8b2f4e40d96fd07
|
[
"Apache-2.0"
] | null | null | null |
from urllib import parse


# Database configuration.
# SECURITY NOTE(review): credentials are hard-coded in source; consider
# loading them from environment variables or a secrets store instead.
class DatabaseConfig():
    # Name of the database/schema to connect to.
    DBNAME = 'admin'
    # Database user name.
    USER = 'bravo'
    # Password is stored URL-encoded; unquote_plus decodes '+' and %XX
    # escapes (a bare trailing '%' is left as-is).
    PASSWORD = parse.unquote_plus('Jujiao2020%')
| 20.714286
| 48
| 0.689655
| 16
| 145
| 6.1875
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034188
| 0.193103
| 145
| 7
| 48
| 20.714286
| 0.811966
| 0.034483
| 0
| 0
| 0
| 0
| 0.15
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.2
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
4b49cb90139afc69c594095e7a0ece4f01e09d1b
| 204
|
py
|
Python
|
step03.py
|
ml-as-a-service/scrapping-example
|
a6ddcd0cc03c382b81b89ca953cbfc74c95b62a4
|
[
"MIT"
] | null | null | null |
step03.py
|
ml-as-a-service/scrapping-example
|
a6ddcd0cc03c382b81b89ca953cbfc74c95b62a4
|
[
"MIT"
] | null | null | null |
step03.py
|
ml-as-a-service/scrapping-example
|
a6ddcd0cc03c382b81b89ca953cbfc74c95b62a4
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pandas as pd

# NOTE(review): Keys and pandas are imported but unused in this snippet;
# presumably used by later steps of the scraping tutorial.
# Launch Chrome via the chromedriver binary at a fixed absolute path.
driver = webdriver.Chrome('/usr/bin/chromedriver')
# Open the hoopshype NBA player-salaries page in the browser session.
driver.get('https://hoopshype.com/salaries/players/')
| 34
| 53
| 0.803922
| 28
| 204
| 5.857143
| 0.714286
| 0.146341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 204
| 6
| 53
| 34
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0.292683
| 0.102439
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
4b5a662386f9621559f98785b5c92045886293dd
| 148
|
py
|
Python
|
django_mailbox2/transports/mh.py
|
sunset-crew/django-mailbox
|
e9f6079c66e70e705f3c968e2e8c7b00c182ef40
|
[
"MIT"
] | null | null | null |
django_mailbox2/transports/mh.py
|
sunset-crew/django-mailbox
|
e9f6079c66e70e705f3c968e2e8c7b00c182ef40
|
[
"MIT"
] | null | null | null |
django_mailbox2/transports/mh.py
|
sunset-crew/django-mailbox
|
e9f6079c66e70e705f3c968e2e8c7b00c182ef40
|
[
"MIT"
] | null | null | null |
from mailbox import MH
from django_mailbox2.transports.generic import GenericFileMailbox


class MHTransport(GenericFileMailbox):
    """File-mailbox transport for MH-format mail directories.

    All behavior comes from GenericFileMailbox; this subclass only
    selects the stdlib mailbox class used to open the store.
    """
    # mailbox.MH is the stdlib implementation of the MH directory format.
    _variant = MH
| 21.142857
| 65
| 0.831081
| 16
| 148
| 7.5625
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007752
| 0.128378
| 148
| 6
| 66
| 24.666667
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
4b76d48b54e922cc8ff511a526cbf261e7dac189
| 14,467
|
py
|
Python
|
python/GradFuncs.py
|
ziyuchen7/SpecNet2
|
ae814e6e1e9a823bd221394a566c461fac6d0a59
|
[
"MIT"
] | null | null | null |
python/GradFuncs.py
|
ziyuchen7/SpecNet2
|
ae814e6e1e9a823bd221394a566c461fac6d0a59
|
[
"MIT"
] | null | null | null |
python/GradFuncs.py
|
ziyuchen7/SpecNet2
|
ae814e6e1e9a823bd221394a566c461fac6d0a59
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
from functools import reduce
import time
def specnet1_grad_fullu(model, batch_idx, matdata):
    """SpecNet1 gradient step (unnormalized variant) using the full dataset.

    Builds the surrogate loss trace(Y_batch^T grad_LA) with grad_LA
    detached, so backward() deposits grad_LA as dL/dY_batch into the
    model parameters.  `matdata` is a project data wrapper exposing
    Size/Wmat/Dmat/Xdata — exact semantics assumed from usage.
    """
    n = matdata.Size()  # NOTE(review): unused in this variant
    W = matdata.Wmat(rowidx=batch_idx)
    D = matdata.Dmat()
    Y = model.rotate(matdata.Xdata(), D = D)
    # DY = torch.mul(D[batch_idx], Y[batch_idx,:])
    WY = torch.matmul(W, Y)
    # Row-wise scaling by 1/D: implements D^{-1} W Y restricted to batch rows.
    DinvWY = torch.mul(1./D[batch_idx], WY)
    grad_LA = 2*(Y[batch_idx,:] - DinvWY)
    # Detach so grad_LA acts as a constant coefficient in the surrogate loss.
    grad_LA = grad_LA.detach()
    apploss = torch.trace(torch.matmul(Y[batch_idx,:].t(), grad_LA))
    apploss.backward()
def specnet1_grad_localu(model, batch_idx, matdata):
    """SpecNet1 gradient step (unnormalized) using only the mini-batch.

    The degree vector D_batch is recomputed from the batch submatrix
    W_batch, so normalization is local to the mini-batch.
    """
    # start = time.time()
    n_batch = len(batch_idx)  # NOTE(review): unused
    W_batch = matdata.Wmat(rowidx=batch_idx, colidx=batch_idx)
    # Local degrees: row sums of the batch-restricted affinity matrix.
    D_batch = torch.sum(W_batch, 1, keepdim=True)
    Y_batch = model.rotate(matdata.Xdata(batch_idx), D=D_batch)
    # DY_batch = torch.mul(D_batch, Y_batch)
    WY_batch = torch.matmul(W_batch, Y_batch)
    DinvWY_batch = torch.mul(1./D_batch, WY_batch)
    grad_LA = 2*(Y_batch - DinvWY_batch)
    grad_LA = grad_LA.detach()
    # Surrogate loss: trace(Y^T grad) so backward() yields grad_LA as dL/dY.
    apploss = torch.trace(torch.matmul(Y_batch.t(), grad_LA))
    apploss.backward()
    # end = time.time()
    # print(end-start)
def specnet1_grad_fake_neighboru(model, batch_idx, matdata):
    """SpecNet1 gradient step (unnormalized) using sparse batch neighbors.

    Uses the sparse affinity rows of the batch plus the running YDY
    estimate maintained by update_YDY (shared mutable state on that
    function object).
    """
    n = matdata.Size()  # NOTE(review): unused (see commented nbr_idx line)
    n_batch = len(batch_idx)
    # Sparse rows: per-batch-row neighbor indices and affinity values.
    sp_nbr_idx_batch, sp_val_batch = matdata.spWmat(rowidx=batch_idx)
    # Union of all neighbor indices touched by the batch.
    nbr_idx = torch.LongTensor(reduce(np.union1d, sp_nbr_idx_batch))
    # nbr_idx = torch.LongTensor(range(n))
    D = matdata.Dmat()
    Z = model(matdata.Xdata())
    # Refresh the running YDY estimate with the current neighbor outputs.
    YDY, DY_nbr, _ = update_YDY(model, nbr_idx, Z[nbr_idx,:], matdata)
    # YDY = YDY.detach()
    Y = model.rotate(matdata.Xdata(), D = D, rotate_type = 'ydy', ydy = YDY)
    # DY_batch = torch.mul(D[batch_idx], Y[batch_idx,:])
    # Accumulate W Y per batch row from the sparse neighbor lists.
    WY_batch = torch.zeros(n_batch, Y.shape[1])
    for i in range(n_batch):
        WY_batch[i,:] = torch.sum(torch.mul(sp_val_batch[i],
                                  Y[sp_nbr_idx_batch[i].reshape(-1),:]),
                                  dim=0)
    DinvWY_batch = torch.mul(1./D[batch_idx], WY_batch)
    grad_LA = 2*(Y[batch_idx,:] - DinvWY_batch)
    grad_LA = grad_LA.detach()
    apploss = torch.trace(torch.matmul(Y[batch_idx,:].t(), grad_LA))
    apploss.backward()
## SpecNet1 loss_full uses all the data
def specnet1_grad_full(model, batch_idx, matdata):
    """SpecNet1 gradient step (symmetric D^{-1/2} W D^{-1/2} normalization),
    full data, batch rows only in the surrogate loss."""
    W = matdata.Wmat(rowidx=batch_idx)
    D = matdata.Dmat()
    Y = model.rotate(matdata.Xdata())
    # D^{-1/2} Y, then W (D^{-1/2} Y), then batch-row D^{-1/2} scaling.
    DinvY = torch.mul(1./torch.sqrt(D), Y)
    WDinvY = torch.matmul(W, DinvY)
    DinvWDinvY = torch.mul(1./torch.sqrt(D[batch_idx]), WDinvY)
    grad_LA = 2*(Y[batch_idx,:] - DinvWDinvY)
    grad_LA = grad_LA.detach()
    apploss = torch.trace(torch.matmul(Y[batch_idx,:].t(), grad_LA))
    apploss.backward()
## SpecNet1 loss_local only uses mini-batch
def specnet1_grad_local(model, batch_idx, matdata):
    """SpecNet1 gradient step (symmetric normalization) restricted to the
    mini-batch; degrees are recomputed from the batch submatrix."""
    n_batch = len(batch_idx)  # NOTE(review): unused
    Y_batch = model.rotate(matdata.Xdata(batch_idx))
    W_batch = matdata.Wmat(rowidx=batch_idx, colidx=batch_idx)
    # Local degrees from the batch-restricted affinity matrix.
    D_batch = torch.sum(W_batch, 1, keepdim=True)
    DinvY_batch = torch.mul(1./torch.sqrt(D_batch), Y_batch)
    WDinvY_batch = torch.matmul(W_batch, DinvY_batch)
    DinvWDinvY_batch = torch.mul(1./torch.sqrt(D_batch), WDinvY_batch)
    grad_LA = 2*(Y_batch - DinvWDinvY_batch)
    grad_LA = grad_LA.detach()
    apploss = torch.trace(torch.matmul(Y_batch.t(), grad_LA))
    apploss.backward()
## SpecNet1 loss_local only uses mini-batch neighbor
def specnet1_grad_neighbor(model, batch_idx, matdata):
    """SpecNet1 gradient step over the mini-batch and its sparse neighbors.

    NOTE(review): unlike the other specnet1_* variants, grad_LA here has
    no factor of 2 — confirm whether that is intentional.
    """
    n = matdata.Size()  # NOTE(review): unused
    n_batch = len(batch_idx)
    sp_nbr_idx_batch, sp_val_batch = matdata.spWmat(rowidx=batch_idx)
    # Union of all neighbor indices touched by the batch rows.
    nbr_idx = torch.LongTensor(reduce(np.union1d, sp_nbr_idx_batch))
    # Position of each batch index within nbr_idx.
    batch_idx_nbr = torch.LongTensor([torch.nonzero(nbr_idx==i, \
        as_tuple=False) for i in batch_idx])
    D_nbr = matdata.Dmat(nbr_idx)
    # Refresh the running estimate without building an autograd graph.
    with torch.no_grad():
        Y_nbr = model(matdata.Xdata(nbr_idx))
        update_YDY(model, nbr_idx, Y_nbr, matdata, DisId=True)
    # Recompute with gradients enabled so backward() can reach the model.
    Y_nbr = model.rotate(matdata.Xdata(nbr_idx), rotate_type = 'ydy')
    DinvY_nbr = torch.mul(1./torch.sqrt(D_nbr), Y_nbr)
    WDinvY_batch = torch.zeros(n_batch, DinvY_nbr.shape[1])
    for i in range(n_batch):
        # Map each sparse neighbor id to its row inside nbr_idx.
        idx = torch.LongTensor([torch.nonzero(nbr_idx==j, \
            as_tuple=False) for j in sp_nbr_idx_batch[i]])
        WDinvY_batch[i,:] = torch.sum(torch.mul(sp_val_batch[i],
                                      DinvY_nbr[idx,:]), dim=0)
    grad_LA = Y_nbr[batch_idx_nbr,:] - \
        torch.mul(1./torch.sqrt(D_nbr[batch_idx_nbr]), WDinvY_batch)
    grad_LA = grad_LA.detach()
    apploss = torch.trace(torch.matmul(Y_nbr[batch_idx_nbr,:].t(), grad_LA))
    apploss.backward()
def specnet1_grad_fake_neighbor(model, batch_idx, matdata):
    """SpecNet1 gradient step (symmetric normalization) using sparse batch
    neighbors and the running YDY estimate from update_YDY."""
    n_batch = len(batch_idx)
    sp_nbr_idx_batch, sp_val_batch = matdata.spWmat(rowidx=batch_idx)
    nbr_idx = torch.LongTensor(reduce(np.union1d, sp_nbr_idx_batch))
    D = matdata.Dmat()
    Y = model(matdata.Xdata())
    # Refresh the shared YDY state with current neighbor outputs (D = I).
    update_YDY(model, nbr_idx, Y[nbr_idx,:], matdata, DisId=True)
    Y = model.rotate(matdata.Xdata(), rotate_type = 'ydy')
    DinvY = torch.mul(1./torch.sqrt(D), Y)
    # Accumulate W D^{-1/2} Y per batch row from the sparse neighbor lists.
    WDinvY_batch = torch.zeros(n_batch, DinvY.shape[1])
    for i in range(n_batch):
        WDinvY_batch[i,:] = torch.sum(torch.mul(sp_val_batch[i],
                            DinvY[sp_nbr_idx_batch[i].reshape(-1),:]),
                            dim=0)
    grad_LA = 2*(Y[batch_idx,:] - \
        torch.mul(1./torch.sqrt(D[batch_idx]), WDinvY_batch))
    grad_LA = grad_LA.detach()
    apploss = torch.trace(torch.matmul(Y[batch_idx,:].t(), grad_LA))
    apploss.backward()
## SpecNet2 loss_full uses all the data
# def specnet2_grad_full(model, batch_idx, matdata):
#     n = matdata.Size()
#     Y = model(matdata.Xdata())
#     W = matdata.Wmat(rowidx=batch_idx)
#     WY = torch.matmul(W, Y)
#     YDY, DY = update_YDY(model, range(matdata.Size()), Y, matdata)
#     grad_LA = 4*(- WY/n + torch.matmul(DY[batch_idx,:], YDY)/n**3)
#     grad_LA = grad_LA.detach()
#     apploss = torch.trace(torch.matmul(Y[batch_idx,:].t(), grad_LA))
#     apploss.backward()
def specnet2_grad_full(model, batch_idx, matdata):
    """SpecNet2 gradient step using the full dataset.

    Differs from the commented-out older version above by the extra
    DDtY/n term and by evaluating the surrogate loss on a fresh
    Y_batch forward pass.
    """
    n = matdata.Size()
    Y = model(matdata.Xdata())
    W = matdata.Wmat(rowidx=batch_idx)
    D = matdata.Dmat(batch_idx)
    WY = torch.matmul(W, Y)
    # update_YDY over all rows also refreshes the shared running state.
    YDY, DY, DtY = update_YDY(model, range(matdata.Size()), Y, matdata)
    DDtY = torch.mul(D, DtY)
    Y_batch = model(matdata.Xdata(batch_idx))
    grad_LA = 4*(- WY/n + DDtY/n+ torch.matmul(DY[batch_idx,:], YDY)/n**3)
    grad_LA = grad_LA.detach()
    apploss = torch.trace(torch.matmul(Y_batch.t(), grad_LA))
    apploss.backward()
def specnet2_grad_symfull(model, batch_idx, matdata):
    """SpecNet2 gradient step, symmetric-normalized, full data.

    The constant-eigenvector term uses v1 = 1/sqrt(n) instead of the
    degree vector used by specnet2_grad_full.
    """
    n = matdata.Size()
    Y = model(matdata.Xdata())
    W = matdata.Wmat(rowidx=batch_idx)
    D = matdata.Dmat()
    DinvY = torch.mul(1./torch.sqrt(D), Y)
    WDinvY = torch.matmul(W, DinvY)
    DinvWDinvY = torch.mul(1./torch.sqrt(D[batch_idx]), WDinvY)
    # DisId=True: the Gram matrix is Y^T Y (identity weighting).
    YY, _, _ = update_YDY(model, range(matdata.Size()), Y, matdata, DisId=True)
    # Normalized all-ones vector, the trivial top eigenvector.
    v1 = torch.ones(n,1)/np.sqrt(n)
    DDtY = torch.mul(v1[batch_idx,:], torch.matmul(v1.t(),Y))
    grad_LA = 4*(- DinvWDinvY + DDtY+ torch.matmul(Y[batch_idx,:], YY)/n)
    grad_LA = grad_LA.detach()
    apploss = torch.trace(torch.matmul(Y[batch_idx,:].t(), grad_LA))
    apploss.backward()
## SpecNet2 loss_local only uses mini-batch
def specnet2_grad_local(model, batch_idx, matdata):
    """SpecNet2 gradient step restricted to the mini-batch; degrees and
    the Gram matrix YDY are recomputed from the batch alone."""
    n_batch = len(batch_idx)
    Y_batch = model(matdata.Xdata(batch_idx))
    W_batch = matdata.Wmat(rowidx=batch_idx, colidx=batch_idx)
    # Local degrees from the batch-restricted affinity matrix.
    D_batch = torch.sum(W_batch, 1, keepdim=True)
    # Normalized degree row vector; scalar denominator is sqrt(D)^T sqrt(D).
    Dt = D_batch.t()/torch.matmul(torch.sqrt(D_batch.t()),torch.sqrt(D_batch))
    DDtY = torch.mul(D_batch, torch.matmul(Dt, Y_batch))
    WY_batch = torch.matmul(W_batch, Y_batch)
    DY_batch = torch.mul(D_batch, Y_batch)
    YDY_batch = torch.matmul(Y_batch.t(), DY_batch)
    grad_LA = 4*(- WY_batch/n_batch + DDtY/n_batch\
        + torch.matmul(DY_batch, YDY_batch)/n_batch**3)
    grad_LA = grad_LA.detach()
    apploss = torch.trace(torch.matmul(Y_batch.t(), grad_LA))
    apploss.backward()
def specnet2_grad_neighboru(model, batch_idx, matdata):
    """SpecNet2 neighbor gradient step, instrumented with wall-clock timers.

    Closely mirrors specnet2_grad_neighbor but additionally returns a
    7-tuple of elapsed times for profiling: (Wmat+matmul, forward
    passes, backward, total, Dmat, neighbor setup, gradient assembly).
    """
    start = time.time()
    n = matdata.Size()
    n_batch = len(batch_idx)  # NOTE(review): unused
    start4 = time.time()
    D = matdata.Dmat(batch_idx)
    end4 = time.time()
    start5 = time.time()
    sp_nbr_idx_batch, sp_val_batch = matdata.spWmat(rowidx=batch_idx)
    nbr_idx = torch.LongTensor(reduce(np.union1d, sp_nbr_idx_batch))
    batch_idx_nbr = torch.LongTensor([torch.nonzero(nbr_idx==i, \
        as_tuple=False) for i in batch_idx])
    end5 = time.time()
    start2 = time.time()
    Y_nbr = model(matdata.Xdata(nbr_idx))
    Y_batch = model(matdata.Xdata(batch_idx))
    end2 = time.time()
    start1 = time.time()
    W_batch = matdata.Wmat(rowidx=batch_idx, colidx=nbr_idx)
    WY_batch = torch.matmul(W_batch, Y_nbr)
    end1 = time.time()
    start6 = time.time()
    YDY, DY_nbr, DtY = update_YDY(model, nbr_idx, Y_nbr, matdata)
    DDtY = torch.mul(D, DtY)
    grad_LA = 4*(- WY_batch/n + DDtY/n \
        + torch.matmul(DY_nbr[batch_idx_nbr,:], YDY)/n**3)
    grad_LA = grad_LA.detach()
    end6 = time.time()
    apploss = torch.trace(torch.matmul(Y_batch.t(), grad_LA))
    start3 = time.time()
    apploss.backward()
    end3 = time.time()
    return end1-start1, end2-start2, end3-start3, end3-start, end4-start4, end5-start5, end6-start6
## SpecNet2 loss_local only uses mini-batch neighbor
def specnet2_grad_neighbor(model, batch_idx, matdata):
    """SpecNet2 gradient step over the mini-batch and its sparse neighbors,
    using the running YDY/DtY state maintained by update_YDY."""
    n = matdata.Size()
    n_batch = len(batch_idx)  # NOTE(review): unused
    D = matdata.Dmat(batch_idx)
    sp_nbr_idx_batch, sp_val_batch = matdata.spWmat(rowidx=batch_idx)
    # Union of all neighbor indices touched by the batch rows.
    nbr_idx = torch.LongTensor(reduce(np.union1d, sp_nbr_idx_batch))
    nbr_idx = nbr_idx.reshape(-1)
    # Position of each batch index inside nbr_idx.
    batch_idx_nbr = torch.LongTensor([torch.nonzero(nbr_idx==i, \
        as_tuple=False) for i in batch_idx])
    Y_nbr = model(matdata.Xdata(nbr_idx))
    Y_batch = model(matdata.Xdata(batch_idx))
    W_batch = matdata.Wmat(rowidx=batch_idx, colidx=nbr_idx)
    WY_batch = torch.matmul(W_batch, Y_nbr)
    YDY, DY_nbr, DtY = update_YDY(model, nbr_idx, Y_nbr, matdata)
    DDtY = torch.mul(D, DtY)
    grad_LA = 4*(- WY_batch/n + DDtY/n \
        + torch.matmul(DY_nbr[batch_idx_nbr,:], YDY)/n**3)
    grad_LA = grad_LA.detach()
    apploss = torch.trace(torch.matmul(Y_batch.t(), grad_LA))
    apploss.backward()
def specnet2_grad_fake_neighbor(model, batch_idx, matdata):
    """SpecNet2 gradient step using sparse batch neighbors; WY is assembled
    row-by-row from sparse affinity lists instead of a dense matmul."""
    n = matdata.Size()
    n_batch = len(batch_idx)
    D = matdata.Dmat(batch_idx)
    Dt = matdata.Dtmat()
    sp_nbr_idx_batch, sp_val_batch = matdata.spWmat(rowidx=batch_idx)
    nbr_idx = torch.LongTensor(reduce(np.union1d, sp_nbr_idx_batch))
    batch_idx_nbr = torch.LongTensor([torch.nonzero(nbr_idx==i, \
        as_tuple=False) for i in batch_idx])  # NOTE(review): unused here
    Y = model(matdata.Xdata())
    Y_batch = model(matdata.Xdata(batch_idx))
    # Accumulate W Y per batch row from the sparse neighbor lists.
    WY_batch = torch.zeros(n_batch, Y.shape[1])
    for i in range(n_batch):
        WY_batch[i,:] = torch.sum(torch.mul(sp_val_batch[i], Y[sp_nbr_idx_batch[i].reshape(-1),:]),
                                  dim=0)
    YDY, DY_nbr, Y_part = update_YDY(model, nbr_idx, Y[nbr_idx,:], matdata)
    DDtY = torch.mul(D, torch.matmul(Dt, Y_part))
    # NOTE(review): other variants index DY_nbr by batch_idx_nbr; here the
    # raw batch_idx is used — confirm this is intentional.
    grad_LA = 4*(- WY_batch/n + DDtY/n \
        + torch.matmul(DY_nbr[batch_idx_nbr,:], YDY)/n**3)
    grad_LA = grad_LA.detach()
    apploss = torch.trace(torch.matmul(Y_batch.t(), grad_LA))
    apploss.backward()
# Older version retained for reference:
# def specnet2_grad_fake_neighbor(model, batch_idx, matdata):
#     n = matdata.Size()
#     n_batch = len(batch_idx)
#     sp_nbr_idx_batch, sp_val_batch = matdata.spWmat(rowidx=batch_idx)
#     nbr_idx = torch.LongTensor(reduce(np.union1d, sp_nbr_idx_batch))
#     batch_idx_nbr = torch.LongTensor([torch.nonzero(nbr_idx==i, \
#         as_tuple=False) for i in batch_idx])
#     Y = model(matdata.Xdata())
#     WY_batch = torch.zeros(n_batch, Y.shape[1])
#     for i in range(n_batch):
#         WY_batch[i,:] = torch.sum(torch.mul(sp_val_batch[i], Y[sp_nbr_idx_batch[i].reshape(-1),:]),
#                                   dim=0)
#     YDY, DY_nbr = update_YDY(model, nbr_idx, Y[nbr_idx,:], matdata)
#     grad_LA = 4*(- WY_batch/n \
#         + torch.matmul(DY_nbr[batch_idx_nbr,:], YDY)/n**3)
#     grad_LA = grad_LA.detach()
#     apploss = torch.trace(torch.matmul(Y[batch_idx,:].t(), grad_LA))
#     apploss.backward()
## Updating YDY approximately
def update_YDY(model = None, idx = None, Y_idx = None, matdata = None,
               DisId=False, GetYDY=False):
    """Maintain a running estimate of the Gram matrix Y^T D Y (and D^T Y).

    State lives as attributes on the function object itself
    (update_YDY.Y_old / .YDY / .DtY, initialized to None below), so all
    gradient functions in this module share one estimate.  On first
    call the full dataset is evaluated once under no_grad; afterwards
    only the rows in `idx` are swapped in incrementally.

    DisId=True treats D as the identity (plain Y^T Y).
    GetYDY=True returns the current YDY estimate without updating.
    Returns (YDY, DY_idx, DtY).

    NOTE(review): indentation of the DtY update relative to the DisId
    branch was ambiguous in the dump; as restored here, a DisId=True
    update call would reference DtY_old_idx/DtY_idx before assignment —
    verify against the original repository layout.
    """
    if update_YDY.Y_old is None:
        # First call: cache a full forward pass without building a graph.
        model.eval()
        with torch.no_grad():
            update_YDY.Y_old = model(matdata.Xdata())
        update_YDY.Y_old = update_YDY.Y_old.detach()
        if not DisId:
            D = matdata.Dmat()
            Dt = matdata.Dtmat()
            update_YDY.YDY = torch.matmul(
                update_YDY.Y_old.t(), torch.mul(D,
                update_YDY.Y_old))
            update_YDY.DtY = torch.matmul(Dt, update_YDY.Y_old)
        else:
            update_YDY.YDY = torch.matmul(
                update_YDY.Y_old.t(), update_YDY.Y_old)
    if GetYDY:
        return update_YDY.YDY
    Y_idx = Y_idx.detach()
    Y_old_idx = update_YDY.Y_old[idx,:]
    # Y_idx = Y_idx.detach()
    if not DisId:
        D_idx = matdata.Dmat(idx)
        Dt_idx = matdata.Dtmat(idx)
        DY_idx = torch.mul(D_idx, Y_idx)
        DY_old_idx = torch.mul(D_idx, Y_old_idx)
        DtY_idx = torch.matmul(Dt_idx, Y_idx)
        DtY_old_idx = torch.matmul(Dt_idx, Y_old_idx)
    else:
        DY_idx = Y_idx
        DY_old_idx = Y_old_idx
    # Rank-|idx| correction: remove stale contribution, add fresh one.
    update_YDY.YDY = update_YDY.YDY \
        - torch.matmul(Y_old_idx.t(), DY_old_idx) \
        + torch.matmul(Y_idx.t(), DY_idx)
    update_YDY.DtY = update_YDY.DtY \
        - DtY_old_idx + DtY_idx
    update_YDY.Y_old[idx,:] = Y_idx
    # update_YDY.YDY = update_YDY.YDY \
    #     - torch.matmul(Y_old_idx.t(), DY_old_idx) \
    #     + torch.matmul(Y_idx.t(), DY_idx)
    # update_YDY.Y_old[idx,:] = Y_idx
    return update_YDY.YDY, DY_idx, update_YDY.DtY
# Shared mutable state for the running estimate (function attributes).
update_YDY.Y_old = None
update_YDY.YDY = None
update_YDY.DtY = None
| 34.36342
| 101
| 0.637174
| 2,220
| 14,467
| 3.891441
| 0.056757
| 0.090751
| 0.02917
| 0.028591
| 0.83123
| 0.795578
| 0.760042
| 0.706216
| 0.633407
| 0.592545
| 0
| 0.009667
| 0.220571
| 14,467
| 421
| 102
| 34.36342
| 0.756474
| 0.141633
| 0
| 0.57037
| 0
| 0
| 0.000728
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051852
| false
| 0
| 0.014815
| 0
| 0.077778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4b7bfa9e65393125315c4abe3f0b69c8d5b9f62c
| 148
|
py
|
Python
|
packages/qsmstoken/qsmstoken/clients/alidayusms/__init__.py
|
lianxiaopang/camel-store-api
|
b8021250bf3d8cf7adc566deebdba55225148316
|
[
"Apache-2.0"
] | 12
|
2020-02-01T01:52:01.000Z
|
2021-04-28T15:06:43.000Z
|
packages/qsmstoken/qsmstoken/clients/alidayusms/__init__.py
|
lianxiaopang/camel-store-api
|
b8021250bf3d8cf7adc566deebdba55225148316
|
[
"Apache-2.0"
] | 5
|
2020-02-06T08:07:58.000Z
|
2020-06-02T13:03:45.000Z
|
packages/qsmstoken/qsmstoken/clients/alidayusms/__init__.py
|
lianxiaopang/camel-store-api
|
b8021250bf3d8cf7adc566deebdba55225148316
|
[
"Apache-2.0"
] | 11
|
2020-02-03T13:07:46.000Z
|
2020-11-29T01:44:06.000Z
|
import os
import sys

# Prepend this package's own directory to sys.path so sibling modules
# can also be resolved by bare absolute name.
# NOTE(review): mutating sys.path at import time is fragile; prefer
# relative imports throughout the package where possible.
parent_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent_dir)

# Re-export the client so callers can import it from the package root.
from .services import AlidayuSMSClient
| 21.142857
| 55
| 0.804054
| 23
| 148
| 4.913043
| 0.608696
| 0.159292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007463
| 0.094595
| 148
| 6
| 56
| 24.666667
| 0.835821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
29b6bf701d9bdedb3ca4d562964e8b817b848384
| 98
|
py
|
Python
|
lattice_sync/apps.py
|
PressLabs/zinc
|
9e1dc852f31f9897e7759962cf0f3e6d42fbe637
|
[
"Apache-2.0"
] | 29
|
2017-06-29T15:03:49.000Z
|
2018-01-30T14:07:26.000Z
|
lattice_sync/apps.py
|
presslabs/zinc
|
94146e5203fc93ee0e8bb011a4db0ffcd4b0096e
|
[
"Apache-2.0"
] | 9
|
2019-01-11T09:07:17.000Z
|
2022-02-03T12:50:21.000Z
|
lattice_sync/apps.py
|
PressLabs/zinc
|
9e1dc852f31f9897e7759962cf0f3e6d42fbe637
|
[
"Apache-2.0"
] | 1
|
2020-08-09T18:17:25.000Z
|
2020-08-09T18:17:25.000Z
|
from django.apps import AppConfig
class LatticeSyncConfig(AppConfig):
name = 'lattice_sync'
| 16.333333
| 35
| 0.77551
| 11
| 98
| 6.818182
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153061
| 98
| 5
| 36
| 19.6
| 0.903614
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
29d68863e335a8d647877b64ca4d2bc2812a52b5
| 124
|
py
|
Python
|
Recommender/test.py
|
wpwbb510582246/PocketFIlm
|
356d057810fd48a77197fe0f00b1f2adccb02d39
|
[
"MIT"
] | 17
|
2019-09-11T08:37:26.000Z
|
2021-08-17T12:08:54.000Z
|
Recommender/test.py
|
wpwbb510582246/PocketFIlm
|
356d057810fd48a77197fe0f00b1f2adccb02d39
|
[
"MIT"
] | 5
|
2019-10-26T00:28:35.000Z
|
2021-05-08T09:10:36.000Z
|
Recommender/test.py
|
wpwbb510582246/PocketFIlm
|
356d057810fd48a77197fe0f00b1f2adccb02d39
|
[
"MIT"
] | 8
|
2020-03-17T08:17:35.000Z
|
2021-07-30T15:48:36.000Z
|
from util.CommonUtils import *
from util.MongoDbUtils import MongoDbUtils
if __name__ == '__main__':
print(get_today())
| 24.8
| 42
| 0.766129
| 15
| 124
| 5.733333
| 0.733333
| 0.186047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137097
| 124
| 5
| 43
| 24.8
| 0.803738
| 0
| 0
| 0
| 0
| 0
| 0.064
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
29eb98577605361c475a71b506aa7bffb53980dd
| 109
|
py
|
Python
|
src/words/__init__.py
|
winkidney/MakeABit
|
8438b177218fadd6bd8faeedac1b586429e9bc7f
|
[
"MIT"
] | 1
|
2016-09-08T07:46:39.000Z
|
2016-09-08T07:46:39.000Z
|
src/words/__init__.py
|
winkidney/MakeABit
|
8438b177218fadd6bd8faeedac1b586429e9bc7f
|
[
"MIT"
] | null | null | null |
src/words/__init__.py
|
winkidney/MakeABit
|
8438b177218fadd6bd8faeedac1b586429e9bc7f
|
[
"MIT"
] | null | null | null |
import os
HERE = os.path.abspath(os.path.dirname(__file__))
TEMPLATES_DIR = os.path.join(HERE, "templates")
| 21.8
| 49
| 0.752294
| 17
| 109
| 4.529412
| 0.588235
| 0.233766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091743
| 109
| 5
| 50
| 21.8
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.081818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4b1a1cc40d3da2f488c81d227ffdce85b53f6ac8
| 166
|
py
|
Python
|
chunked_media/apps.py
|
CerealBoxMedia/chunked_media
|
324d9b7d76323fa1ca5296d733b82dc5ab5f98c0
|
[
"BSD-2-Clause"
] | null | null | null |
chunked_media/apps.py
|
CerealBoxMedia/chunked_media
|
324d9b7d76323fa1ca5296d733b82dc5ab5f98c0
|
[
"BSD-2-Clause"
] | null | null | null |
chunked_media/apps.py
|
CerealBoxMedia/chunked_media
|
324d9b7d76323fa1ca5296d733b82dc5ab5f98c0
|
[
"BSD-2-Clause"
] | null | null | null |
from django.apps import AppConfig
class ChunkedMediaAppConfig(AppConfig):
name = 'chunked_media'
label = 'chunked_media'
verbose_name = "Chunked media"
| 20.75
| 39
| 0.740964
| 18
| 166
| 6.666667
| 0.666667
| 0.3
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180723
| 166
| 7
| 40
| 23.714286
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0.23494
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4b260f10cf9f7e288044481e9b95e59f63c5878a
| 465
|
py
|
Python
|
wwdtm/panelist/__init__.py
|
questionlp/wwdtm
|
f3cf3399c22bf19e369e6e0250e7c72de0be3a90
|
[
"Apache-2.0"
] | null | null | null |
wwdtm/panelist/__init__.py
|
questionlp/wwdtm
|
f3cf3399c22bf19e369e6e0250e7c72de0be3a90
|
[
"Apache-2.0"
] | 1
|
2022-01-17T04:25:49.000Z
|
2022-01-17T04:25:49.000Z
|
wwdtm/panelist/__init__.py
|
questionlp/wwdtm
|
f3cf3399c22bf19e369e6e0250e7c72de0be3a90
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# vim: set noai syntax=python ts=4 sw=4:
#
# Copyright (c) 2018-2021 Linh Pham
# wwdtm is released under the terms of the Apache License 2.0
"""Wait Wait Stats: Panelist module"""
from wwdtm.panelist.appearances import PanelistAppearances
from wwdtm.panelist.panelist import Panelist
from wwdtm.panelist.scores import PanelistScores
from wwdtm.panelist.statistics import PanelistStatistics
from wwdtm.panelist.utility import PanelistUtility
| 38.75
| 61
| 0.795699
| 65
| 465
| 5.692308
| 0.646154
| 0.121622
| 0.22973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031785
| 0.12043
| 465
| 11
| 62
| 42.272727
| 0.872861
| 0.404301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
d9a129abd63f0d9c386cac6d0ce1e4a9b6222916
| 199
|
py
|
Python
|
ml_web/users/tests/test_models.py
|
mahidul-islam/ml_web
|
6e4b0f034aaa6c86b53dc2edb7c5123419e0541d
|
[
"MIT"
] | 1
|
2021-08-19T17:42:31.000Z
|
2021-08-19T17:42:31.000Z
|
ml_web/users/tests/test_models.py
|
mahidul-islam/ml_web
|
6e4b0f034aaa6c86b53dc2edb7c5123419e0541d
|
[
"MIT"
] | null | null | null |
ml_web/users/tests/test_models.py
|
mahidul-islam/ml_web
|
6e4b0f034aaa6c86b53dc2edb7c5123419e0541d
|
[
"MIT"
] | null | null | null |
import pytest
from ml_web.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
assert user.get_absolute_url() == f"/users/{user.username}/"
| 19.9
| 64
| 0.768844
| 31
| 199
| 4.677419
| 0.645161
| 0.096552
| 0.206897
| 0.248276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120603
| 199
| 9
| 65
| 22.111111
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0.115578
| 0.115578
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d9b83d7ce43d5ceb515bbb8e5157b420964a03e2
| 541
|
py
|
Python
|
cms/templatetags/cms_tags.py
|
eldarion-client/hedera
|
a01d44232fadab4e60e8d06971feba9fad21e3f2
|
[
"MIT"
] | 8
|
2018-10-25T19:30:49.000Z
|
2021-07-27T18:16:42.000Z
|
cms/templatetags/cms_tags.py
|
eldarion-client/hedera
|
a01d44232fadab4e60e8d06971feba9fad21e3f2
|
[
"MIT"
] | 233
|
2018-11-01T14:30:25.000Z
|
2022-03-30T16:38:24.000Z
|
cms/templatetags/cms_tags.py
|
eldarion-client/hedera
|
a01d44232fadab4e60e8d06971feba9fad21e3f2
|
[
"MIT"
] | 3
|
2019-05-16T14:57:55.000Z
|
2021-03-16T13:05:01.000Z
|
from django import template
from cms.models import FlatPage, HomePage
register = template.Library()
@register.simple_tag
def get_home_page():
return HomePage.objects.first()
@register.simple_tag
def get_site_nav():
return FlatPage.objects.live().filter(show_in_menus=True).order_by('title')
@register.simple_tag
def get_flatpage_nav():
return HomePage.objects.first().get_children().live().order_by('title')
@register.simple_tag()
def get_flatpage_subnav(page):
return page.get_children().live().order_by('title')
| 20.037037
| 79
| 0.759704
| 76
| 541
| 5.157895
| 0.421053
| 0.142857
| 0.173469
| 0.204082
| 0.443878
| 0.326531
| 0.219388
| 0.219388
| 0.219388
| 0
| 0
| 0
| 0.109057
| 541
| 26
| 80
| 20.807692
| 0.813278
| 0
| 0
| 0.2
| 0
| 0
| 0.027726
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.266667
| false
| 0
| 0.133333
| 0.266667
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
d9c789fed2e7686c7e0513ac9e434c3f6ff56b9b
| 492
|
py
|
Python
|
KOPy/__init__.py
|
alexrudy/KOPy
|
fcce623f1efc2e2891645af484f3dc23b70ce7f8
|
[
"BSD-3-Clause"
] | null | null | null |
KOPy/__init__.py
|
alexrudy/KOPy
|
fcce623f1efc2e2891645af484f3dc23b70ce7f8
|
[
"BSD-3-Clause"
] | null | null | null |
KOPy/__init__.py
|
alexrudy/KOPy
|
fcce623f1efc2e2891645af484f3dc23b70ce7f8
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
KOPy - Keck Observing Python
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import *
# ----------------------------------------------------------------------------
# For egg_info test builds to pass, put package imports here.
if not _ASTROPY_SETUP_:
pass
| 30.75
| 78
| 0.506098
| 53
| 492
| 4.584906
| 0.90566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002358
| 0.138211
| 492
| 15
| 79
| 32.8
| 0.570755
| 0.827236
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
d9ca34b32611e04843b57807b53f42282d7aa2b4
| 144
|
py
|
Python
|
posts/widgets.py
|
Duskhorizon/discoplaytogether
|
e74a11b0f65d14db6f15d1bb0536411dd546eda6
|
[
"MIT"
] | null | null | null |
posts/widgets.py
|
Duskhorizon/discoplaytogether
|
e74a11b0f65d14db6f15d1bb0536411dd546eda6
|
[
"MIT"
] | null | null | null |
posts/widgets.py
|
Duskhorizon/discoplaytogether
|
e74a11b0f65d14db6f15d1bb0536411dd546eda6
|
[
"MIT"
] | null | null | null |
from django.forms import DateTimeInput
class XDSoftDateTimePickerInput(DateTimeInput):
template_name = 'widgets/xdsoft_datetimepicker.html'
| 36
| 56
| 0.847222
| 14
| 144
| 8.571429
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090278
| 144
| 4
| 56
| 36
| 0.916031
| 0
| 0
| 0
| 0
| 0
| 0.234483
| 0.234483
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
d9cf9573a2207d7e74cd336a12bbcab09f94e2d9
| 70
|
py
|
Python
|
create_schema.py
|
dwahler/spelling-list-generator
|
acb8c3c0929c8a1caf20db07f8ca896e57827637
|
[
"MIT"
] | 2
|
2019-03-11T07:36:12.000Z
|
2019-09-07T03:55:28.000Z
|
create_schema.py
|
dwahler/spelling-list-generator
|
acb8c3c0929c8a1caf20db07f8ca896e57827637
|
[
"MIT"
] | null | null | null |
create_schema.py
|
dwahler/spelling-list-generator
|
acb8c3c0929c8a1caf20db07f8ca896e57827637
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from spelling.storage import db
db.create_all()
| 17.5
| 31
| 0.771429
| 12
| 70
| 4.416667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 70
| 3
| 32
| 23.333333
| 0.84127
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d9dac6d49626f764fd967fa8db12e093465915c8
| 15
|
py
|
Python
|
src/rewoven/__init__.py
|
radifar/rewoven
|
048ca4337133fe0fab2caa40c5ec8ddd1c52bcd2
|
[
"MIT"
] | null | null | null |
src/rewoven/__init__.py
|
radifar/rewoven
|
048ca4337133fe0fab2caa40c5ec8ddd1c52bcd2
|
[
"MIT"
] | 40
|
2021-11-01T11:22:13.000Z
|
2022-03-30T11:27:05.000Z
|
src/rewoven/__init__.py
|
radifar/rewoven
|
048ca4337133fe0fab2caa40c5ec8ddd1c52bcd2
|
[
"MIT"
] | null | null | null |
"""Rewoven."""
| 7.5
| 14
| 0.466667
| 1
| 15
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 15
| 1
| 15
| 15
| 0.5
| 0.533333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d9db0e9bf33ddd885dd6ee9a60718e604b127977
| 3,162
|
py
|
Python
|
databaser/settings.py
|
yart2211/databaser
|
e34a738b07fff7be384625635f37143a8cec3e97
|
[
"MIT"
] | 21
|
2020-04-29T11:10:14.000Z
|
2022-03-15T11:16:22.000Z
|
databaser/settings.py
|
yart2211/databaser
|
e34a738b07fff7be384625635f37143a8cec3e97
|
[
"MIT"
] | null | null | null |
databaser/settings.py
|
yart2211/databaser
|
e34a738b07fff7be384625635f37143a8cec3e97
|
[
"MIT"
] | 6
|
2020-07-16T05:53:14.000Z
|
2021-09-23T07:15:14.000Z
|
import logging
from core.enums import (
LogLevelEnum,
)
from core.helpers import (
get_bool_environ_parameter,
get_int_environ_parameter,
get_iterable_environ_parameter,
get_str_environ_parameter,
logger,
)
# Logger
LOG_LEVEL = get_str_environ_parameter(
name='DATABASER_LOG_LEVEL',
default=LogLevelEnum.INFO,
)
logger.setLevel(getattr(logging, LOG_LEVEL, logging.INFO))
# Src database connection params
SRC_DB_HOST = get_str_environ_parameter(
name='DATABASER_SRC_DB_HOST',
)
SRC_DB_PORT = get_str_environ_parameter(
name='DATABASER_SRC_DB_PORT',
)
SRC_DB_SCHEMA = get_str_environ_parameter(
name='DATABASER_SRC_DB_SCHEMA',
default='public',
)
SRC_DB_NAME = get_str_environ_parameter(
name='DATABASER_SRC_DB_NAME',
)
SRC_DB_USER = get_str_environ_parameter(
name='DATABASER_SRC_DB_USER',
)
SRC_DB_PASSWORD = get_str_environ_parameter(
name='DATABASER_SRC_DB_PASSWORD',
)
# Dst database connection params
DST_DB_HOST = get_str_environ_parameter(
name='DATABASER_DST_DB_HOST',
)
DST_DB_PORT = get_str_environ_parameter(
name='DATABASER_DST_DB_PORT',
)
DST_DB_SCHEMA = get_str_environ_parameter(
name='DATABASER_DST_DB_SCHEMA',
default='public',
)
DST_DB_NAME = get_str_environ_parameter(
name='DATABASER_DST_DB_NAME',
)
DST_DB_USER = get_str_environ_parameter(
name='DATABASER_DST_DB_USER',
)
DST_DB_PASSWORD = get_str_environ_parameter(
name='DATABASER_DST_DB_PASSWORD',
)
# Test mode parameters
TEST_MODE = get_bool_environ_parameter(
name='DATABASER_TEST_MODE',
)
if TEST_MODE:
logger.warning('TEST MODE ACTIVATED!!!')
KEY_TABLE_NAME = get_str_environ_parameter(
name='DATABASER_KEY_TABLE_NAME',
)
KEY_COLUMN_NAMES = get_iterable_environ_parameter(
name='DATABASER_KEY_COLUMN_NAMES',
)
KEY_COLUMN_VALUES = get_iterable_environ_parameter(
name='DATABASER_KEY_COLUMN_VALUES',
type_=int,
)
KEY_TABLE_HIERARCHY_COLUMN_NAME = get_str_environ_parameter(
name='DATABASER_KEY_TABLE_HIERARCHY_COLUMN_NAME',
)
EXCLUDED_TABLES = get_iterable_environ_parameter(
name='DATABASER_EXCLUDED_TABLES',
)
TABLES_WITH_GENERIC_FOREIGN_KEY = get_iterable_environ_parameter(
name='DATABASER_TABLES_WITH_GENERIC_FOREIGN_KEY',
)
TABLES_LIMIT_PER_TRANSACTION = get_int_environ_parameter(
name='DATABASER_TABLES_LIMIT_PER_TRANSACTION',
default=100,
)
IS_TRUNCATE_TABLES = get_bool_environ_parameter(
name='DATABASER_IS_TRUNCATE_TABLES',
)
TABLES_TRUNCATE_INCLUDED = get_iterable_environ_parameter(
name='DATABASER_TABLES_TRUNCATE_INCLUDED',
)
TABLES_TRUNCATE_EXCLUDED = get_iterable_environ_parameter(
name='DATABASER_TABLES_TRUNCATE_EXCLUDED',
)
if not any(
[
SRC_DB_HOST,
SRC_DB_PORT,
SRC_DB_NAME,
SRC_DB_USER,
SRC_DB_PASSWORD,
DST_DB_HOST,
DST_DB_PORT,
DST_DB_NAME,
DST_DB_USER,
DST_DB_PASSWORD,
KEY_TABLE_NAME,
KEY_COLUMN_NAMES,
KEY_COLUMN_VALUES,
]
):
raise ValueError('You must send all params!')
VALIDATE_DATA_BEFORE_TRANSFERRING = get_bool_environ_parameter(
name='VALIDATE_DATA_BEFORE_TRANSFERRING',
)
| 24.511628
| 65
| 0.772612
| 421
| 3,162
| 5.218527
| 0.171021
| 0.211197
| 0.227583
| 0.316796
| 0.65635
| 0.580792
| 0.414201
| 0.393264
| 0.042786
| 0
| 0
| 0.001116
| 0.149589
| 3,162
| 128
| 66
| 24.703125
| 0.815917
| 0.028147
| 0
| 0.017857
| 0
| 0
| 0.232073
| 0.200456
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.053571
| 0.026786
| 0
| 0.026786
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
d9dbb4e3cf51668889c1d2dd53f6c2920509e999
| 73
|
py
|
Python
|
kriegspiel_api_server/kriegspiel/exceptions.py
|
Kriegspiel/python-api
|
f25107d5ebc8810af622c66960c153c43a1ffb03
|
[
"MIT"
] | 1
|
2016-12-30T10:11:11.000Z
|
2016-12-30T10:11:11.000Z
|
kriegspiel_api_server/kriegspiel/exceptions.py
|
Kriegspiel/python-api
|
f25107d5ebc8810af622c66960c153c43a1ffb03
|
[
"MIT"
] | null | null | null |
kriegspiel_api_server/kriegspiel/exceptions.py
|
Kriegspiel/python-api
|
f25107d5ebc8810af622c66960c153c43a1ffb03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
class KriegspielException(Exception):
pass
| 12.166667
| 37
| 0.643836
| 7
| 73
| 6.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 0.191781
| 73
| 5
| 38
| 14.6
| 0.779661
| 0.287671
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
d9fcbec087b3096414a368ead8a5532c01831c36
| 96
|
py
|
Python
|
Intelligent-interrogation/IISever/mainFunc/apps.py
|
Intelligent-interrogation/Intelligent-interrogation
|
17ecdc85b37a1624bf3115a0539008cb0574d5ce
|
[
"MIT"
] | 7
|
2017-12-21T03:45:29.000Z
|
2021-11-06T16:11:50.000Z
|
Intelligent-interrogation/IISever/mainFunc/apps.py
|
Intelligent-interrogation/Intelligent-interrogation
|
17ecdc85b37a1624bf3115a0539008cb0574d5ce
|
[
"MIT"
] | 1
|
2017-12-13T08:41:51.000Z
|
2017-12-13T08:41:51.000Z
|
Intelligent-interrogation/IISever/mainFunc/apps.py
|
Intelligent-interrogation/Intelligent-interrogation
|
17ecdc85b37a1624bf3115a0539008cb0574d5ce
|
[
"MIT"
] | 4
|
2018-06-19T07:00:38.000Z
|
2019-09-18T01:15:31.000Z
|
from django.apps import AppConfig
class MainfuncConfig(AppConfig):
name = 'mainFunc'
| 16
| 34
| 0.71875
| 10
| 96
| 6.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208333
| 96
| 5
| 35
| 19.2
| 0.907895
| 0
| 0
| 0
| 0
| 0
| 0.087912
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
8a202dd058b0b797f44343d0a1f94b0f7b2b341c
| 2,194
|
py
|
Python
|
tests/run_data_tests.py
|
Cloudlock/gdata-python3
|
a6481a13590bfa225f91a97b2185cca9aacd1403
|
[
"Apache-2.0"
] | 19
|
2017-06-09T13:38:03.000Z
|
2020-12-12T07:45:48.000Z
|
tests/run_data_tests.py
|
AlexxIT/gdata-python3
|
5cc5a83a469d87f804d1fda8760ec76bcb6050c9
|
[
"Apache-1.1"
] | 11
|
2017-07-22T07:09:54.000Z
|
2020-12-02T15:08:48.000Z
|
tests/run_data_tests.py
|
AlexxIT/gdata-python3
|
5cc5a83a469d87f804d1fda8760ec76bcb6050c9
|
[
"Apache-1.1"
] | 25
|
2017-07-03T11:30:39.000Z
|
2020-10-01T02:21:13.000Z
|
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '../src'))
import atom_test
import atom_tests.core_test
import atom_tests.http_interface_test
import atom_tests.mock_http_test
import atom_tests.token_store_test
import atom_tests.url_test
# Modules whose tests we will run.
import gdata_test
import gdata_tests.apps.emailsettings.data_test
import gdata_tests.apps.multidomain.data_test
import gdata_tests.apps_test
import gdata_tests.auth_test
import gdata_tests.blogger_test
import gdata_tests.calendar_resource.data_test
import gdata_tests.calendar_test
import gdata_tests.client_test
import gdata_tests.codesearch_test
import gdata_tests.contacts_test
import gdata_tests.docs_test
import gdata_tests.oauth.data_test
import gdata_tests.photos_test
import gdata_tests.spreadsheet_test
import gdata_tests.webmastertools_test
import gdata_tests.youtube_test
import module_test_runner
def RunAllTests():
test_runner = module_test_runner.ModuleTestRunner()
test_runner.modules = [gdata_test, atom_test, atom_tests.url_test,
atom_tests.http_interface_test,
atom_tests.mock_http_test,
atom_tests.core_test,
atom_tests.token_store_test,
gdata_tests.client_test,
gdata_tests.apps_test,
gdata_tests.apps.emailsettings.data_test,
gdata_tests.apps.multidomain.data_test,
gdata_tests.auth_test,
gdata_tests.calendar_test, gdata_tests.docs_test,
gdata_tests.spreadsheet_test,
gdata_tests.photos_test, gdata_tests.codesearch_test,
gdata_tests.contacts_test,
gdata_tests.youtube_test, gdata_tests.blogger_test,
gdata_tests.webmastertools_test,
gdata_tests.calendar_resource.data_test,
gdata_tests.oauth.data_test]
test_runner.RunAllTests()
if __name__ == '__main__':
RunAllTests()
| 38.491228
| 86
| 0.680948
| 266
| 2,194
| 5.184211
| 0.195489
| 0.232052
| 0.174039
| 0.232052
| 0.345178
| 0.166788
| 0
| 0
| 0
| 0
| 0
| 0.000619
| 0.263446
| 2,194
| 56
| 87
| 39.178571
| 0.852723
| 0.014585
| 0
| 0
| 0
| 0
| 0.006481
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.52
| 0
| 0.54
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
8a34fe18f81ca4dd57f7f9d5cb96c87fb91d6fea
| 205
|
py
|
Python
|
django/contrib/syndication/apps.py
|
benjaoming/django
|
6dbe979b4d9396e1b307c7d27388c97c13beb21c
|
[
"BSD-3-Clause"
] | 2
|
2016-09-27T09:30:19.000Z
|
2016-10-17T01:47:43.000Z
|
env/lib/python2.7/site-packages/django/contrib/syndication/apps.py
|
luiscarlosgph/nas
|
e5acee61e8bbf12c34785fe971ce7df8dee775d4
|
[
"MIT"
] | 10
|
2019-12-26T17:31:31.000Z
|
2022-03-21T22:17:33.000Z
|
env/lib/python2.7/site-packages/django/contrib/syndication/apps.py
|
luiscarlosgph/nas
|
e5acee61e8bbf12c34785fe971ce7df8dee775d4
|
[
"MIT"
] | 1
|
2020-05-25T08:55:19.000Z
|
2020-05-25T08:55:19.000Z
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SyndicationConfig(AppConfig):
name = 'django.contrib.syndication'
verbose_name = _("Syndication")
| 22.777778
| 55
| 0.780488
| 23
| 205
| 6.782609
| 0.695652
| 0.128205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141463
| 205
| 8
| 56
| 25.625
| 0.886364
| 0
| 0
| 0
| 0
| 0
| 0.180488
| 0.126829
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
8a67d40fcf341e06108d4cbf7ff08865af1229bc
| 133
|
py
|
Python
|
Shivani/circle.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
Shivani/circle.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
Shivani/circle.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
r=int(input("enter radius\n"))
area=3.14*r*r
print(area)
r=int(input("enter radius\n"))
circumference=2*3.14*r
print(circumference)
| 16.625
| 30
| 0.721805
| 26
| 133
| 3.692308
| 0.461538
| 0.083333
| 0.1875
| 0.291667
| 0.4375
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0.056452
| 0.067669
| 133
| 7
| 31
| 19
| 0.717742
| 0
| 0
| 0.333333
| 0
| 0
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8a8e8552549687adefcd485b49a51a6a0e84b48e
| 262
|
py
|
Python
|
core/consts.py
|
telminov/viber-service
|
21f671522084ff1afcd320406a7e5bf71a04437f
|
[
"MIT"
] | null | null | null |
core/consts.py
|
telminov/viber-service
|
21f671522084ff1afcd320406a7e5bf71a04437f
|
[
"MIT"
] | 1
|
2017-09-21T14:08:57.000Z
|
2017-09-21T14:08:57.000Z
|
core/consts.py
|
telminov/viber-service
|
21f671522084ff1afcd320406a7e5bf71a04437f
|
[
"MIT"
] | null | null | null |
SEND_TEXT = 'send_text'
SEND_IMAGE = 'send_image'
SEND_TEXT_AND_BUTTON = 'send_text_and_button'
CHECK_STATUS_MESSAGES = 'check_status_messages'
API = [SEND_TEXT, SEND_IMAGE, SEND_TEXT_AND_BUTTON, CHECK_STATUS_MESSAGES]
API_CHOICES = [(api, api) for api in API]
| 32.75
| 74
| 0.805344
| 42
| 262
| 4.5
| 0.285714
| 0.253968
| 0.190476
| 0.269841
| 0.698413
| 0.566138
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0.103053
| 262
| 8
| 75
| 32.75
| 0.804255
| 0
| 0
| 0
| 0
| 0
| 0.228137
| 0.079848
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8a920e80df22a9bd3983e7cd6ef1db5a0e7bf417
| 59
|
py
|
Python
|
tests/assets/scannersuccess/scannersuccess/in/class.py
|
SimonBiggs/layer_linter
|
9eb518b74118e4a2d8079e2f32ecc12612ca9e86
|
[
"BSD-3-Clause"
] | 63
|
2018-06-21T10:39:54.000Z
|
2021-06-04T14:28:44.000Z
|
tests/assets/scannersuccess/scannersuccess/in/class.py
|
SimonBiggs/layer_linter
|
9eb518b74118e4a2d8079e2f32ecc12612ca9e86
|
[
"BSD-3-Clause"
] | 86
|
2018-06-20T13:30:30.000Z
|
2019-06-04T12:47:28.000Z
|
tests/assets/scannersuccess/scannersuccess/in/class.py
|
SimonBiggs/layer_linter
|
9eb518b74118e4a2d8079e2f32ecc12612ca9e86
|
[
"BSD-3-Clause"
] | 4
|
2018-08-14T08:49:55.000Z
|
2019-02-16T09:24:47.000Z
|
# 'Class' is a reserved keyword so can't be a module name.
| 29.5
| 58
| 0.711864
| 12
| 59
| 3.5
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.20339
| 59
| 1
| 59
| 59
| 0.893617
| 0.949153
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8aa85418b10b53fb83bf755181836087da8a451c
| 18,553
|
py
|
Python
|
VersionAlpha0.0.1/performanceEstimator/indexer/tokenizer.py
|
GoranBotic/Water_Fern
|
d4f513315c0cd668b17529cc65271bf455b15adc
|
[
"MIT"
] | null | null | null |
VersionAlpha0.0.1/performanceEstimator/indexer/tokenizer.py
|
GoranBotic/Water_Fern
|
d4f513315c0cd668b17529cc65271bf455b15adc
|
[
"MIT"
] | null | null | null |
VersionAlpha0.0.1/performanceEstimator/indexer/tokenizer.py
|
GoranBotic/Water_Fern
|
d4f513315c0cd668b17529cc65271bf455b15adc
|
[
"MIT"
] | null | null | null |
# Generated from JavaParser.g4 by ANTLR 4.7.1
from antlr4 import *
from indexer.JavaLexer import JavaLexer
# This class defines a complete generic visitor for a parse tree produced by JavaParser.
#theStuff = dict()
from hashlib import md5
class Tokenizer(ParseTreeVisitor):
#this provides some default behavior
def visit(self, ctx):
if str(type(ctx)) == "<class 'antlr4.tree.Tree.TerminalNodeImpl'>":
#theStuff[type(ctx.getParent())] = True
#by default terminals simply evaluate to their symbol
#to override this the vist... method which encounters the terminal should manually its children producing the desired terminal values for the terminal children
#if that implementation is done then this line does not need to be changed, the default behavior will still be to use the symbol name
return str(JavaLexer.symbolicNames[ctx.getSymbol().type])
theList = ctx.accept(self)
theHash = ""
if len(theList) > 1:
#do the aggregation
#right now this simply concatenates all the strings and hashes
#this needs to be changed so that similar sub-trees will get similar hashes
#have all terminals evaluate to a type
#have all non-terminals evaluate to a type
#name the thing according to the type frequency count
for x in theList:
if str(x).strip() != "":
theHash = theHash + x + "\n"
#theHash = md5(theHash.encode()).hexdigest()
else:
theHash = theList[0]
#store the hash in the DB along with some kind of link to its accociated code
#should also search for existing similar code
return theHash
def visitChildren(self, ctx):
ret = []
for x in range(ctx.getChildCount()):
# if str(type(ctx.getChild(x))) == "<class 'antlr4.tree.Tree.TerminalNodeImpl'>":
# print(ctx.getChild(x).getSymbol())
# else:
# print(ctx.getChild(x).getPayload().toStringTree())
ret.append(self.visit(ctx.getChild(x)))
return ret
# def visitTerminal(self, ctx):
# return 1
# Visit a parse tree produced by JavaParser#compilationUnit.
def visitCompilationUnit(self, ctx):
print("Start parsing")
print(ctx.getChildCount())
tmp = self.visitChildren(ctx)
ret = ""
for x in tmp:
ret = ret + x
#define a file scope dict called theStuff
#in the visit method add something like theStuff[type(ctx.getParent())] = True
#then all vist... s with terminal children will be added to the stuff, and the keys of the stuff can be print to produce a list of terminal visit...s which required an implementation
# for k in theStuff.keys():
# print(k)
ret = ret.split("\n")
ret2 = []
for x in ret:
if x.strip() != "":
ret2.append(x)
return ret2
# Visit a parse tree produced by JavaParser#packageDeclaration.
def visitPackageDeclaration(self, ctx):
print("packages")
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#importDeclaration.
def visitImportDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#typeDeclaration.
def visitTypeDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#modifier.
def visitModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#classOrInterfaceModifier.
def visitClassOrInterfaceModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#variableModifier.
def visitVariableModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#classDeclaration.
def visitClassDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#typeParameters.
def visitTypeParameters(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#typeParameter.
def visitTypeParameter(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#typeBound.
def visitTypeBound(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#enumDeclaration.
def visitEnumDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#enumConstants.
def visitEnumConstants(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#enumConstant.
def visitEnumConstant(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#enumBodyDeclarations.
def visitEnumBodyDeclarations(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#interfaceDeclaration.
def visitInterfaceDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#classBody.
def visitClassBody(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#interfaceBody.
def visitInterfaceBody(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#classBodyDeclaration.
def visitClassBodyDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#memberDeclaration.
def visitMemberDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#methodDeclaration.
def visitMethodDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#methodBody.
def visitMethodBody(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#typeTypeOrVoid.
def visitTypeTypeOrVoid(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#genericMethodDeclaration.
def visitGenericMethodDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#genericConstructorDeclaration.
def visitGenericConstructorDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#constructorDeclaration.
def visitConstructorDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#fieldDeclaration.
def visitFieldDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#interfaceBodyDeclaration.
def visitInterfaceBodyDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#interfaceMemberDeclaration.
def visitInterfaceMemberDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#constDeclaration.
def visitConstDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#constantDeclarator.
def visitConstantDeclarator(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#interfaceMethodDeclaration.
def visitInterfaceMethodDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#interfaceMethodModifier.
def visitInterfaceMethodModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#genericInterfaceMethodDeclaration.
def visitGenericInterfaceMethodDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#variableDeclarators.
def visitVariableDeclarators(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#variableDeclarator.
def visitVariableDeclarator(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#variableDeclaratorId.
def visitVariableDeclaratorId(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#variableInitializer.
def visitVariableInitializer(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#arrayInitializer.
def visitArrayInitializer(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#classOrInterfaceType.
def visitClassOrInterfaceType(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#typeArgument.
def visitTypeArgument(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#qualifiedNameList.
def visitQualifiedNameList(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#formalParameters.
def visitFormalParameters(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#formalParameterList.
def visitFormalParameterList(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#formalParameter.
def visitFormalParameter(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#lastFormalParameter.
def visitLastFormalParameter(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#qualifiedName.
def visitQualifiedName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#literal.
def visitLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#integerLiteral.
def visitIntegerLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#floatLiteral.
def visitFloatLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#annotation.
def visitAnnotation(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#elementValuePairs.
def visitElementValuePairs(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#elementValuePair.
def visitElementValuePair(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#elementValue.
def visitElementValue(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#elementValueArrayInitializer.
def visitElementValueArrayInitializer(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#annotationTypeDeclaration.
def visitAnnotationTypeDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#annotationTypeBody.
def visitAnnotationTypeBody(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#annotationTypeElementDeclaration.
def visitAnnotationTypeElementDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#annotationTypeElementRest.
def visitAnnotationTypeElementRest(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#annotationMethodOrConstantRest.
def visitAnnotationMethodOrConstantRest(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#annotationMethodRest.
def visitAnnotationMethodRest(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#annotationConstantRest.
def visitAnnotationConstantRest(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#defaultValue.
def visitDefaultValue(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#block.
def visitBlock(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#blockStatement.
def visitBlockStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#localVariableDeclaration.
def visitLocalVariableDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#localTypeDeclaration.
def visitLocalTypeDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#statement.
def visitStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#catchClause.
def visitCatchClause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#catchType.
def visitCatchType(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#finallyBlock.
def visitFinallyBlock(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#resourceSpecification.
def visitResourceSpecification(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#resources.
def visitResources(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#resource.
def visitResource(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#switchBlockStatementGroup.
def visitSwitchBlockStatementGroup(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#switchLabel.
def visitSwitchLabel(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#forControl.
def visitForControl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#forInit.
def visitForInit(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#enhancedForControl.
def visitEnhancedForControl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#parExpression.
def visitParExpression(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#expressionList.
def visitExpressionList(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#methodCall.
def visitMethodCall(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#expression.
def visitExpression(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#lambdaExpression.
def visitLambdaExpression(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#lambdaParameters.
def visitLambdaParameters(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#lambdaBody.
def visitLambdaBody(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#primary.
def visitPrimary(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#classType.
def visitClassType(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#creator.
def visitCreator(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#createdName.
def visitCreatedName(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#innerCreator.
def visitInnerCreator(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#arrayCreatorRest.
def visitArrayCreatorRest(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#classCreatorRest.
def visitClassCreatorRest(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#explicitGenericInvocation.
def visitExplicitGenericInvocation(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#typeArgumentsOrDiamond.
def visitTypeArgumentsOrDiamond(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#nonWildcardTypeArgumentsOrDiamond.
def visitNonWildcardTypeArgumentsOrDiamond(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#nonWildcardTypeArguments.
def visitNonWildcardTypeArguments(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#typeList.
def visitTypeList(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#typeType.
def visitTypeType(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#primitiveType.
def visitPrimitiveType(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#typeArguments.
def visitTypeArguments(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#superSuffix.
def visitSuperSuffix(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#explicitGenericInvocationSuffix.
def visitExplicitGenericInvocationSuffix(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by JavaParser#arguments.
def visitArguments(self, ctx):
return self.visitChildren(ctx)
| 30.767828
| 190
| 0.705546
| 2,038
| 18,553
| 6.422964
| 0.18106
| 0.057219
| 0.080214
| 0.144385
| 0.554622
| 0.539496
| 0.534683
| 0.529335
| 0.529335
| 0.529335
| 0
| 0.001043
| 0.224815
| 18,553
| 602
| 191
| 30.818937
| 0.909123
| 0.417453
| 0
| 0.427386
| 1
| 0
| 0.006434
| 0.003406
| 0
| 0
| 0
| 0
| 0
| 1
| 0.439834
| false
| 0
| 0.016598
| 0.423237
| 0.904564
| 0.012448
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
8ac3ec5a75dfd6f612253b02a2c195507cfa0eaf
| 531
|
py
|
Python
|
pyportlib/__init__.py
|
phil-lo/PortfolioCore
|
3fbe7460c809a80e48615e934990dcd2d1f5003b
|
[
"CC0-1.0"
] | 2
|
2021-11-18T21:40:02.000Z
|
2021-12-13T21:01:18.000Z
|
pyportlib/__init__.py
|
phil-lo/PortfolioCore
|
3fbe7460c809a80e48615e934990dcd2d1f5003b
|
[
"CC0-1.0"
] | null | null | null |
pyportlib/__init__.py
|
phil-lo/PortfolioCore
|
3fbe7460c809a80e48615e934990dcd2d1f5003b
|
[
"CC0-1.0"
] | null | null | null |
from pyportlib.portfolio import Portfolio
from pyportlib.position import Position
from pyportlib.reporting import plots, html_reports
from pyportlib.services.transaction import Transaction
from pyportlib.services.cash_change import CashChange
from pyportlib.utils.files_utils import set_client_dir
from pyportlib.utils import dates_utils, files_utils, time_series, df_utils
from pyportlib.utils.indices import Index
from pyportlib.metrics import stats
from pyportlib.account_sources.questrade_connection import QuestradeConnection
| 48.272727
| 78
| 0.881356
| 70
| 531
| 6.528571
| 0.457143
| 0.284464
| 0.118162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082863
| 531
| 10
| 79
| 53.1
| 0.938398
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
8acdc056be73dd5efca538564809ca38737fdb0b
| 1,794
|
py
|
Python
|
tests/unit/service_test.py
|
ryanbrainard/fig
|
9a825c5c35128b0b49eb255966f7a93b4093459a
|
[
"Apache-2.0"
] | 1
|
2016-04-25T08:43:35.000Z
|
2016-04-25T08:43:35.000Z
|
tests/unit/service_test.py
|
ryanbrainard/fig
|
9a825c5c35128b0b49eb255966f7a93b4093459a
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/service_test.py
|
ryanbrainard/fig
|
9a825c5c35128b0b49eb255966f7a93b4093459a
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from __future__ import absolute_import
from .. import unittest
from fig import Service
from fig.service import ConfigError, split_port
class ServiceTest(unittest.TestCase):
def test_name_validations(self):
self.assertRaises(ConfigError, lambda: Service(name=''))
self.assertRaises(ConfigError, lambda: Service(name=' '))
self.assertRaises(ConfigError, lambda: Service(name='/'))
self.assertRaises(ConfigError, lambda: Service(name='!'))
self.assertRaises(ConfigError, lambda: Service(name='\xe2'))
self.assertRaises(ConfigError, lambda: Service(name='_'))
self.assertRaises(ConfigError, lambda: Service(name='____'))
self.assertRaises(ConfigError, lambda: Service(name='foo_bar'))
self.assertRaises(ConfigError, lambda: Service(name='__foo_bar__'))
Service('a')
Service('foo')
def test_project_validation(self):
self.assertRaises(ConfigError, lambda: Service(name='foo', project='_'))
Service(name='foo', project='bar')
def test_config_validation(self):
self.assertRaises(ConfigError, lambda: Service(name='foo', port=['8000']))
Service(name='foo', ports=['8000'])
def test_split_port(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000")
self.assertEqual(internal_port, "2000")
self.assertEqual(external_port, ("127.0.0.1", "1000"))
internal_port, external_port = split_port("127.0.0.1::2000")
self.assertEqual(internal_port, "2000")
self.assertEqual(external_port, ("127.0.0.1",))
internal_port, external_port = split_port("1000:2000")
self.assertEqual(internal_port, "2000")
self.assertEqual(external_port, "1000")
| 39.866667
| 82
| 0.684504
| 206
| 1,794
| 5.737864
| 0.18932
| 0.120981
| 0.251269
| 0.307107
| 0.72335
| 0.72335
| 0.692047
| 0.688663
| 0.641286
| 0.538071
| 0
| 0.049391
| 0.176143
| 1,794
| 44
| 83
| 40.772727
| 0.750338
| 0
| 0
| 0.147059
| 0
| 0
| 0.077567
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.117647
| false
| 0
| 0.147059
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8ace10c7b0e37f6633ee727c284be0d9be61c3e2
| 81
|
py
|
Python
|
docker/python/django/testapp/foo/apps.py
|
jlvoiseux/apm-integration-testing
|
53ec49f80bb8dc8175e21e9ac26452fa8c3b7cf0
|
[
"Apache-2.0"
] | 191
|
2015-02-18T08:52:21.000Z
|
2022-03-24T18:38:36.000Z
|
docker/python/django/testapp/foo/apps.py
|
jlvoiseux/apm-integration-testing
|
53ec49f80bb8dc8175e21e9ac26452fa8c3b7cf0
|
[
"Apache-2.0"
] | 902
|
2018-05-25T15:42:22.000Z
|
2022-03-31T16:12:35.000Z
|
docker/python/django/testapp/foo/apps.py
|
jlvoiseux/apm-integration-testing
|
53ec49f80bb8dc8175e21e9ac26452fa8c3b7cf0
|
[
"Apache-2.0"
] | 68
|
2015-07-09T09:40:26.000Z
|
2022-02-07T20:46:35.000Z
|
from django.apps import AppConfig
class FooConfig(AppConfig):
name = 'foo'
| 13.5
| 33
| 0.728395
| 10
| 81
| 5.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 81
| 5
| 34
| 16.2
| 0.893939
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
76d976a74595513a5144dc3bbce73fc7ae015b3f
| 25,531
|
py
|
Python
|
sdk/python/pulumi_azure_native/desktopvirtualization/application.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/desktopvirtualization/application.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/desktopvirtualization/application.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = ['ApplicationArgs', 'Application']
@pulumi.input_type
class ApplicationArgs:
def __init__(__self__, *,
application_group_name: pulumi.Input[str],
command_line_setting: pulumi.Input[Union[str, 'CommandLineSetting']],
resource_group_name: pulumi.Input[str],
application_name: Optional[pulumi.Input[str]] = None,
application_type: Optional[pulumi.Input[Union[str, 'RemoteApplicationType']]] = None,
command_line_arguments: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
file_path: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
icon_index: Optional[pulumi.Input[int]] = None,
icon_path: Optional[pulumi.Input[str]] = None,
msix_package_application_id: Optional[pulumi.Input[str]] = None,
msix_package_family_name: Optional[pulumi.Input[str]] = None,
show_in_portal: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a Application resource.
:param pulumi.Input[str] application_group_name: The name of the application group
:param pulumi.Input[Union[str, 'CommandLineSetting']] command_line_setting: Specifies whether this published application can be launched with command line arguments provided by the client, command line arguments specified at publish time, or no command line arguments at all.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] application_name: The name of the application within the specified application group
:param pulumi.Input[Union[str, 'RemoteApplicationType']] application_type: Resource Type of Application.
:param pulumi.Input[str] command_line_arguments: Command Line Arguments for Application.
:param pulumi.Input[str] description: Description of Application.
:param pulumi.Input[str] file_path: Specifies a path for the executable file for the application.
:param pulumi.Input[str] friendly_name: Friendly name of Application.
:param pulumi.Input[int] icon_index: Index of the icon.
:param pulumi.Input[str] icon_path: Path to icon.
:param pulumi.Input[str] msix_package_application_id: Specifies the package application Id for MSIX applications
:param pulumi.Input[str] msix_package_family_name: Specifies the package family name for MSIX applications
:param pulumi.Input[bool] show_in_portal: Specifies whether to show the RemoteApp program in the RD Web Access server.
"""
pulumi.set(__self__, "application_group_name", application_group_name)
pulumi.set(__self__, "command_line_setting", command_line_setting)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if application_name is not None:
pulumi.set(__self__, "application_name", application_name)
if application_type is not None:
pulumi.set(__self__, "application_type", application_type)
if command_line_arguments is not None:
pulumi.set(__self__, "command_line_arguments", command_line_arguments)
if description is not None:
pulumi.set(__self__, "description", description)
if file_path is not None:
pulumi.set(__self__, "file_path", file_path)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if icon_index is not None:
pulumi.set(__self__, "icon_index", icon_index)
if icon_path is not None:
pulumi.set(__self__, "icon_path", icon_path)
if msix_package_application_id is not None:
pulumi.set(__self__, "msix_package_application_id", msix_package_application_id)
if msix_package_family_name is not None:
pulumi.set(__self__, "msix_package_family_name", msix_package_family_name)
if show_in_portal is not None:
pulumi.set(__self__, "show_in_portal", show_in_portal)
@property
@pulumi.getter(name="applicationGroupName")
def application_group_name(self) -> pulumi.Input[str]:
"""
The name of the application group
"""
return pulumi.get(self, "application_group_name")
@application_group_name.setter
def application_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "application_group_name", value)
@property
@pulumi.getter(name="commandLineSetting")
def command_line_setting(self) -> pulumi.Input[Union[str, 'CommandLineSetting']]:
"""
Specifies whether this published application can be launched with command line arguments provided by the client, command line arguments specified at publish time, or no command line arguments at all.
"""
return pulumi.get(self, "command_line_setting")
@command_line_setting.setter
def command_line_setting(self, value: pulumi.Input[Union[str, 'CommandLineSetting']]):
pulumi.set(self, "command_line_setting", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="applicationName")
def application_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the application within the specified application group
"""
return pulumi.get(self, "application_name")
@application_name.setter
def application_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "application_name", value)
@property
@pulumi.getter(name="applicationType")
def application_type(self) -> Optional[pulumi.Input[Union[str, 'RemoteApplicationType']]]:
"""
Resource Type of Application.
"""
return pulumi.get(self, "application_type")
@application_type.setter
def application_type(self, value: Optional[pulumi.Input[Union[str, 'RemoteApplicationType']]]):
pulumi.set(self, "application_type", value)
@property
@pulumi.getter(name="commandLineArguments")
def command_line_arguments(self) -> Optional[pulumi.Input[str]]:
"""
Command Line Arguments for Application.
"""
return pulumi.get(self, "command_line_arguments")
@command_line_arguments.setter
def command_line_arguments(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "command_line_arguments", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of Application.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="filePath")
def file_path(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a path for the executable file for the application.
"""
return pulumi.get(self, "file_path")
@file_path.setter
def file_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "file_path", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Friendly name of Application.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter(name="iconIndex")
def icon_index(self) -> Optional[pulumi.Input[int]]:
"""
Index of the icon.
"""
return pulumi.get(self, "icon_index")
@icon_index.setter
def icon_index(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "icon_index", value)
@property
@pulumi.getter(name="iconPath")
def icon_path(self) -> Optional[pulumi.Input[str]]:
"""
Path to icon.
"""
return pulumi.get(self, "icon_path")
@icon_path.setter
def icon_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "icon_path", value)
@property
@pulumi.getter(name="msixPackageApplicationId")
def msix_package_application_id(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the package application Id for MSIX applications
"""
return pulumi.get(self, "msix_package_application_id")
@msix_package_application_id.setter
def msix_package_application_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "msix_package_application_id", value)
@property
@pulumi.getter(name="msixPackageFamilyName")
def msix_package_family_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the package family name for MSIX applications
"""
return pulumi.get(self, "msix_package_family_name")
@msix_package_family_name.setter
def msix_package_family_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "msix_package_family_name", value)
@property
@pulumi.getter(name="showInPortal")
def show_in_portal(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether to show the RemoteApp program in the RD Web Access server.
"""
return pulumi.get(self, "show_in_portal")
@show_in_portal.setter
def show_in_portal(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "show_in_portal", value)
class Application(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_group_name: Optional[pulumi.Input[str]] = None,
application_name: Optional[pulumi.Input[str]] = None,
application_type: Optional[pulumi.Input[Union[str, 'RemoteApplicationType']]] = None,
command_line_arguments: Optional[pulumi.Input[str]] = None,
command_line_setting: Optional[pulumi.Input[Union[str, 'CommandLineSetting']]] = None,
description: Optional[pulumi.Input[str]] = None,
file_path: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
icon_index: Optional[pulumi.Input[int]] = None,
icon_path: Optional[pulumi.Input[str]] = None,
msix_package_application_id: Optional[pulumi.Input[str]] = None,
msix_package_family_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
show_in_portal: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Schema for Application properties.
API Version: 2021-02-01-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] application_group_name: The name of the application group
:param pulumi.Input[str] application_name: The name of the application within the specified application group
:param pulumi.Input[Union[str, 'RemoteApplicationType']] application_type: Resource Type of Application.
:param pulumi.Input[str] command_line_arguments: Command Line Arguments for Application.
:param pulumi.Input[Union[str, 'CommandLineSetting']] command_line_setting: Specifies whether this published application can be launched with command line arguments provided by the client, command line arguments specified at publish time, or no command line arguments at all.
:param pulumi.Input[str] description: Description of Application.
:param pulumi.Input[str] file_path: Specifies a path for the executable file for the application.
:param pulumi.Input[str] friendly_name: Friendly name of Application.
:param pulumi.Input[int] icon_index: Index of the icon.
:param pulumi.Input[str] icon_path: Path to icon.
:param pulumi.Input[str] msix_package_application_id: Specifies the package application Id for MSIX applications
:param pulumi.Input[str] msix_package_family_name: Specifies the package family name for MSIX applications
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[bool] show_in_portal: Specifies whether to show the RemoteApp program in the RD Web Access server.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ApplicationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Schema for Application properties.
API Version: 2021-02-01-preview.
:param str resource_name: The name of the resource.
:param ApplicationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ApplicationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_group_name: Optional[pulumi.Input[str]] = None,
application_name: Optional[pulumi.Input[str]] = None,
application_type: Optional[pulumi.Input[Union[str, 'RemoteApplicationType']]] = None,
command_line_arguments: Optional[pulumi.Input[str]] = None,
command_line_setting: Optional[pulumi.Input[Union[str, 'CommandLineSetting']]] = None,
description: Optional[pulumi.Input[str]] = None,
file_path: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
icon_index: Optional[pulumi.Input[int]] = None,
icon_path: Optional[pulumi.Input[str]] = None,
msix_package_application_id: Optional[pulumi.Input[str]] = None,
msix_package_family_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
show_in_portal: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ApplicationArgs.__new__(ApplicationArgs)
if application_group_name is None and not opts.urn:
raise TypeError("Missing required property 'application_group_name'")
__props__.__dict__["application_group_name"] = application_group_name
__props__.__dict__["application_name"] = application_name
__props__.__dict__["application_type"] = application_type
__props__.__dict__["command_line_arguments"] = command_line_arguments
if command_line_setting is None and not opts.urn:
raise TypeError("Missing required property 'command_line_setting'")
__props__.__dict__["command_line_setting"] = command_line_setting
__props__.__dict__["description"] = description
__props__.__dict__["file_path"] = file_path
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["icon_index"] = icon_index
__props__.__dict__["icon_path"] = icon_path
__props__.__dict__["msix_package_application_id"] = msix_package_application_id
__props__.__dict__["msix_package_family_name"] = msix_package_family_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["show_in_portal"] = show_in_portal
__props__.__dict__["icon_content"] = None
__props__.__dict__["icon_hash"] = None
__props__.__dict__["name"] = None
__props__.__dict__["object_id"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:desktopvirtualization:Application"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20190123preview:Application"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20190123preview:Application"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20190924preview:Application"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20190924preview:Application"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20191210preview:Application"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20191210preview:Application"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20200921preview:Application"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20200921preview:Application"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201019preview:Application"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201019preview:Application"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201102preview:Application"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201102preview:Application"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201110preview:Application"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20201110preview:Application"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210114preview:Application"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210114preview:Application"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210201preview:Application"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210201preview:Application"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210309preview:Application"), pulumi.Alias(type_="azure-nextgen:desktopvirtualization/v20210309preview:Application")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Application, __self__).__init__(
'azure-native:desktopvirtualization:Application',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Application':
"""
Get an existing Application resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ApplicationArgs.__new__(ApplicationArgs)
__props__.__dict__["application_type"] = None
__props__.__dict__["command_line_arguments"] = None
__props__.__dict__["command_line_setting"] = None
__props__.__dict__["description"] = None
__props__.__dict__["file_path"] = None
__props__.__dict__["friendly_name"] = None
__props__.__dict__["icon_content"] = None
__props__.__dict__["icon_hash"] = None
__props__.__dict__["icon_index"] = None
__props__.__dict__["icon_path"] = None
__props__.__dict__["msix_package_application_id"] = None
__props__.__dict__["msix_package_family_name"] = None
__props__.__dict__["name"] = None
__props__.__dict__["object_id"] = None
__props__.__dict__["show_in_portal"] = None
__props__.__dict__["type"] = None
return Application(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="applicationType")
    def application_type(self) -> pulumi.Output[Optional[str]]:
        """
        Resource type of the Application.
        """
        return pulumi.get(self, "application_type")
    @property
    @pulumi.getter(name="commandLineArguments")
    def command_line_arguments(self) -> pulumi.Output[Optional[str]]:
        """
        Command-line arguments for the Application.
        """
        return pulumi.get(self, "command_line_arguments")
    @property
    @pulumi.getter(name="commandLineSetting")
    def command_line_setting(self) -> pulumi.Output[str]:
        """
        Specifies whether this published application can be launched with
        command line arguments provided by the client, command line arguments
        specified at publish time, or no command line arguments at all.
        """
        return pulumi.get(self, "command_line_setting")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Description of the Application.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="filePath")
    def file_path(self) -> pulumi.Output[Optional[str]]:
        """
        Path to the executable file for the application.
        """
        return pulumi.get(self, "file_path")
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> pulumi.Output[Optional[str]]:
        """
        Friendly (display) name of the Application.
        """
        return pulumi.get(self, "friendly_name")
    @property
    @pulumi.getter(name="iconContent")
    def icon_content(self) -> pulumi.Output[str]:
        """
        The icon as a 64-bit string (byte array).

        NOTE(review): presumably base64-encoded image data -- confirm against
        the Azure Desktop Virtualization API reference.
        """
        return pulumi.get(self, "icon_content")
    @property
    @pulumi.getter(name="iconHash")
    def icon_hash(self) -> pulumi.Output[str]:
        """
        Hash of the application icon.
        """
        return pulumi.get(self, "icon_hash")
    @property
    @pulumi.getter(name="iconIndex")
    def icon_index(self) -> pulumi.Output[Optional[int]]:
        """
        Index of the icon within the icon resource.
        """
        return pulumi.get(self, "icon_index")
    @property
    @pulumi.getter(name="iconPath")
    def icon_path(self) -> pulumi.Output[Optional[str]]:
        """
        Path to the icon.
        """
        return pulumi.get(self, "icon_path")
    @property
    @pulumi.getter(name="msixPackageApplicationId")
    def msix_package_application_id(self) -> pulumi.Output[Optional[str]]:
        """
        The package application Id for MSIX applications.
        """
        return pulumi.get(self, "msix_package_application_id")
    @property
    @pulumi.getter(name="msixPackageFamilyName")
    def msix_package_family_name(self) -> pulumi.Output[Optional[str]]:
        """
        The package family name for MSIX applications.
        """
        return pulumi.get(self, "msix_package_family_name")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> pulumi.Output[str]:
        """
        ObjectId of the Application (internal use).
        """
        return pulumi.get(self, "object_id")
    @property
    @pulumi.getter(name="showInPortal")
    def show_in_portal(self) -> pulumi.Output[Optional[bool]]:
        """
        Whether to show the RemoteApp program in the RD Web Access server.
        """
        return pulumi.get(self, "show_in_portal")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
| 48.171698
| 1,874
| 0.675806
| 2,883
| 25,531
| 5.675338
| 0.073188
| 0.067901
| 0.061606
| 0.059161
| 0.826977
| 0.750153
| 0.692214
| 0.552439
| 0.530497
| 0.474942
| 0
| 0.009013
| 0.222083
| 25,531
| 529
| 1,875
| 48.26276
| 0.814813
| 0.217109
| 0
| 0.425076
| 1
| 0
| 0.193859
| 0.11387
| 0
| 0
| 0
| 0
| 0
| 1
| 0.152905
| false
| 0.003058
| 0.018349
| 0
| 0.272171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
76f250487d9708ee70a7784ad67d42eb67f7c889
| 1,266
|
py
|
Python
|
astro_cloud_tests/fits/test_utils.py
|
RichardScottOZ/astro-cloud
|
f45d002566e97007f1bdfa7dce9094ec27c3f912
|
[
"MIT"
] | 1
|
2022-02-02T11:03:54.000Z
|
2022-02-02T11:03:54.000Z
|
astro_cloud_tests/fits/test_utils.py
|
RichardScottOZ/astro-cloud
|
f45d002566e97007f1bdfa7dce9094ec27c3f912
|
[
"MIT"
] | 2
|
2020-10-13T17:31:09.000Z
|
2021-04-04T22:31:03.000Z
|
astro_cloud_tests/fits/test_utils.py
|
RichardScottOZ/astro-cloud
|
f45d002566e97007f1bdfa7dce9094ec27c3f912
|
[
"MIT"
] | 1
|
2022-02-02T11:04:37.000Z
|
2022-02-02T11:04:37.000Z
|
from astro_cloud_tests.pytest_utils import fits_files
def test__as_np_dtype__uint8():
    """BITPIX value 8 must map to numpy's unsigned 8-bit integer dtype."""
    import numpy as np
    from astro_cloud.fits.utils import as_np_dtype

    expected = np.dtype(np.uint8)
    assert as_np_dtype(8) == expected
def test__as_np_dtype__uint16():
    """BITPIX value 16 must map to numpy's unsigned 16-bit integer dtype."""
    import numpy as np
    from astro_cloud.fits.utils import as_np_dtype

    expected = np.dtype(np.uint16)
    assert as_np_dtype(16) == expected
def test__as_np_dtype__uint32():
    """BITPIX value 32 must map to numpy's unsigned 32-bit integer dtype."""
    import numpy as np
    from astro_cloud.fits.utils import as_np_dtype

    expected = np.dtype(np.uint32)
    assert as_np_dtype(32) == expected
def test__as_np_dtype__float32():
    """Negative BITPIX -32 must map to numpy's 32-bit float dtype."""
    import numpy as np
    from astro_cloud.fits.utils import as_np_dtype

    expected = np.dtype(np.float32)
    assert as_np_dtype(-32) == expected
def test__as_np_dtype__float64():
    """Negative BITPIX -64 must map to numpy's 64-bit float dtype."""
    import numpy as np
    from astro_cloud.fits.utils import as_np_dtype

    expected = np.dtype(np.float64)
    assert as_np_dtype(-64) == expected
def test__find_next_header_offset__primary_header():
    # NOTE(review): incomplete test stub -- it imports the units under test
    # but makes no assertions, so it always passes silently.
    # TODO: exercise find_next_header_offset against a primary-HDU fixture.
    from astro_cloud.fits.datatypes import FITSHeader
    from astro_cloud.fits.utils import find_next_header_offset
def test__find_next_header_offset__table(fits_files):
    # NOTE(review): incomplete test stub -- it requests the `fits_files`
    # fixture and imports the units under test, but makes no assertions and
    # leaves debugger scaffolding behind. TODO: implement or mark as skipped.
    from astro_cloud.fits.datatypes import FITSHeader
    from astro_cloud.fits.utils import find_next_header_offset

    # import pdb; pdb.set_trace()
    # import sys; sys.exit(1)
| 25.836735
| 62
| 0.765403
| 210
| 1,266
| 4.204762
| 0.185714
| 0.0906
| 0.152888
| 0.183465
| 0.770102
| 0.679502
| 0.618347
| 0.618347
| 0.618347
| 0.618347
| 0
| 0.02639
| 0.161927
| 1,266
| 48
| 63
| 26.375
| 0.805844
| 0.040284
| 0
| 0.518519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 1
| 0.259259
| false
| 0
| 0.555556
| 0
| 0.814815
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
76fbf00220b15db81ebeef9af48312f51593a2c1
| 80,376
|
py
|
Python
|
nova/tests/unit/virt/ironic/test_driver.py
|
badock/nova-tidb
|
4c4591f2cd887fdc22828e12f0c297c051bbd912
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/virt/ironic/test_driver.py
|
badock/nova-tidb
|
4c4591f2cd887fdc22828e12f0c297c051bbd912
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/virt/ironic/test_driver.py
|
badock/nova-tidb
|
4c4591f2cd887fdc22828e12f0c297c051bbd912
|
[
"Apache-2.0"
] | 1
|
2020-03-01T17:04:57.000Z
|
2020-03-01T17:04:57.000Z
|
# Copyright 2015 Red Hat, Inc.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the ironic driver."""
from ironicclient import exc as ironic_exception
import mock
from oslo_config import cfg
from oslo_service import loopingcall
from oslo_utils import uuidutils
import six
from testtools.matchers import HasLength
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state as nova_states
from nova.compute import task_states
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import utils
from nova.tests.unit.virt.ironic import utils as ironic_utils
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.ironic import client_wrapper as cw
from nova.virt.ironic import driver as ironic_driver
from nova.virt.ironic import ironic_states
CONF = cfg.CONF

# Ironic API config applied to every test case via self.flags() in setUp().
IRONIC_FLAGS = dict(
    api_version=1,
    group='ironic',
)

# Shared fake ironic client; individual tests patch its node.* methods.
FAKE_CLIENT = ironic_utils.FakeClient()
class FakeClientWrapper(cw.IronicClientWrapper):
    """Client wrapper that always hands back the module-level FAKE_CLIENT."""

    def _get_client(self, retry_on_conflict=True):
        # retry_on_conflict is accepted only for signature compatibility.
        return FAKE_CLIENT
class FakeLoopingCall(object):
    """Stand-in for an oslo_service looping call.

    start() returns self so tests can chain ``.start().wait()`` just like
    the real FixedIntervalLoopingCall API.
    """

    def __init__(self):
        self.wait = mock.MagicMock()
        self.start = mock.MagicMock()
        self.start.return_value = self
def _get_properties():
return {'cpus': 2,
'memory_mb': 512,
'local_gb': 10,
'cpu_arch': 'x86_64',
'capabilities': None}
def _get_instance_info():
return {'vcpus': 1,
'memory_mb': 1024,
'local_gb': 10}
def _get_stats():
return {'cpu_arch': 'x86_64'}
# Singleton wrapper instance substituted for cw.IronicClientWrapper below.
FAKE_CLIENT_WRAPPER = FakeClientWrapper()
@mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER)
class IronicDriverTestCase(test.NoDBTestCase):
    @mock.patch.object(cw, 'IronicClientWrapper',
                       lambda *_: FAKE_CLIENT_WRAPPER)
    def setUp(self):
        # Wire the driver to the fake ironic client and an admin context.
        super(IronicDriverTestCase, self).setUp()
        self.flags(**IRONIC_FLAGS)
        self.driver = ironic_driver.IronicDriver(None)
        self.driver.virtapi = fake.FakeVirtAPI()
        self.ctx = nova_context.get_admin_context()
        self.instance_uuid = uuidutils.generate_uuid()

        # mock retries configs to avoid sleeps and make tests run quicker
        CONF.set_default('api_max_retries', default=1, group='ironic')
        CONF.set_default('api_retry_interval', default=0, group='ironic')
    def test_public_api_signatures(self):
        # The ironic driver must expose the exact ComputeDriver public API.
        self.assertPublicAPISignatures(driver.ComputeDriver(None), self.driver)
    def test_validate_driver_loading(self):
        # setUp() should have produced a real IronicDriver instance.
        self.assertIsInstance(self.driver, ironic_driver.IronicDriver)
def test_driver_capabilities(self):
self.assertFalse(self.driver.capabilities['has_imagecache'],
'Driver capabilities for \'has_imagecache\''
'is invalid')
self.assertFalse(self.driver.capabilities['supports_recreate'],
'Driver capabilities for \'supports_recreate\''
'is invalid')
self.assertFalse(self.driver.capabilities[
'supports_migrate_to_same_host'],
'Driver capabilities for '
'\'supports_migrate_to_same_host\' is invalid')
self.assertFalse(self.driver.capabilities[
'supports_attach_interface'],
'Driver capabilities for '
'\'supports_attach_interface\' '
'is invalid')
    def test__get_hypervisor_type(self):
        # Hypervisor type is the fixed string 'ironic'.
        self.assertEqual('ironic', self.driver._get_hypervisor_type())
    def test__get_hypervisor_version(self):
        # Hypervisor version is the fixed value 1.
        self.assertEqual(1, self.driver._get_hypervisor_version())
    @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
    def test__validate_instance_and_node(self, mock_gbiui):
        # Happy path: looking up the node by instance uuid returns the node.
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=self.instance_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid)
        mock_gbiui.return_value = node
        result = self.driver._validate_instance_and_node(instance)
        self.assertEqual(result.uuid, node_uuid)
        mock_gbiui.assert_called_once_with(instance.uuid,
                                           fields=ironic_driver._NODE_FIELDS)
    @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
    def test__validate_instance_and_node_failed(self, mock_gbiui):
        # An ironic NotFound is translated into nova's InstanceNotFound.
        mock_gbiui.side_effect = ironic_exception.NotFound()
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid)
        self.assertRaises(exception.InstanceNotFound,
                          self.driver._validate_instance_and_node, instance)
        mock_gbiui.assert_called_once_with(instance.uuid,
                                          fields=ironic_driver._NODE_FIELDS)
    @mock.patch.object(objects.Instance, 'refresh')
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    def test__wait_for_active_pass(self, fake_validate, fake_refresh):
        # DEPLOYING state: the wait loop keeps polling (no exception raised).
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                provision_state=ironic_states.DEPLOYING)

        fake_validate.return_value = node
        self.driver._wait_for_active(instance)
        fake_validate.assert_called_once_with(instance)
        fake_refresh.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'refresh')
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    def test__wait_for_active_done(self, fake_validate, fake_refresh):
        # ACTIVE state: the loop terminates by raising LoopingCallDone.
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                provision_state=ironic_states.ACTIVE)

        fake_validate.return_value = node
        self.assertRaises(loopingcall.LoopingCallDone,
                self.driver._wait_for_active, instance)
        fake_validate.assert_called_once_with(instance)
        fake_refresh.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'refresh')
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    def test__wait_for_active_fail(self, fake_validate, fake_refresh):
        # DEPLOYFAIL state: the wait surfaces InstanceDeployFailure.
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                provision_state=ironic_states.DEPLOYFAIL)

        fake_validate.return_value = node
        self.assertRaises(exception.InstanceDeployFailure,
                self.driver._wait_for_active, instance)
        fake_validate.assert_called_once_with(instance)
        fake_refresh.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'refresh')
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    def _wait_for_active_abort(self, instance_params, fake_validate,
                               fake_refresh):
        # Helper: the wait must abort (without touching ironic) when the
        # instance is already being torn down, per instance_params.
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid(),
                **instance_params)
        self.assertRaises(exception.InstanceDeployFailure,
                self.driver._wait_for_active, instance)
        # Assert _validate_instance_and_node wasn't called
        self.assertFalse(fake_validate.called)
        fake_refresh.assert_called_once_with()
    def test__wait_for_active_abort_deleting(self):
        # Abort when the instance task state is DELETING.
        self._wait_for_active_abort({'task_state': task_states.DELETING})
    def test__wait_for_active_abort_deleted(self):
        # Abort when the instance vm state is DELETED.
        self._wait_for_active_abort({'vm_state': vm_states.DELETED})
    def test__wait_for_active_abort_error(self):
        # Abort when the instance vm state is ERROR.
        self._wait_for_active_abort({'vm_state': vm_states.ERROR})
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    def test__wait_for_power_state_pass(self, fake_validate):
        # A pending target_power_state means the loop keeps polling.
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                target_power_state=ironic_states.POWER_OFF)

        fake_validate.return_value = node
        self.driver._wait_for_power_state(instance, 'fake message')
        self.assertTrue(fake_validate.called)
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    def test__wait_for_power_state_ok(self, fake_validate):
        # NOSTATE target means the transition finished: LoopingCallDone.
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                target_power_state=ironic_states.NOSTATE)

        fake_validate.return_value = node
        self.assertRaises(loopingcall.LoopingCallDone,
                self.driver._wait_for_power_state, instance, 'fake message')
        self.assertTrue(fake_validate.called)
def _test__node_resource(self, has_inst_info):
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
stats = _get_stats()
if has_inst_info:
instance_info = _get_instance_info()
else:
instance_info = {}
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
instance_info=instance_info,
properties=props)
result = self.driver._node_resource(node)
wantkeys = ["hypervisor_hostname", "hypervisor_type",
"hypervisor_version", "cpu_info",
"vcpus", "vcpus_used",
"memory_mb", "memory_mb_used",
"local_gb", "local_gb_used",
"disk_available_least",
"supported_instances",
"stats",
"numa_topology"]
wantkeys.sort()
gotkeys = result.keys()
gotkeys.sort()
self.assertEqual(wantkeys, gotkeys)
if has_inst_info:
props_dict = instance_info
expected_cpus = instance_info['vcpus']
else:
props_dict = props
expected_cpus = props['cpus']
self.assertEqual(0, result['vcpus'])
self.assertEqual(expected_cpus, result['vcpus_used'])
self.assertEqual(0, result['memory_mb'])
self.assertEqual(props_dict['memory_mb'], result['memory_mb_used'])
self.assertEqual(0, result['local_gb'])
self.assertEqual(props_dict['local_gb'], result['local_gb_used'])
self.assertEqual(node_uuid, result['hypervisor_hostname'])
self.assertEqual(stats, result['stats'])
self.assertIsNone(result['numa_topology'])
    def test__node_resource(self):
        # Occupied node with instance_info present.
        self._test__node_resource(True)
    def test__node_resource_no_instance_info(self):
        # Occupied node without instance_info: properties are the fallback.
        self._test__node_resource(False)
    def test__node_resource_canonicalizes_arch(self):
        # 'i386' is canonicalized to 'i686' for scheduling, while the raw
        # value is preserved in stats.
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        props['cpu_arch'] = 'i386'
        node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)

        result = self.driver._node_resource(node)
        self.assertEqual('i686', result['supported_instances'][0][0])
        self.assertEqual('i386', result['stats']['cpu_arch'])
    def test__node_resource_unknown_arch(self):
        # A node without cpu_arch advertises no supported instances.
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        del props['cpu_arch']
        node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)

        result = self.driver._node_resource(node)
        self.assertEqual([], result['supported_instances'])
    def test__node_resource_exposes_capabilities(self):
        # "k:v" capability pairs are flattened into individual stats keys.
        props = _get_properties()
        props['capabilities'] = 'test:capability, test2:value2'
        node = ironic_utils.get_test_node(properties=props)
        result = self.driver._node_resource(node)
        stats = result['stats']
        self.assertIsNone(stats.get('capabilities'))
        self.assertEqual('capability', stats.get('test'))
        self.assertEqual('value2', stats.get('test2'))
    def test__node_resource_no_capabilities(self):
        # A None capabilities field leaves no 'capabilities' stat behind.
        props = _get_properties()
        props['capabilities'] = None
        node = ironic_utils.get_test_node(properties=props)
        result = self.driver._node_resource(node)
        self.assertIsNone(result['stats'].get('capabilities'))
    def test__node_resource_malformed_capabilities(self):
        # Malformed "k:v" entries are skipped; valid ones still parse.
        props = _get_properties()
        props['capabilities'] = 'test:capability,:no_key,no_val:'
        node = ironic_utils.get_test_node(properties=props)
        result = self.driver._node_resource(node)
        stats = result['stats']
        self.assertEqual('capability', stats.get('test'))
    def test__node_resource_available(self):
        # An AVAILABLE, unassigned node exposes all resources as free.
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        stats = _get_stats()
        node = ironic_utils.get_test_node(
            uuid=node_uuid,
            instance_uuid=None,
            power_state=ironic_states.POWER_OFF,
            properties=props,
            provision_state=ironic_states.AVAILABLE)

        result = self.driver._node_resource(node)
        self.assertEqual(props['cpus'], result['vcpus'])
        self.assertEqual(0, result['vcpus_used'])
        self.assertEqual(props['memory_mb'], result['memory_mb'])
        self.assertEqual(0, result['memory_mb_used'])
        self.assertEqual(props['local_gb'], result['local_gb'])
        self.assertEqual(0, result['local_gb_used'])
        self.assertEqual(node_uuid, result['hypervisor_hostname'])
        self.assertEqual(stats, result['stats'])
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_node_resources_unavailable')
    def test__node_resource_unavailable_node_res(self, mock_res_unavail):
        # When the node is flagged unavailable, every figure is zeroed.
        mock_res_unavail.return_value = True
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        stats = _get_stats()
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=None,
                                          properties=props)

        result = self.driver._node_resource(node)
        self.assertEqual(0, result['vcpus'])
        self.assertEqual(0, result['vcpus_used'])
        self.assertEqual(0, result['memory_mb'])
        self.assertEqual(0, result['memory_mb_used'])
        self.assertEqual(0, result['local_gb'])
        self.assertEqual(0, result['local_gb_used'])
        self.assertEqual(node_uuid, result['hypervisor_hostname'])
        self.assertEqual(stats, result['stats'])
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_node_resources_used')
    def test__node_resource_used_node_res(self, mock_res_used):
        # A "used" node reports its instance_info figures as consumed and
        # exposes nothing as free.
        mock_res_used.return_value = True
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        stats = _get_stats()
        instance_info = _get_instance_info()
        node = ironic_utils.get_test_node(
            uuid=node_uuid,
            instance_uuid=uuidutils.generate_uuid(),
            provision_state=ironic_states.ACTIVE,
            properties=props,
            instance_info=instance_info)

        result = self.driver._node_resource(node)
        self.assertEqual(0, result['vcpus'])
        self.assertEqual(instance_info['vcpus'], result['vcpus_used'])
        self.assertEqual(0, result['memory_mb'])
        self.assertEqual(instance_info['memory_mb'], result['memory_mb_used'])
        self.assertEqual(0, result['local_gb'])
        self.assertEqual(instance_info['local_gb'], result['local_gb_used'])
        self.assertEqual(node_uuid, result['hypervisor_hostname'])
        self.assertEqual(stats, result['stats'])
    @mock.patch.object(ironic_driver.LOG, 'warning')
    def test__parse_node_properties(self, mock_warning):
        # Well-formed properties parse cleanly and emit no warnings.
        props = _get_properties()
        node = ironic_utils.get_test_node(
            uuid=uuidutils.generate_uuid(),
            properties=props)
        # raw_cpu_arch is included because extra_specs filters do not
        # canonicalized the arch
        props['raw_cpu_arch'] = props['cpu_arch']
        parsed = self.driver._parse_node_properties(node)

        self.assertEqual(props, parsed)
        # Assert we didn't log any warning since all properties are
        # correct
        self.assertFalse(mock_warning.called)
    @mock.patch.object(ironic_driver.LOG, 'warning')
    def test__parse_node_properties_bad_values(self, mock_warning):
        # Unparseable values degrade to safe defaults, one warning each.
        props = _get_properties()
        props['cpus'] = 'bad-value'
        props['memory_mb'] = 'bad-value'
        props['local_gb'] = 'bad-value'
        props['cpu_arch'] = 'bad-value'
        node = ironic_utils.get_test_node(
            uuid=uuidutils.generate_uuid(),
            properties=props)
        # raw_cpu_arch is included because extra_specs filters do not
        # canonicalized the arch
        props['raw_cpu_arch'] = props['cpu_arch']
        parsed = self.driver._parse_node_properties(node)

        expected_props = props.copy()
        expected_props['cpus'] = 0
        expected_props['memory_mb'] = 0
        expected_props['local_gb'] = 0
        expected_props['cpu_arch'] = None
        self.assertEqual(expected_props, parsed)
        self.assertEqual(4, mock_warning.call_count)
    @mock.patch.object(ironic_driver.LOG, 'warning')
    def test__parse_node_instance_info(self, mock_warning):
        # Well-formed instance_info parses as-is, with no warnings.
        props = _get_properties()
        instance_info = _get_instance_info()
        node = ironic_utils.get_test_node(
            uuid=uuidutils.generate_uuid(),
            instance_info=instance_info)
        parsed = self.driver._parse_node_instance_info(node, props)

        self.assertEqual(instance_info, parsed)
        self.assertFalse(mock_warning.called)
    @mock.patch.object(ironic_driver.LOG, 'warning')
    def test__parse_node_instance_info_bad_values(self, mock_warning):
        # Bad instance_info values fall back to the node properties,
        # logging one warning per bad field.
        props = _get_properties()
        instance_info = _get_instance_info()
        instance_info['vcpus'] = 'bad-value'
        instance_info['memory_mb'] = 'bad-value'
        instance_info['local_gb'] = 'bad-value'
        node = ironic_utils.get_test_node(
            uuid=uuidutils.generate_uuid(),
            instance_info=instance_info)
        parsed = self.driver._parse_node_instance_info(node, props)

        expected = {
            'vcpus': props['cpus'],
            'memory_mb': props['memory_mb'],
            'local_gb': props['local_gb']
        }
        self.assertEqual(expected, parsed)
        self.assertEqual(3, mock_warning.call_count)
    @mock.patch.object(ironic_driver.LOG, 'warning')
    def test__parse_node_properties_canonicalize_cpu_arch(self, mock_warning):
        # 'amd64' is canonicalized to 'x86_64' during property parsing.
        props = _get_properties()
        props['cpu_arch'] = 'amd64'
        node = ironic_utils.get_test_node(
            uuid=uuidutils.generate_uuid(),
            properties=props)
        # raw_cpu_arch is included because extra_specs filters do not
        # canonicalized the arch
        props['raw_cpu_arch'] = props['cpu_arch']
        parsed = self.driver._parse_node_properties(node)

        expected_props = props.copy()
        # Make sure it cpu_arch was canonicalized
        expected_props['cpu_arch'] = 'x86_64'
        self.assertEqual(expected_props, parsed)
        # Assert we didn't log any warning since all properties are
        # correct
        self.assertFalse(mock_warning.called)
    @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
                       create=True)
    @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
                       create=True)
    @mock.patch.object(firewall.NoopFirewallDriver, 'apply_instance_filter',
                       create=True)
    def test__start_firewall(self, mock_aif, mock_sbf, mock_pif):
        # _start_firewall delegates to all three NoopFirewallDriver hooks.
        fake_inst = 'fake-inst'
        fake_net_info = utils.get_test_network_info()
        self.driver._start_firewall(fake_inst, fake_net_info)

        mock_aif.assert_called_once_with(fake_inst, fake_net_info)
        mock_sbf.assert_called_once_with(fake_inst, fake_net_info)
        mock_pif.assert_called_once_with(fake_inst, fake_net_info)
    @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
                       create=True)
    def test__stop_firewall(self, mock_ui):
        # _stop_firewall delegates to unfilter_instance.
        fake_inst = 'fake-inst'
        fake_net_info = utils.get_test_network_info()
        self.driver._stop_firewall(fake_inst, fake_net_info)
        mock_ui.assert_called_once_with(fake_inst, fake_net_info)
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    def test_instance_exists(self, mock_call):
        # A successful node lookup means the instance exists.
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid)
        self.assertTrue(self.driver.instance_exists(instance))
        mock_call.assert_called_once_with('node.get_by_instance_uuid',
                                          self.instance_uuid,
                                          fields=ironic_driver._NODE_FIELDS)
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    def test_instance_exists_fail(self, mock_call):
        # A NotFound from ironic means the instance does not exist.
        mock_call.side_effect = ironic_exception.NotFound
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid)
        self.assertFalse(self.driver.instance_exists(instance))
        mock_call.assert_called_once_with('node.get_by_instance_uuid',
                                          self.instance_uuid,
                                          fields=ironic_driver._NODE_FIELDS)
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    @mock.patch.object(objects.Instance, 'get_by_uuid')
    def test_list_instances(self, mock_inst_by_uuid, mock_call):
        # list_instances returns the instance names of all associated nodes.
        nodes = []
        instances = []
        for i in range(2):
            uuid = uuidutils.generate_uuid()
            instances.append(fake_instance.fake_instance_obj(self.ctx,
                                                             id=i,
                                                             uuid=uuid))
            nodes.append(ironic_utils.get_test_node(instance_uuid=uuid))

        mock_inst_by_uuid.side_effect = instances
        mock_call.return_value = nodes

        response = self.driver.list_instances()
        mock_call.assert_called_with("node.list", associated=True, limit=0)
        expected_calls = [mock.call(mock.ANY, instances[0].uuid),
                          mock.call(mock.ANY, instances[1].uuid)]
        mock_inst_by_uuid.assert_has_calls(expected_calls)
        self.assertEqual(['instance-00000000', 'instance-00000001'],
                          sorted(response))
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    @mock.patch.object(objects.Instance, 'get_by_uuid')
    def test_list_instances_fail(self, mock_inst_by_uuid, mock_call):
        # An ironic failure yields an empty list, not an exception.
        mock_call.side_effect = exception.NovaException
        response = self.driver.list_instances()
        mock_call.assert_called_with("node.list", associated=True, limit=0)
        self.assertFalse(mock_inst_by_uuid.called)
        self.assertThat(response, HasLength(0))
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    def test_list_instance_uuids(self, mock_call):
        # list_instance_uuids returns the instance uuid of every node.
        num_nodes = 2
        nodes = []
        for n in range(num_nodes):
            nodes.append(ironic_utils.get_test_node(
                                      instance_uuid=uuidutils.generate_uuid()))

        mock_call.return_value = nodes
        uuids = self.driver.list_instance_uuids()
        mock_call.assert_called_with('node.list', associated=True, limit=0)
        expected = [n.instance_uuid for n in nodes]
        self.assertEqual(sorted(expected), sorted(uuids))
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    def test_node_is_available_empty_cache_empty_list(self, mock_get,
                                                      mock_list):
        # With no cached nodes, availability falls back to node.get();
        # a NotFound from get() means unavailable.
        node = ironic_utils.get_test_node()
        mock_get.return_value = node
        mock_list.return_value = []
        self.assertTrue(self.driver.node_is_available(node.uuid))
        mock_get.assert_called_with(node.uuid,
                                    fields=ironic_driver._NODE_FIELDS)
        mock_list.assert_called_with(detail=True, limit=0)

        mock_get.side_effect = ironic_exception.NotFound
        self.assertFalse(self.driver.node_is_available(node.uuid))
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    def test_node_is_available_empty_cache(self, mock_get, mock_list):
        # When node.list() already contains the node, node.get() is skipped.
        node = ironic_utils.get_test_node()
        mock_get.return_value = node
        mock_list.return_value = [node]
        self.assertTrue(self.driver.node_is_available(node.uuid))
        mock_list.assert_called_with(detail=True, limit=0)
        self.assertEqual(0, mock_get.call_count)
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    def test_node_is_available_with_cache(self, mock_get, mock_list):
        # A warm node cache answers availability with zero API calls.
        node = ironic_utils.get_test_node()
        mock_get.return_value = node
        mock_list.return_value = [node]
        # populate the cache
        self.driver.get_available_nodes(refresh=True)
        # prove that zero calls are made after populating cache
        mock_list.reset_mock()
        self.assertTrue(self.driver.node_is_available(node.uuid))
        self.assertEqual(0, mock_list.call_count)
        self.assertEqual(0, mock_get.call_count)
    def test__node_resources_unavailable(self):
        # Each node below must be reported unavailable; the AVAILABLE /
        # NOSTATE nodes without an instance at the end must not be.
        node_dicts = [
            # a node in maintenance /w no instance and power OFF
            {'uuid': uuidutils.generate_uuid(),
             'maintenance': True,
             'power_state': ironic_states.POWER_OFF,
             'provision_state': ironic_states.AVAILABLE},
            # a node in maintenance /w no instance and ERROR power state
            {'uuid': uuidutils.generate_uuid(),
             'maintenance': True,
             'power_state': ironic_states.ERROR,
             'provision_state': ironic_states.AVAILABLE},
            # a node not in maintenance /w no instance and bad power state
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.NOSTATE,
             'provision_state': ironic_states.AVAILABLE},
            # a node not in maintenance or bad power state, bad provision state
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.MANAGEABLE},
            # a node in cleaning
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.CLEANING},
            # a node in cleaning, waiting for a clean step to finish
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.CLEANWAIT},
            # a node in deleting
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.DELETING},
            # a node in deleted
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.DELETED},
            # a node in AVAILABLE with an instance uuid
            {'uuid': uuidutils.generate_uuid(),
             'instance_uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_OFF,
             'provision_state': ironic_states.AVAILABLE}
        ]
        for n in node_dicts:
            node = ironic_utils.get_test_node(**n)
            self.assertTrue(self.driver._node_resources_unavailable(node))

        for ok_state in (ironic_states.AVAILABLE, ironic_states.NOSTATE):
            # these are both ok and should present as available as they
            # have no instance_uuid
            avail_node = ironic_utils.get_test_node(
                            power_state=ironic_states.POWER_OFF,
                            provision_state=ok_state)
            unavailable = self.driver._node_resources_unavailable(avail_node)
            self.assertFalse(unavailable)
    def test__node_resources_used(self):
        # ACTIVE + instance_uuid means "used"; AVAILABLE + no instance does
        # not.
        node_dicts = [
            # a node in maintenance /w instance and active
            {'uuid': uuidutils.generate_uuid(),
             'instance_uuid': uuidutils.generate_uuid(),
             'provision_state': ironic_states.ACTIVE},
        ]
        for n in node_dicts:
            node = ironic_utils.get_test_node(**n)
            self.assertTrue(self.driver._node_resources_used(node))

        unused_node = ironic_utils.get_test_node(
            instance_uuid=None,
            provision_state=ironic_states.AVAILABLE)
        self.assertFalse(self.driver._node_resources_used(unused_node))
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    def test_get_available_nodes(self, mock_list):
        """get_available_nodes() returns every node uuid Ironic lists,
        regardless of maintenance, association or power state.
        """
        node_dicts = [
            # a node in maintenance /w no instance and power OFF
            {'uuid': uuidutils.generate_uuid(),
             'maintenance': True,
             'power_state': ironic_states.POWER_OFF},
            # a node /w instance and power ON
            {'uuid': uuidutils.generate_uuid(),
             'instance_uuid': self.instance_uuid,
             'power_state': ironic_states.POWER_ON},
            # a node not in maintenance /w no instance and bad power state
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.ERROR},
        ]
        nodes = [ironic_utils.get_test_node(**n) for n in node_dicts]
        mock_list.return_value = nodes
        available_nodes = self.driver.get_available_nodes()
        expected_uuids = [n['uuid'] for n in node_dicts]
        self.assertEqual(sorted(expected_uuids), sorted(available_nodes))
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
    def test_get_available_resource(self, mock_nr, mock_list, mock_get):
        """On a cache miss, get_available_resource() fetches the node
        via node.get (with the driver's field list) and builds the
        resource dict from it.
        """
        node = ironic_utils.get_test_node()
        node_2 = ironic_utils.get_test_node(uuid=uuidutils.generate_uuid())
        fake_resource = 'fake-resource'
        mock_get.return_value = node
        # ensure cache gets populated without the node we want
        mock_list.return_value = [node_2]
        mock_nr.return_value = fake_resource
        result = self.driver.get_available_resource(node.uuid)
        self.assertEqual(fake_resource, result)
        mock_nr.assert_called_once_with(node)
        mock_get.assert_called_once_with(node.uuid,
                                         fields=ironic_driver._NODE_FIELDS)
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
    def test_get_available_resource_with_cache(self, mock_nr, mock_list,
                                               mock_get):
        """With a warm node cache, get_available_resource() answers from
        the cache and makes no client calls at all.
        """
        node = ironic_utils.get_test_node()
        fake_resource = 'fake-resource'
        mock_list.return_value = [node]
        mock_nr.return_value = fake_resource
        # populate the cache
        self.driver.get_available_nodes(refresh=True)
        mock_list.reset_mock()
        result = self.driver.get_available_resource(node.uuid)
        self.assertEqual(fake_resource, result)
        # neither list nor get should have been needed after the refresh
        self.assertEqual(0, mock_list.call_count)
        self.assertEqual(0, mock_get.call_count)
        mock_nr.assert_called_once_with(node)
    @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
    def test_get_info(self, mock_gbiu):
        """get_info() maps the node's properties and power state onto an
        InstanceInfo (memory in KiB, POWER_ON -> RUNNING).
        """
        properties = {'memory_mb': 512, 'cpus': 2}
        power_state = ironic_states.POWER_ON
        node = ironic_utils.get_test_node(instance_uuid=self.instance_uuid,
                                          properties=properties,
                                          power_state=power_state)
        mock_gbiu.return_value = node
        # ironic_states.POWER_ON should be mapped to
        # nova_states.RUNNING
        memory_kib = properties['memory_mb'] * 1024
        instance = fake_instance.fake_instance_obj('fake-context',
                                                   uuid=self.instance_uuid)
        result = self.driver.get_info(instance)
        self.assertEqual(hardware.InstanceInfo(state=nova_states.RUNNING,
                                               max_mem_kb=memory_kib,
                                               mem_kb=memory_kib,
                                               num_cpu=properties['cpus']),
                         result)
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test_get_info_http_not_found(self, mock_gbiu):
mock_gbiu.side_effect = ironic_exception.NotFound()
instance = fake_instance.fake_instance_obj(
self.ctx, uuid=uuidutils.generate_uuid())
result = self.driver.get_info(instance)
self.assertEqual(hardware.InstanceInfo(state=nova_states.NOSTATE),
result)
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_macs_for_instance(self, mock_node):
        """macs_for_instance() returns the set of MAC addresses from the
        node's ports.
        """
        node = ironic_utils.get_test_node()
        port = ironic_utils.get_test_port()
        mock_node.get.return_value = node
        mock_node.list_ports.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        result = self.driver.macs_for_instance(instance)
        self.assertEqual(set([port.address]), result)
        mock_node.list_ports.assert_called_once_with(node.uuid)
@mock.patch.object(FAKE_CLIENT.node, 'get')
def test_macs_for_instance_http_not_found(self, mock_get):
mock_get.side_effect = ironic_exception.NotFound()
instance = fake_instance.fake_instance_obj(
self.ctx, node=uuidutils.generate_uuid())
result = self.driver.macs_for_instance(instance)
self.assertIsNone(result)
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def _test_spawn(self, mock_sf, mock_pvifs, mock_adf, mock_wait_active,
                    mock_node, mock_looping, mock_save):
        """Shared assertions for a successful spawn(): node fetched and
        validated, driver fields added, vifs plugged, firewall started,
        provision state set to 'active' and the wait loop executed.
        Callers patch the configdrive machinery themselves.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        fake_flavor = objects.Flavor(ephemeral_gb=0)
        instance.flavor = fake_flavor
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        image_meta = ironic_utils.get_test_image_meta()
        self.driver.spawn(self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(
            node_uuid, fields=ironic_driver._NODE_FIELDS)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_adf.assert_called_once_with(node, instance,
                                         test.MatchType(objects.ImageMeta),
                                         fake_flavor)
        mock_pvifs.assert_called_once_with(node, instance, None)
        mock_sf.assert_called_once_with(instance, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                'active', configdrive=mock.ANY)
        # ephemeral_gb=0, so no default ephemeral device and no save()
        self.assertIsNone(instance.default_ephemeral_device)
        self.assertFalse(mock_save.called)
        mock_looping.assert_called_once_with(mock_wait_active,
                                             instance)
        fake_looping_call.start.assert_called_once_with(
            interval=CONF.ironic.api_retry_interval)
        fake_looping_call.wait.assert_called_once_with()
@mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive')
@mock.patch.object(configdrive, 'required_by')
def test_spawn(self, mock_required_by, mock_configdrive):
mock_required_by.return_value = False
self._test_spawn()
# assert configdrive was not generated
self.assertFalse(mock_configdrive.called)
@mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive')
@mock.patch.object(configdrive, 'required_by')
def test_spawn_with_configdrive(self, mock_required_by, mock_configdrive):
mock_required_by.return_value = True
self._test_spawn()
# assert configdrive was generated
mock_configdrive.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
extra_md={}, files=[])
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def test_spawn_destroyed_after_failure(self, mock_sf, mock_pvifs, mock_adf,
                                           mock_wait_active, mock_destroy,
                                           mock_node, mock_looping,
                                           mock_required_by):
        """A deploy failure in the wait loop propagates out of spawn().

        NOTE(review): despite the test name, the assertion below checks
        that destroy() is NOT called on failure — confirm the name is
        simply stale relative to the intended behavior.
        """
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        fake_flavor = objects.Flavor(ephemeral_gb=0)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = fake_flavor
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        deploy_exc = exception.InstanceDeployFailure('foo')
        fake_looping_call.wait.side_effect = deploy_exc
        self.assertRaises(
            exception.InstanceDeployFailure,
            self.driver.spawn, self.ctx, instance, None, [], None)
        self.assertEqual(0, mock_destroy.call_count)
    def _test_add_driver_fields(self, mock_update=None, mock_call=None):
        """Shared check that _add_driver_fields() sends the expected JSON
        patch, asserted either against node.update (mock_update) or the
        client wrapper's call() (mock_call) depending on which the caller
        patched.
        """
        node = ironic_utils.get_test_node(driver='fake')
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        image_meta = ironic_utils.get_test_image_meta()
        flavor = ironic_utils.get_test_flavor()
        self.driver._add_driver_fields(node, instance, image_meta, flavor)
        expected_patch = [{'path': '/instance_info/image_source', 'op': 'add',
                           'value': image_meta.id},
                          {'path': '/instance_info/root_gb', 'op': 'add',
                           'value': str(instance.root_gb)},
                          {'path': '/instance_info/swap_mb', 'op': 'add',
                           'value': str(flavor['swap'])},
                          {'path': '/instance_info/display_name',
                           'value': instance.display_name, 'op': 'add'},
                          {'path': '/instance_info/vcpus', 'op': 'add',
                           'value': str(instance.vcpus)},
                          {'path': '/instance_info/memory_mb', 'op': 'add',
                           'value': str(instance.memory_mb)},
                          {'path': '/instance_info/local_gb', 'op': 'add',
                           'value': str(node.properties.get('local_gb', 0))},
                          {'path': '/instance_uuid', 'op': 'add',
                           'value': instance.uuid}]
        if mock_call is not None:
            # assert call() is invoked with retry_on_conflict False to
            # avoid bug #1341420
            mock_call.assert_called_once_with('node.update', node.uuid,
                                              expected_patch,
                                              retry_on_conflict=False)
        if mock_update is not None:
            mock_update.assert_called_once_with(node.uuid, expected_patch)
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__add_driver_fields_mock_update(self, mock_update):
        """The expected patch reaches node.update() unchanged."""
        self._test_add_driver_fields(mock_update=mock_update)
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    def test__add_driver_fields_mock_call(self, mock_call):
        """node.update goes through call() with retry_on_conflict=False."""
        self._test_add_driver_fields(mock_call=mock_call)
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__add_driver_fields_fail(self, mock_update):
mock_update.side_effect = ironic_exception.BadRequest()
node = ironic_utils.get_test_node(driver='fake')
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
image_meta = ironic_utils.get_test_image_meta()
flavor = ironic_utils.get_test_flavor()
self.assertRaises(exception.InstanceDeployFailure,
self.driver._add_driver_fields,
node, instance, image_meta, flavor)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_spawn_node_driver_validation_fail(self, mock_node,
                                               mock_required_by):
        """spawn() raises ValidationError when node validation reports
        failing power and deploy interfaces.
        """
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        mock_node.validate.return_value = ironic_utils.get_test_validation(
            power={'result': False}, deploy={'result': False})
        mock_node.get.return_value = node
        image_meta = ironic_utils.get_test_image_meta()
        self.assertRaises(exception.ValidationError, self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(
            node_uuid, fields=ironic_driver._NODE_FIELDS)
        mock_node.validate.assert_called_once_with(node_uuid)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_spawn_node_prepare_for_deploy_fail(self, mock_cleanup_deploy,
                                                mock_pvifs, mock_sf,
                                                mock_node, mock_required_by):
        """If starting the firewall blows up during spawn(), the error
        propagates and _cleanup_deploy() is invoked.
        """
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        image_meta = ironic_utils.get_test_image_meta()
        class TestException(Exception):
            pass
        mock_sf.side_effect = TestException()
        self.assertRaises(TestException, self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(
            node_uuid, fields=ironic_driver._NODE_FIELDS)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_cleanup_deploy.assert_called_with(node, instance, None)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    def test_spawn_node_configdrive_fail(self,
                                         mock_pvifs, mock_sf, mock_configdrive,
                                         mock_node, mock_save,
                                         mock_required_by):
        """A configdrive-generation failure propagates out of spawn() and
        triggers _cleanup_deploy().
        """
        mock_required_by.return_value = True
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        image_meta = ironic_utils.get_test_image_meta()
        class TestException(Exception):
            pass
        mock_configdrive.side_effect = TestException()
        with mock.patch.object(self.driver, '_cleanup_deploy',
                               autospec=True) as mock_cleanup_deploy:
            self.assertRaises(TestException, self.driver.spawn,
                              self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(
            node_uuid, fields=ironic_driver._NODE_FIELDS)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_cleanup_deploy.assert_called_with(node, instance, None)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_spawn_node_trigger_deploy_fail(self, mock_cleanup_deploy,
                                            mock_pvifs, mock_sf,
                                            mock_node, mock_required_by):
        """A NovaException from set_provision_state propagates and
        _cleanup_deploy() runs exactly once.
        """
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        image_meta = ironic_utils.get_test_image_meta()
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.set_provision_state.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(
            node_uuid, fields=ironic_driver._NODE_FIELDS)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_cleanup_deploy.assert_called_once_with(node, instance, None)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_spawn_node_trigger_deploy_fail2(self, mock_cleanup_deploy,
                                             mock_pvifs, mock_sf,
                                             mock_node, mock_required_by):
        """An Ironic BadRequest from set_provision_state propagates
        unwrapped and _cleanup_deploy() runs exactly once.
        """
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        image_meta = ironic_utils.get_test_image_meta()
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.set_provision_state.side_effect = ironic_exception.BadRequest
        self.assertRaises(ironic_exception.BadRequest,
                          self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(
            node_uuid, fields=ironic_driver._NODE_FIELDS)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_cleanup_deploy.assert_called_once_with(node, instance, None)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
    def test_spawn_node_trigger_deploy_fail3(self, mock_destroy,
                                             mock_pvifs, mock_sf,
                                             mock_node, mock_looping,
                                             mock_required_by):
        """A BadRequest raised while waiting for the deploy propagates,
        and spawn() does not call destroy().
        """
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        image_meta = ironic_utils.get_test_image_meta()
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        fake_looping_call.wait.side_effect = ironic_exception.BadRequest
        fake_net_info = utils.get_test_network_info()
        self.assertRaises(ironic_exception.BadRequest,
                          self.driver.spawn, self.ctx, instance,
                          image_meta, [], None, fake_net_info)
        self.assertEqual(0, mock_destroy.call_count)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def test_spawn_sets_default_ephemeral_device(self, mock_sf, mock_pvifs,
                                                 mock_wait, mock_node,
                                                 mock_save, mock_looping,
                                                 mock_required_by):
        """With ephemeral_gb > 0, spawn() sets default_ephemeral_device to
        /dev/sda1 and saves the instance.
        """
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor(ephemeral_gb=1)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        image_meta = ironic_utils.get_test_image_meta()
        self.driver.spawn(self.ctx, instance, image_meta, [], None)
        self.assertTrue(mock_save.called)
        self.assertEqual('/dev/sda1', instance.default_ephemeral_device)
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def _test_destroy(self, state, mock_cleanup_deploy, mock_node):
        """Run destroy() for a node in *state* and verify cleanup runs and
        set_provision_state('deleted') fires only for states in
        _UNPROVISION_STATES.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        network_info = 'foo'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=state)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        def fake_set_provision_state(*_):
            # simulate the node leaving its provision state once unprovisioned
            node.provision_state = None
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.side_effect = fake_set_provision_state
        self.driver.destroy(self.ctx, instance, network_info, None)
        mock_node.get_by_instance_uuid.assert_called_with(
            instance.uuid, fields=ironic_driver._NODE_FIELDS)
        mock_cleanup_deploy.assert_called_with(node, instance, network_info)
        # For states that makes sense check if set_provision_state has
        # been called
        if state in ironic_driver._UNPROVISION_STATES:
            mock_node.set_provision_state.assert_called_once_with(
                node_uuid, 'deleted')
        else:
            self.assertFalse(mock_node.set_provision_state.called)
def test_destroy(self):
for state in ironic_states.PROVISION_STATE_LIST:
self._test_destroy(state)
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    def test_destroy_trigger_undeploy_fail(self, fake_validate, mock_sps):
        """A NovaException while triggering the undeploy propagates out of
        destroy().
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        fake_validate.return_value = node
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        mock_sps.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.destroy,
                          self.ctx, instance, None, None)
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    def _test__unprovision_instance(self, mock_validate_inst, mock_set_pstate,
                                    state=None):
        """_unprovision() requests 'deleted' for a node in *state* after a
        single successful validation.
        """
        node = ironic_utils.get_test_node(
            driver='fake',
            provision_state=state)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
        mock_validate_inst.return_value = node
        self.driver._unprovision(instance, node)
        mock_validate_inst.assert_called_once_with(instance)
        mock_set_pstate.assert_called_once_with(node.uuid, "deleted")
    def test__unprovision_cleaning(self):
        """Unprovisioning proceeds for a node already in CLEANING."""
        self._test__unprovision_instance(state=ironic_states.CLEANING)
    def test__unprovision_cleanwait(self):
        """Unprovisioning proceeds for a node waiting in CLEANWAIT."""
        self._test__unprovision_instance(state=ironic_states.CLEANWAIT)
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    def test__unprovision_fail_max_retries(self, mock_validate_inst,
                                           mock_set_pstate):
        """_unprovision() gives up with NovaException once api_max_retries
        validation polls have been exhausted (here 2).
        """
        CONF.set_default('api_max_retries', default=2, group='ironic')
        node = ironic_utils.get_test_node(
            driver='fake',
            provision_state=ironic_states.ACTIVE)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
        mock_validate_inst.return_value = node
        self.assertRaises(exception.NovaException, self.driver._unprovision,
                          instance, node)
        expected_calls = (mock.call(instance),
                          mock.call(instance))
        mock_validate_inst.assert_has_calls(expected_calls)
        mock_set_pstate.assert_called_once_with(node.uuid, "deleted")
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    def test__unprovision_instance_not_found(self, mock_validate_inst,
                                             mock_set_pstate):
        """An InstanceNotFound during the post-trigger poll is swallowed —
        the node (already DELETING) is considered unprovisioned.
        """
        node = ironic_utils.get_test_node(
            driver='fake', provision_state=ironic_states.DELETING)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
        mock_validate_inst.side_effect = exception.InstanceNotFound(
            instance_id='fake')
        self.driver._unprovision(instance, node)
        mock_validate_inst.assert_called_once_with(instance)
        mock_set_pstate.assert_called_once_with(node.uuid, "deleted")
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_destroy_unassociate_fail(self, mock_node):
        """If node.update fails while unassociating the instance, destroy()
        re-raises after having triggered the unprovision.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.update.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.destroy,
                          self.ctx, instance, None, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                              'deleted')
        mock_node.get_by_instance_uuid.assert_called_with(
            instance.uuid, fields=ironic_driver._NODE_FIELDS)
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
    def test_reboot(self, mock_sp, fake_validate, mock_looping):
        """reboot() issues a single 'reboot' power-state request."""
        node = ironic_utils.get_test_node()
        # validated once before the request and once while polling
        fake_validate.side_effect = [node, node]
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        self.driver.reboot(self.ctx, instance, None, None)
        mock_sp.assert_called_once_with(node.uuid, 'reboot')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
    def test_power_off(self, mock_sp, fake_validate, mock_looping):
        """power_off() asks Ironic to switch the node 'off'."""
        self._test_power_on_off(mock_sp, fake_validate, mock_looping,
                                method_name='power_off')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_validate_instance_and_node')
    @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
    def test_power_on(self, mock_sp, fake_validate, mock_looping):
        """power_on() asks Ironic to switch the node 'on'."""
        self._test_power_on_off(mock_sp, fake_validate, mock_looping,
                                method_name='power_on')
    def _test_power_on_off(self, mock_sp, fake_validate, mock_looping,
                           method_name=None):
        """Shared driver-level check for power_on()/power_off(): the
        matching 'on'/'off' request is sent exactly once.
        """
        node = ironic_utils.get_test_node()
        # validated once before the request and once while polling
        fake_validate.side_effect = [node, node]
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=self.instance_uuid)
        # Call the method under test here
        if method_name == 'power_on':
            self.driver.power_on(self.ctx, instance,
                                 utils.get_test_network_info())
            mock_sp.assert_called_once_with(node.uuid, 'on')
        elif method_name == 'power_off':
            self.driver.power_off(instance)
            mock_sp.assert_called_once_with(node.uuid, 'off')
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_with_port(self, mock_uvifs, mock_port_udt, mock_lp):
        """_plug_vifs() patches the matching port's extra/vif_port_id with
        the vif's id (after first unplugging existing vifs).
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        # make the address be consistent with network_info's
        port = ironic_utils.get_test_port(address=utils.FAKE_VIF_MAC)
        mock_lp.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        network_info = utils.get_test_network_info()
        port_id = six.text_type(network_info[0]['id'])
        expected_patch = [{'op': 'add',
                           'path': '/extra/vif_port_id',
                           'value': port_id}]
        self.driver._plug_vifs(node, instance, network_info)
        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        mock_port_udt.assert_called_with(port.uuid, expected_patch)
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    def test_plug_vifs(self, mock__plug_vifs, mock_get):
        """plug_vifs() looks the node up and delegates to _plug_vifs()."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        mock_get.return_value = node
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        network_info = utils.get_test_network_info()
        self.driver.plug_vifs(instance, network_info)
        mock_get.assert_called_once_with(node_uuid,
                                         fields=ironic_driver._NODE_FIELDS)
        mock__plug_vifs.assert_called_once_with(node, instance, network_info)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_multiple_ports(self, mock_uvifs, mock_lp,
                                      mock_port_udt):
        """With several ports, each vif is patched onto the port whose MAC
        matches its address — ordering of ports/vifs does not matter.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        first_ironic_port_uuid = 'aaaaaaaa-bbbb-1111-dddd-eeeeeeeeeeee'
        first_port = ironic_utils.get_test_port(uuid=first_ironic_port_uuid,
                                                node_uuid=node_uuid,
                                                address='11:FF:FF:FF:FF:FF')
        second_ironic_port_uuid = 'aaaaaaaa-bbbb-2222-dddd-eeeeeeeeeeee'
        second_port = ironic_utils.get_test_port(uuid=second_ironic_port_uuid,
                                                 node_uuid=node_uuid,
                                                 address='22:FF:FF:FF:FF:FF')
        mock_lp.return_value = [second_port, first_port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        first_vif_id = 'aaaaaaaa-vv11-cccc-dddd-eeeeeeeeeeee'
        second_vif_id = 'aaaaaaaa-vv22-cccc-dddd-eeeeeeeeeeee'
        # vifs deliberately cross-matched against the ports by MAC address
        first_vif = ironic_utils.get_test_vif(
            address='22:FF:FF:FF:FF:FF',
            id=second_vif_id)
        second_vif = ironic_utils.get_test_vif(
            address='11:FF:FF:FF:FF:FF',
            id=first_vif_id)
        network_info = [first_vif, second_vif]
        self.driver._plug_vifs(node, instance, network_info)
        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        calls = (mock.call(first_ironic_port_uuid,
                           [{'op': 'add', 'path': '/extra/vif_port_id',
                             'value': first_vif_id}]),
                 mock.call(second_ironic_port_uuid,
                           [{'op': 'add', 'path': '/extra/vif_port_id',
                             'value': second_vif_id}]))
        mock_port_udt.assert_has_calls(calls, any_order=True)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_count_mismatch(self, mock_uvifs, mock_lp,
                                      mock_port_udt):
        """More vifs than ports raises NovaException and no port is
        patched.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port()
        mock_lp.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        # len(network_info) > len(ports)
        network_info = (utils.get_test_network_info() +
                        utils.get_test_network_info())
        self.assertRaises(exception.NovaException,
                          self.driver._plug_vifs, node, instance,
                          network_info)
        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        # assert port.update() was not called
        self.assertFalse(mock_port_udt.called)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_no_network_info(self, mock_uvifs, mock_lp,
                                       mock_port_udt):
        """An empty network_info still unplugs and lists ports, but never
        patches any port.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port()
        mock_lp.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        network_info = []
        self.driver._plug_vifs(node, instance, network_info)
        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        # assert port.update() was not called
        self.assertFalse(mock_port_udt.called)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_unplug_vifs(self, mock_node, mock_update):
        """unplug_vifs() removes extra/vif_port_id from each associated
        port.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port(extra={'vif_port_id': 'fake-vif'})
        mock_node.get.return_value = node
        mock_node.list_ports.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        expected_patch = [{'op': 'remove', 'path':
                           '/extra/vif_port_id'}]
        self.driver.unplug_vifs(instance,
                                utils.get_test_network_info())
        # asserts
        mock_node.get.assert_called_once_with(
            node_uuid, fields=ironic_driver._NODE_FIELDS)
        mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
        mock_update.assert_called_once_with(port.uuid, expected_patch)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_unplug_vifs_port_not_associated(self, mock_node, mock_update):
        """A port with no vif_port_id in extra is left untouched."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port(extra={})
        mock_node.get.return_value = node
        mock_node.list_ports.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        self.driver.unplug_vifs(instance, utils.get_test_network_info())
        mock_node.get.assert_called_once_with(
            node_uuid, fields=ironic_driver._NODE_FIELDS)
        mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
        # assert port.update() was not called
        self.assertFalse(mock_update.called)
@mock.patch.object(FAKE_CLIENT.port, 'update')
def test_unplug_vifs_no_network_info(self, mock_update):
instance = fake_instance.fake_instance_obj(self.ctx)
network_info = []
self.driver.unplug_vifs(instance, network_info)
# assert port.update() was not called
self.assertFalse(mock_update.called)
@mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
create=True)
def test_unfilter_instance(self, mock_ui):
instance = fake_instance.fake_instance_obj(self.ctx)
network_info = utils.get_test_network_info()
self.driver.unfilter_instance(instance, network_info)
mock_ui.assert_called_once_with(instance, network_info)
@mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
create=True)
@mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
create=True)
def test_ensure_filtering_rules_for_instance(self, mock_pif, mock_sbf):
instance = fake_instance.fake_instance_obj(self.ctx)
network_info = utils.get_test_network_info()
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
mock_sbf.assert_called_once_with(instance, network_info)
mock_pif.assert_called_once_with(instance, network_info)
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_instance_security_rules', create=True)
def test_refresh_instance_security_rules(self, mock_risr):
instance = fake_instance.fake_instance_obj(self.ctx)
self.driver.refresh_instance_security_rules(instance)
mock_risr.assert_called_once_with(instance)
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_instance_security_rules', create=True)
def test_refresh_security_group_rules(self, mock_risr):
fake_group = 'fake-security-group-members'
self.driver.refresh_instance_security_rules(fake_group)
mock_risr.assert_called_once_with(fake_group)
@mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
@mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(objects.Instance, 'save')
def _test_rebuild(self, mock_save, mock_get, mock_driver_fields,
mock_set_pstate, mock_looping, mock_wait_active,
preserve=False):
node_uuid = uuidutils.generate_uuid()
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
instance_type_id=5)
mock_get.return_value = node
image_meta = ironic_utils.get_test_image_meta()
flavor_id = 5
flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid,
node=node_uuid,
instance_type_id=flavor_id)
instance.flavor = flavor
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
self.driver.rebuild(
context=self.ctx, instance=instance, image_meta=image_meta,
injected_files=None, admin_password=None, bdms=None,
detach_block_devices=None, attach_block_devices=None,
preserve_ephemeral=preserve)
mock_save.assert_called_once_with(
expected_task_state=[task_states.REBUILDING])
mock_driver_fields.assert_called_once_with(
node, instance,
test.MatchType(objects.ImageMeta),
flavor, preserve)
mock_set_pstate.assert_called_once_with(node_uuid,
ironic_states.REBUILD)
mock_looping.assert_called_once_with(mock_wait_active, instance)
fake_looping_call.start.assert_called_once_with(
interval=CONF.ironic.api_retry_interval)
fake_looping_call.wait.assert_called_once_with()
def test_rebuild_preserve_ephemeral(self):
self._test_rebuild(preserve=True)
def test_rebuild_no_preserve_ephemeral(self):
self._test_rebuild(preserve=False)
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
@mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(objects.Instance, 'save')
def test_rebuild_failures(self, mock_save, mock_get, mock_driver_fields,
mock_set_pstate):
node_uuid = uuidutils.generate_uuid()
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
instance_type_id=5)
mock_get.return_value = node
image_meta = ironic_utils.get_test_image_meta()
flavor_id = 5
flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid,
node=node_uuid,
instance_type_id=flavor_id)
instance.flavor = flavor
exceptions = [
exception.NovaException(),
ironic_exception.BadRequest(),
ironic_exception.InternalServerError(),
]
for e in exceptions:
mock_set_pstate.side_effect = e
self.assertRaises(exception.InstanceDeployFailure,
self.driver.rebuild,
context=self.ctx, instance=instance, image_meta=image_meta,
injected_files=None, admin_password=None, bdms=None,
detach_block_devices=None, attach_block_devices=None)
@mock.patch.object(FAKE_CLIENT.node, 'get')
def _test_network_binding_host_id(self, is_neutron, mock_get):
node_uuid = uuidutils.generate_uuid()
hostname = 'ironic-compute'
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid,
host=hostname)
if is_neutron:
provider = 'neutron'
expected = None
else:
provider = 'none'
expected = hostname
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
instance_type_id=5,
network_provider=provider)
mock_get.return_value = node
host_id = self.driver.network_binding_host_id(self.ctx, instance)
self.assertEqual(expected, host_id)
def test_network_binding_host_id_neutron(self):
self._test_network_binding_host_id(True)
def test_network_binding_host_id_none(self):
self._test_network_binding_host_id(False)
@mock.patch.object(instance_metadata, 'InstanceMetadata')
@mock.patch.object(configdrive, 'ConfigDriveBuilder')
class IronicDriverGenerateConfigDriveTestCase(test.NoDBTestCase):
@mock.patch.object(cw, 'IronicClientWrapper',
lambda *_: FAKE_CLIENT_WRAPPER)
def setUp(self):
super(IronicDriverGenerateConfigDriveTestCase, self).setUp()
self.flags(**IRONIC_FLAGS)
self.driver = ironic_driver.IronicDriver(None)
self.driver.virtapi = fake.FakeVirtAPI()
self.ctx = nova_context.get_admin_context()
node_uuid = uuidutils.generate_uuid()
self.node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
self.instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
self.network_info = utils.get_test_network_info()
def test_generate_configdrive(self, mock_cd_builder, mock_instance_meta):
mock_instance_meta.return_value = 'fake-instance'
mock_make_drive = mock.MagicMock(make_drive=lambda *_: None)
mock_cd_builder.return_value.__enter__.return_value = mock_make_drive
self.driver._generate_configdrive(self.instance, self.node,
self.network_info)
mock_cd_builder.assert_called_once_with(instance_md='fake-instance')
mock_instance_meta.assert_called_once_with(self.instance,
network_info=self.network_info, extra_md={}, content=None)
def test_generate_configdrive_fail(self, mock_cd_builder,
mock_instance_meta):
mock_cd_builder.side_effect = exception.ConfigDriveMountFailed(
operation='foo', error='error')
mock_instance_meta.return_value = 'fake-instance'
mock_make_drive = mock.MagicMock(make_drive=lambda *_: None)
mock_cd_builder.return_value.__enter__.return_value = mock_make_drive
self.assertRaises(exception.ConfigDriveMountFailed,
self.driver._generate_configdrive,
self.instance, self.node, self.network_info)
mock_cd_builder.assert_called_once_with(instance_md='fake-instance')
mock_instance_meta.assert_called_once_with(self.instance,
network_info=self.network_info, extra_md={}, content=None)
| 47.169014
| 79
| 0.647892
| 9,319
| 80,376
| 5.219122
| 0.05462
| 0.030717
| 0.051196
| 0.038489
| 0.800831
| 0.760018
| 0.726402
| 0.698378
| 0.668401
| 0.646648
| 0
| 0.00273
| 0.261795
| 80,376
| 1,703
| 80
| 47.196712
| 0.816985
| 0.032124
| 0
| 0.599006
| 0
| 0
| 0.07734
| 0.025411
| 0
| 0
| 0
| 0
| 0.156139
| 1
| 0.07665
| false
| 0.004258
| 0.018453
| 0.002839
| 0.1022
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
0a0209ad5dbb303b1a095721bb81092d757703f0
| 399
|
py
|
Python
|
numba_compile.py
|
saturdays/hyperlearn
|
42b80c2b83c43d8bb84730efc3a9614e6affc01d
|
[
"BSD-3-Clause"
] | null | null | null |
numba_compile.py
|
saturdays/hyperlearn
|
42b80c2b83c43d8bb84730efc3a9614e6affc01d
|
[
"BSD-3-Clause"
] | null | null | null |
numba_compile.py
|
saturdays/hyperlearn
|
42b80c2b83c43d8bb84730efc3a9614e6affc01d
|
[
"BSD-3-Clause"
] | null | null | null |
print("******* Now compiling Numba and LLVM code..... *******")
print("******* This can be VERY SLOW. Please wait.... *******\n"
"Progress: |||||||||||||||", end = "")
from hyperlearn.numba.funcs import *
print("|||||||||||||||", end = "")
from hyperlearn.utils import *
print("|||||||||||||||")
from hyperlearn.stats import *
print("******* Code has been successfully compiled!:) *******")
| 23.470588
| 64
| 0.516291
| 40
| 399
| 5.15
| 0.675
| 0.203884
| 0.165049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135338
| 399
| 16
| 65
| 24.9375
| 0.597101
| 0
| 0
| 0
| 0
| 0
| 0.550251
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.555556
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 4
|
0a1177d84612d9949f365edb97453814bb6bf39b
| 5,528
|
py
|
Python
|
go_core/goboard_test.py
|
DamonDeng/thetago
|
2dba48460aaf3e3544b6d228deb4cff0d0fd7123
|
[
"MIT"
] | null | null | null |
go_core/goboard_test.py
|
DamonDeng/thetago
|
2dba48460aaf3e3544b6d228deb4cff0d0fd7123
|
[
"MIT"
] | null | null | null |
go_core/goboard_test.py
|
DamonDeng/thetago
|
2dba48460aaf3e3544b6d228deb4cff0d0fd7123
|
[
"MIT"
] | null | null | null |
from go_core.goboard import GoBoard
from gosgf import Sgf_game
from go_core.sequence_goboard import SequenceGoBoard
from go_core.array_goboard import ArrayGoBoard
from data_loader.kgs_zip_reader import KGSZipReader
import numpy as np
import time
def speed_testing():
# reader = KGSZipReader('data')
# sgf_generator = reader.get_generator()
# time_start=time.time()
# for i in range(100):
# try:
# if i%100 == 0:
# print('current game number: ' + str(i))
# (file_name, sgf_name, sgf_content) = sgf_generator.next()
# target_board = GoBoard(19)
# apply_sgf_content_to(file_name, sgf_name, sgf_content, target_board)
# except StopIteration:
# print('finished all the files')
# break
# time_end=time.time()
# print ('time for GoBoard: '+ str(time_end-time_start))
reader = KGSZipReader('data')
sgf_generator = reader.get_generator()
time_start=time.time()
for i in range(100):
try:
if i%100 == 0:
print('current game number: ' + str(i))
(file_name, sgf_name, sgf_content) = sgf_generator.next()
target_board = ArrayGoBoard(19)
apply_sgf_content_to(file_name, sgf_name, sgf_content, target_board)
except StopIteration:
print('finished all the files')
break
time_end=time.time();
print ('time for ArrayGoBoard: '+ str(time_end-time_start))
# reader = KGSZipReader('data')
# sgf_generator = reader.get_generator()
# time_start=time.time()
# for i in range(100):
# try:
# if i%100 == 0:
# print('current game number: ' + str(i))
# (file_name, sgf_name, sgf_content) = sgf_generator.next()
# target_board = SequenceGoBoard(19)
# apply_sgf_content_to(file_name, sgf_name, sgf_content, target_board)
# except StopIteration:
# print('finished all the files')
# break
# time_end=time.time();
# print ('time for SequenceGoBoard: '+ str(time_end-time_start))
def apply_sgf_content_to(file_name, sgf_name, sgf_content, go_board):
sgf = Sgf_game.from_string(sgf_content)
main_sequence_iter = sgf.main_sequence_iter()
if sgf.get_handicap() != None and sgf.get_handicap() != 0:
# print('handling handicap')
for setup in sgf.get_root().get_setup_stones():
for move in setup:
go_board.apply_move('b', move)
for item in main_sequence_iter:
color, move = item.get_move()
if not color is None and not move is None:
# print('applying move:' + str(color) + ' move:' + str(move))
go_board.apply_move(color, move)
def testing():
reader = KGSZipReader('data')
sgf_generator = reader.get_generator()
for i in range(100000):
try:
if i%100 == 0:
print('current game number: ' + str(i))
(file_name, sgf_name, sgf_content) = sgf_generator.next()
apply_sgf_content(file_name, sgf_name, sgf_content)
except StopIteration:
print('finished all the files')
break
def apply_sgf_content(file_name, sgf_name, sgf_content):
sgf = Sgf_game.from_string(sgf_content)
main_sequence_iter = sgf.main_sequence_iter()
go_board = GoBoard(19)
# sequence_go_board = SequenceGoBoard(19)
array_go_board = ArrayGoBoard(19)
if sgf.get_handicap() != None and sgf.get_handicap() != 0:
# print('handling handicap')
for setup in sgf.get_root().get_setup_stones():
for move in setup:
go_board.apply_move('b', move)
array_go_board.apply_move('b', move)
for item in main_sequence_iter:
color, move = item.get_move()
if not color is None and not move is None:
# print('applying move:' + str(color) + ' move:' + str(move))
go_board.apply_move(color, move)
array_go_board.apply_move(color, move)
go_board_result = go_board.get_array_result()
array_go_board_result = array_go_board.get_array_result()
if not array_equal(go_board_result, array_go_board_result):
print('inconsist!!!!!' + file_name)
print('sgf file: ' + sgf_name)
print('color:' + str(color))
print('move: ' + str(move))
print(go_board)
print(array_go_board)
def array_equal(first_array, second_array):
i_range = len(first_array)
j_range = len(first_array)
for i in range(i_range):
for j in range(j_range):
if not first_array[i][j] == second_array[i][j]:
return False
return True
def move_array_test():
reader = KGSZipReader('data')
sgf_generator = reader.get_generator()
for i in range(10):
try:
print('current game number: ' + str(i))
(file_name, sgf_name, sgf_content) = sgf_generator.next()
go_board = ArrayGoBoard(19)
sgf = Sgf_game.from_string(sgf_content)
main_sequence_iter = sgf.main_sequence_iter()
if sgf.get_handicap() != None and sgf.get_handicap() != 0:
# print('handling handicap')
for setup in sgf.get_root().get_setup_stones():
for move in setup:
go_board.apply_move('b', move)
for item in main_sequence_iter:
color, move = item.get_move()
if not color is None and not move is None:
# print('applying move:' + str(color) + ' move:' + str(move))
go_board.apply_move(color, move)
move_array = go_board.get_move_array(8, color)
print ('-----------------------------')
print (str(move_array[0]))
except StopIteration:
print('finished all the files')
break
# testing()
# speed_testing()
move_array_test()
| 29.248677
| 76
| 0.653401
| 772
| 5,528
| 4.411917
| 0.11399
| 0.045214
| 0.035526
| 0.048444
| 0.765414
| 0.74633
| 0.721374
| 0.721374
| 0.693188
| 0.66559
| 0
| 0.012155
| 0.226122
| 5,528
| 189
| 77
| 29.248677
| 0.784011
| 0.243488
| 0
| 0.543689
| 0
| 0
| 0.056239
| 0.007
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058252
| false
| 0
| 0.067961
| 0
| 0.145631
| 0.145631
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
0a3a30e7909ba3672e083498c34575db14a1c3bf
| 97
|
py
|
Python
|
StudentData/apps.py
|
backstab319/CollegeDataManagement
|
39864efc3de706f9479f6fff29c1a1a9326c7b73
|
[
"BSD-3-Clause"
] | null | null | null |
StudentData/apps.py
|
backstab319/CollegeDataManagement
|
39864efc3de706f9479f6fff29c1a1a9326c7b73
|
[
"BSD-3-Clause"
] | null | null | null |
StudentData/apps.py
|
backstab319/CollegeDataManagement
|
39864efc3de706f9479f6fff29c1a1a9326c7b73
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
class StudentdataConfig(AppConfig):
name = 'StudentData'
| 16.166667
| 35
| 0.773196
| 10
| 97
| 7.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154639
| 97
| 5
| 36
| 19.4
| 0.914634
| 0
| 0
| 0
| 0
| 0
| 0.113402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
0a41d17bc70163b5daa211227b33794caf96d50f
| 72
|
py
|
Python
|
datefns/__init__.py
|
ktr/datefns
|
0b9ac5f36578894705a0cf5bcd8dfd15c00e967b
|
[
"MIT"
] | null | null | null |
datefns/__init__.py
|
ktr/datefns
|
0b9ac5f36578894705a0cf5bcd8dfd15c00e967b
|
[
"MIT"
] | null | null | null |
datefns/__init__.py
|
ktr/datefns
|
0b9ac5f36578894705a0cf5bcd8dfd15c00e967b
|
[
"MIT"
] | null | null | null |
from .datefns import *
from .timefns import *
__version__ = '0.0.1a8'
| 12
| 23
| 0.694444
| 10
| 72
| 4.6
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 0.180556
| 72
| 5
| 24
| 14.4
| 0.711864
| 0
| 0
| 0
| 0
| 0
| 0.098592
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
0a42ffe3ebc08032806b7c5e582485e739b8efe2
| 145
|
py
|
Python
|
python/ex17.py
|
kazushiyuuki/Lab-ICC
|
f84ac28b230c9a43d1d323cf0826237c39608429
|
[
"MIT"
] | null | null | null |
python/ex17.py
|
kazushiyuuki/Lab-ICC
|
f84ac28b230c9a43d1d323cf0826237c39608429
|
[
"MIT"
] | null | null | null |
python/ex17.py
|
kazushiyuuki/Lab-ICC
|
f84ac28b230c9a43d1d323cf0826237c39608429
|
[
"MIT"
] | null | null | null |
a = int(input())
b = int(input())
c = int(input())
max = a
if max < b:
max = b
if max < c:
max = c
elif max < c:
max = c
print(max)
| 11.153846
| 16
| 0.489655
| 28
| 145
| 2.535714
| 0.321429
| 0.225352
| 0.197183
| 0.225352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.324138
| 145
| 13
| 17
| 11.153846
| 0.72449
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
0a49c12160e1c40311ffe4ef7424ab453f8bd70b
| 561
|
py
|
Python
|
pyNastran/op2/errors.py
|
als0052/pyNastran
|
8493323c30475d1fc3238eed7480bfe9015ce233
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/op2/errors.py
|
als0052/pyNastran
|
8493323c30475d1fc3238eed7480bfe9015ce233
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/op2/errors.py
|
als0052/pyNastran
|
8493323c30475d1fc3238eed7480bfe9015ce233
|
[
"BSD-3-Clause"
] | null | null | null |
from pyNastran.f06.errors import FatalError
class FortranMarkerError(Exception):
pass
class EmptyRecordError(SyntaxError):
pass
class SortCodeError(RuntimeError):
pass
class DeviceCodeError(SyntaxError):
pass
class MultipleSolutionNotImplementedError(NotImplementedError):
pass
class MixedVersionCard(RuntimeError):
pass
class SixtyFourBitError(NotImplementedError):
pass
class OverwriteTableError(RuntimeError):
pass
class DoubleCardError(RuntimeError):
pass
class EmptyCardError(RuntimeError):
pass
| 13.357143
| 63
| 0.775401
| 46
| 561
| 9.456522
| 0.478261
| 0.186207
| 0.193103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004283
| 0.167558
| 561
| 41
| 64
| 13.682927
| 0.927195
| 0
| 0
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.47619
| 0.047619
| 0
| 0.52381
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
0a61c71d902ed1dcf85b9656ab3824848dca88bd
| 25
|
py
|
Python
|
tests/__init__.py
|
trickeydan/pepper2
|
3aba1c74568cd0a04c9178caba26e9238c90e9ba
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
trickeydan/pepper2
|
3aba1c74568cd0a04c9178caba26e9238c90e9ba
|
[
"MIT"
] | 22
|
2019-12-22T20:11:24.000Z
|
2020-01-18T19:09:11.000Z
|
tests/__init__.py
|
j5api/pepper2
|
3aba1c74568cd0a04c9178caba26e9238c90e9ba
|
[
"MIT"
] | null | null | null |
"""Tests for pepper2."""
| 12.5
| 24
| 0.6
| 3
| 25
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.12
| 25
| 1
| 25
| 25
| 0.636364
| 0.72
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
0a66b427bbb59d08092115e691f648c245d168f9
| 344
|
py
|
Python
|
src/html_generators/_utils.py
|
quadrant-newmedia/html_generators
|
c78bb28518efd212a13541c794e8ae426818bb9e
|
[
"MIT"
] | 1
|
2022-01-19T11:02:00.000Z
|
2022-01-19T11:02:00.000Z
|
src/html_generators/_utils.py
|
quadrant-newmedia/html_generators
|
c78bb28518efd212a13541c794e8ae426818bb9e
|
[
"MIT"
] | null | null | null |
src/html_generators/_utils.py
|
quadrant-newmedia/html_generators
|
c78bb28518efd212a13541c794e8ae426818bb9e
|
[
"MIT"
] | 1
|
2022-01-19T11:02:08.000Z
|
2022-01-19T11:02:08.000Z
|
'''
Various utility methods for building html attributes.
'''
def styles(*styles):
'''Join multiple "conditional styles" and return a single style attribute'''
return '; '.join(filter(None, styles))
def classes(*classes):
'''Join multiple "conditional classes" and return a single class attribute'''
return ' '.join(filter(None, classes))
| 34.4
| 78
| 0.726744
| 43
| 344
| 5.813953
| 0.511628
| 0.096
| 0.184
| 0.128
| 0.232
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133721
| 344
| 10
| 79
| 34.4
| 0.838926
| 0.569767
| 0
| 0
| 0
| 0
| 0.023077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
6a5f4c36218a0cd62343e835f253ab69d9caa7b4
| 23
|
py
|
Python
|
packages/snet_cli/snet/snet_cli/version.py
|
Vivek205/snet-cli
|
e5bf64ab6086af71eafc18b86040fa91ad2c5e9f
|
[
"MIT"
] | 1
|
2021-02-16T17:03:54.000Z
|
2021-02-16T17:03:54.000Z
|
packages/snet_cli/snet/snet_cli/version.py
|
Vivek205/snet-cli
|
e5bf64ab6086af71eafc18b86040fa91ad2c5e9f
|
[
"MIT"
] | null | null | null |
packages/snet_cli/snet/snet_cli/version.py
|
Vivek205/snet-cli
|
e5bf64ab6086af71eafc18b86040fa91ad2c5e9f
|
[
"MIT"
] | null | null | null |
__version__ = "v1.1.6"
| 11.5
| 22
| 0.652174
| 4
| 23
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.130435
| 23
| 1
| 23
| 23
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
6a7f93bb57b52cfb86a0db8287aed9b39b1c35bb
| 122
|
py
|
Python
|
packages/dask-task-models-library/src/dask_task_models_library/__init__.py
|
elisabettai/osparc-simcore
|
ad7b6e05111b50fe95e49306a992170490a7247f
|
[
"MIT"
] | null | null | null |
packages/dask-task-models-library/src/dask_task_models_library/__init__.py
|
elisabettai/osparc-simcore
|
ad7b6e05111b50fe95e49306a992170490a7247f
|
[
"MIT"
] | 55
|
2018-05-15T09:47:00.000Z
|
2022-03-31T06:56:50.000Z
|
packages/dask-task-models-library/src/dask_task_models_library/__init__.py
|
mrnicegyu11/osparc-simcore
|
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
|
[
"MIT"
] | 1
|
2020-04-22T15:06:58.000Z
|
2020-04-22T15:06:58.000Z
|
import pkg_resources
__version__: str = pkg_resources.get_distribution(
"simcore-dask-task-models-library"
).version
| 20.333333
| 50
| 0.795082
| 15
| 122
| 6
| 0.8
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106557
| 122
| 5
| 51
| 24.4
| 0.825688
| 0
| 0
| 0
| 0
| 0
| 0.262295
| 0.262295
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
6a87e55dfbcff8cc9517854eb5c942b03924efa5
| 1,125
|
py
|
Python
|
utils/messages.py
|
LUNA761/RickBot
|
8798e08cd1e05c0b3440e27ce0e3d48f4b9b6f12
|
[
"MIT"
] | 1
|
2022-03-03T21:38:34.000Z
|
2022-03-03T21:38:34.000Z
|
utils/messages.py
|
LUNA761/RickBot
|
8798e08cd1e05c0b3440e27ce0e3d48f4b9b6f12
|
[
"MIT"
] | null | null | null |
utils/messages.py
|
LUNA761/RickBot
|
8798e08cd1e05c0b3440e27ce0e3d48f4b9b6f12
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) 2022, Zach Lagden
All rights reserved.
"""
import discord
from discord.ext import commands
from .embed import Embed
async def cb_reply(ctx: commands.Context, content: str, mention: bool = False):
return await ctx.message.reply(embed=Embed(description=f"`{content}`").raw(), mention_author=mention)
async def cb_reply_edit(msg: discord.Message, content: str, mention: bool = False):
return await msg.edit(embed=Embed(description=f"`{content}`").raw())
async def cb_send(ctx: commands.Context, content: str, mention: bool = False):
return await ctx.send(embed=Embed(description=f"`{content}`").raw(), mention_author=mention)
async def e_reply(ctx: commands.Context, content: str, mention: bool = False):
return await ctx.message.reply(embed=Embed(description=content).raw(), mention_author=mention)
async def reply(ctx: commands.Context, content: str, mention: bool = False):
return await ctx.message.reply(content, mention_author=mention)
async def send(ctx: commands.Context, content: str, mention: bool = False):
return await ctx.send(content, mention_author=mention)
| 33.088235
| 105
| 0.742222
| 157
| 1,125
| 5.254777
| 0.229299
| 0.058182
| 0.123636
| 0.152727
| 0.767273
| 0.733333
| 0.694545
| 0.603636
| 0.603636
| 0.603636
| 0
| 0.004065
| 0.125333
| 1,125
| 33
| 106
| 34.090909
| 0.83435
| 0.046222
| 0
| 0
| 0
| 0
| 0.030986
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
0a8b1f9750c9b351dae0f0f98acadcb01876caf0
| 239
|
py
|
Python
|
server/users/serializers/organizations.py
|
yizhang7210/Syllable
|
0536763a21db9532fc73cd32d03a7732d73f4ab8
|
[
"MIT"
] | null | null | null |
server/users/serializers/organizations.py
|
yizhang7210/Syllable
|
0536763a21db9532fc73cd32d03a7732d73f4ab8
|
[
"MIT"
] | 13
|
2018-09-29T21:34:25.000Z
|
2018-12-15T18:54:52.000Z
|
server/users/serializers/organizations.py
|
yizhang7210/Syllable
|
0536763a21db9532fc73cd32d03a7732d73f4ab8
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from users.models.organizations import Organization
class OrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = Organization
fields = ('id', 'name', 'domain')
| 26.555556
| 58
| 0.74477
| 23
| 239
| 7.695652
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175732
| 239
| 8
| 59
| 29.875
| 0.898477
| 0
| 0
| 0
| 0
| 0
| 0.050209
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
0ad0dfcdad536b5f9b0aab980c6f0d035a99381d
| 1,235
|
py
|
Python
|
tests/test_plugin_zattoo.py
|
eleeeeeee/abc
|
5d81d68c7a47e931f050632ae7cddb3b044971b4
|
[
"BSD-2-Clause"
] | 1
|
2017-11-26T18:48:29.000Z
|
2017-11-26T18:48:29.000Z
|
tests/test_plugin_zattoo.py
|
eleeeeeee/abc
|
5d81d68c7a47e931f050632ae7cddb3b044971b4
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_plugin_zattoo.py
|
eleeeeeee/abc
|
5d81d68c7a47e931f050632ae7cddb3b044971b4
|
[
"BSD-2-Clause"
] | 1
|
2021-06-03T23:08:48.000Z
|
2021-06-03T23:08:48.000Z
|
import unittest
from streamlink.plugins.zattoo import Zattoo
class TestPluginZattoo(unittest.TestCase):
def test_can_handle_url(self):
# ewe live
self.assertTrue(Zattoo.can_handle_url('http://tvonline.ewe.de/watch/daserste'))
self.assertTrue(Zattoo.can_handle_url('http://tvonline.ewe.de/watch/zdf'))
# netcologne live
self.assertTrue(Zattoo.can_handle_url('https://nettv.netcologne.de/watch/daserste'))
self.assertTrue(Zattoo.can_handle_url('https://nettv.netcologne.de/watch/zdf'))
# zattoo live
self.assertTrue(Zattoo.can_handle_url('https://zattoo.com/watch/daserste'))
self.assertTrue(Zattoo.can_handle_url('https://zattoo.com/watch/zdf'))
# zattoo vod
self.assertTrue(Zattoo.can_handle_url('https://zattoo.com/ondemand/watch/ibR2fpisWFZGvmPBRaKnFnuT-alarm-am-airport'))
self.assertTrue(Zattoo.can_handle_url('https://zattoo.com/ondemand/watch/G8S7JxcewY2jEwAgMzvFWK8c-berliner-schnauzen'))
# shouldn't match
self.assertFalse(Zattoo.can_handle_url('https://ewe.de'))
self.assertFalse(Zattoo.can_handle_url('https://netcologne.de'))
self.assertFalse(Zattoo.can_handle_url('https://zattoo.com'))
| 49.4
| 127
| 0.719838
| 155
| 1,235
| 5.574194
| 0.264516
| 0.125
| 0.166667
| 0.229167
| 0.704861
| 0.704861
| 0.704861
| 0.645833
| 0.543981
| 0.516204
| 0
| 0.004704
| 0.139271
| 1,235
| 24
| 128
| 51.458333
| 0.80809
| 0.051012
| 0
| 0
| 0
| 0
| 0.35506
| 0
| 0
| 0
| 0
| 0
| 0.733333
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
0ae199629e255a55d28132986561c869fd109846
| 238
|
py
|
Python
|
autogl/module/hpo/suggestion/early_stop_algorithm/early_stop_first_trial.py
|
general502570/AutoGL
|
cbac7954db8555af576692384b09d305a4ef67c2
|
[
"MIT"
] | 824
|
2020-11-30T14:38:07.000Z
|
2022-03-19T10:14:04.000Z
|
autogl/module/hpo/suggestion/early_stop_algorithm/early_stop_first_trial.py
|
lucylow/AutoGL
|
e560ca7ef3b35a8b85d9b89063ce5ce7afaed52f
|
[
"MIT"
] | 38
|
2020-12-21T12:32:57.000Z
|
2022-01-31T02:32:05.000Z
|
autogl/module/hpo/suggestion/early_stop_algorithm/early_stop_first_trial.py
|
lucylow/AutoGL
|
e560ca7ef3b35a8b85d9b89063ce5ce7afaed52f
|
[
"MIT"
] | 85
|
2020-12-21T05:16:09.000Z
|
2022-03-28T08:44:22.000Z
|
from suggestion.early_stop_algorithm.abstract_early_stop import (
AbstractEarlyStopAlgorithm,
)
class EarlyStopFirstTrialAlgorithm(AbstractEarlyStopAlgorithm):
def get_early_stop_trials(self, trials):
return [trials[0]]
| 26.444444
| 65
| 0.802521
| 23
| 238
| 8
| 0.695652
| 0.146739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004831
| 0.130252
| 238
| 8
| 66
| 29.75
| 0.884058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
0aef85f46f929aacb67d9c3b5b200885f0cb7792
| 190
|
py
|
Python
|
Sets/Sets_Eliminar.py
|
BrianMarquez3/Python-Course
|
2622b4ddfd687505becfd246e82a2ed0cb9b76f3
|
[
"MIT"
] | 20
|
2020-08-19T23:27:01.000Z
|
2022-02-03T12:02:17.000Z
|
Sets/Sets_Eliminar.py
|
BrianMarquez3/Python-Course
|
2622b4ddfd687505becfd246e82a2ed0cb9b76f3
|
[
"MIT"
] | 1
|
2021-04-10T18:06:05.000Z
|
2021-04-10T18:06:05.000Z
|
Sets/Sets_Eliminar.py
|
BrianMarquez3/Python-Course
|
2622b4ddfd687505becfd246e82a2ed0cb9b76f3
|
[
"MIT"
] | 2
|
2020-12-03T19:35:36.000Z
|
2021-11-10T14:58:39.000Z
|
#Eliminar
conjuntos = set()
conjuntos = {1,2,3,"brian", 4.6}
conjuntos.discard(3)
print (conjuntos)
("==================================================================================")
| 21.111111
| 86
| 0.368421
| 15
| 190
| 4.666667
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034286
| 0.078947
| 190
| 8
| 87
| 23.75
| 0.365714
| 0.042105
| 0
| 0
| 0
| 0
| 0.480663
| 0.453039
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
0af374bcb532b4870abb788973b7ef113795908f
| 242
|
py
|
Python
|
wagtail/wagtailsnippets/apps.py
|
patphongs/wagtail
|
32555f7a1c599c139e0f26c22907c9612af2e015
|
[
"BSD-3-Clause"
] | 3
|
2016-08-17T13:56:36.000Z
|
2019-04-23T19:59:25.000Z
|
wagtail/wagtailsnippets/apps.py
|
patphongs/wagtail
|
32555f7a1c599c139e0f26c22907c9612af2e015
|
[
"BSD-3-Clause"
] | 11
|
2016-08-05T15:43:06.000Z
|
2016-12-16T13:32:23.000Z
|
wagtail/wagtailsnippets/apps.py
|
patphongs/wagtail
|
32555f7a1c599c139e0f26c22907c9612af2e015
|
[
"BSD-3-Clause"
] | 2
|
2017-08-08T01:39:02.000Z
|
2018-05-06T06:16:10.000Z
|
from __future__ import absolute_import, unicode_literals
from django.apps import AppConfig
class WagtailSnippetsAppConfig(AppConfig):
name = 'wagtail.wagtailsnippets'
label = 'wagtailsnippets'
verbose_name = "Wagtail snippets"
| 24.2
| 56
| 0.789256
| 24
| 242
| 7.666667
| 0.708333
| 0.119565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14876
| 242
| 9
| 57
| 26.888889
| 0.893204
| 0
| 0
| 0
| 0
| 0
| 0.223141
| 0.095041
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
e40b8922234e1569224b37ab6fe5115c827b8061
| 299
|
py
|
Python
|
src/compas/datastructures/mesh/operations/__init__.py
|
gonzalocasas/compas
|
2fabc7e5c966a02d823fa453564151e1a1e7e3c6
|
[
"MIT"
] | null | null | null |
src/compas/datastructures/mesh/operations/__init__.py
|
gonzalocasas/compas
|
2fabc7e5c966a02d823fa453564151e1a1e7e3c6
|
[
"MIT"
] | null | null | null |
src/compas/datastructures/mesh/operations/__init__.py
|
gonzalocasas/compas
|
2fabc7e5c966a02d823fa453564151e1a1e7e3c6
|
[
"MIT"
] | null | null | null |
from .collapse import *
from .insert import *
from .split import *
from .swap import *
from .weld import *
from .collapse import __all__ as a
from .insert import __all__ as c
from .split import __all__ as d
from .swap import __all__ as e
from .weld import __all__ as f
__all__ = a + c + d + e + f
| 21.357143
| 34
| 0.722408
| 51
| 299
| 3.764706
| 0.27451
| 0.260417
| 0.286458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210702
| 299
| 13
| 35
| 23
| 0.813559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.909091
| 0
| 0.909091
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
7c38218c6c3c3b7ff9aca60cf305bf546e5e879a
| 66,351
|
py
|
Python
|
ex1_w2v/attentionMain.py
|
minkj1992/Korean_emotion_classification_attention
|
7e09cd8cd9b18c438d7b9032efb8ee37c505a7fa
|
[
"MIT"
] | 3
|
2020-04-24T12:54:37.000Z
|
2021-12-21T02:03:06.000Z
|
ex1_w2v/attentionMain.py
|
minkj1992/Korean_emotion_classification_attention
|
7e09cd8cd9b18c438d7b9032efb8ee37c505a7fa
|
[
"MIT"
] | null | null | null |
ex1_w2v/attentionMain.py
|
minkj1992/Korean_emotion_classification_attention
|
7e09cd8cd9b18c438d7b9032efb8ee37c505a7fa
|
[
"MIT"
] | 3
|
2020-03-10T10:50:03.000Z
|
2021-12-21T02:03:07.000Z
|
import numpy as np
import os, re, csv, math, codecs
from keras.models import Sequential
from keras.layers import Embedding
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer as t
from tqdm import tqdm
from keras.layers import merge, Dense, Input, LSTM, Embedding, Dropout, Activation, SpatialDropout1D
from keras.layers.core import *
from keras.layers.recurrent import LSTM
from keras.layers.wrappers import Bidirectional
from keras.models import *
# to visualize, and to make zero shape matrix
from attention_utils import get_activations, get_data_recurrent
from Attention import Attention
import pandas as pd
from konlpy.tag import Okt as Twitter
from selfword2vec import tokenization
from Anomaly import checkAnomaly_x_y
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import load_model
from numpy import argmax
from keras import backend as K
import tensorflow as tf
from sklearn.metrics import fbeta_score
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.losses import binary_crossentropy
import json
# 1. 모델 저장시키기
# 2. tokenizer와 konlpy morphs 호환 여부 (완성 jupyter note)
# 2'. word2vec 2가지 경우 더 추가 to embedding(final_total word2vec, twitter_translated.vec)
# 3. 변수들 설정하기
np.random.seed(3)
# NUM_WORDS, train으로 input받은 단어의 수
MAX_NB_WORDS = 20000
vocab_size = 0
EMB_DIM = 300
embeddings_index = dict()
# columns = ["ID", "Tweet", "anger", "anticipation", "disgust", "fear", "joy", "love", "optimism", "pessimism","sadness","surprise","trust"]
columns = ["ID","Tweet","분노","기대","혐오스러운","두려움","기쁨","사랑","낙관론","비관론","슬픔","놀라움","믿음"]
##########################################1. 데이터셋 생성하기
train_array = pd.read_csv("/home/minwookje/coding/ex1_w2v/data/tweet/dump/kor_train.txt",sep="\t", header=None,names=columns).values
val_array = pd.read_csv("/home/minwookje/coding/ex1_w2v/data/tweet/dump/kor_dev.txt",sep="\t", header=None,names=columns).values
test_array = pd.read_csv("/home/minwookje/coding/ex1_w2v/data/tweet/dump/kor_test_gold.txt",sep="\t", header=None,names=columns).values
# 판다 shape
print("train_array"+ str(train_array.shape))
print("val_array"+str(val_array.shape))
print("test_array"+str(test_array.shape))
print("Reading data!")
# x, y 분할하기
x_train = train_array[1:,1]
y_train = train_array[1:,2:]
x_val = val_array[1:,1]
y_val = val_array[1:,2:]
x_test = test_array[1:,1]
y_test = test_array[1:,2:]
print("checking Anomaly!!")
x_train, y_train = checkAnomaly_x_y(x_train,y_train)
x_val, y_val = checkAnomaly_x_y(x_val,y_val)
x_test, y_test = checkAnomaly_x_y(x_test,y_test)
print("ANomaly result!")
print("x_train.shape"+ str(x_train.shape))
print("y_train.shape"+ str(y_train.shape))
print("x_val.shape"+ str(x_val.shape))
print("y_val.shape"+ str(y_val.shape))
print("x_test.shape"+ str(x_test.shape))
print("y_test.shape"+ str(y_test.shape))
# print(type(x_train)) np.array로 변형이 필요한가
print("Finished!")
# jupyter notebook
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
tmp = []
train_tmp = []
val_tmp = []
test_tmp = []
max_count = 0
set_words = set()
print("Tokenizing!")
# test의 tmp만 따로 받아주고 나머지는 val train test모두 통합시켜준다.
test_tmp, dummy , set_words = tokenization(x_test)
val_tmp, dummy , set_words = tokenization(x_val)
train_tmp, max_count , set_words = tokenization(x_train)
tmp, dummy , set_words = tokenization(np.hstack([x_train,x_val,x_test]))
print("Tokenizing finished!")
tmp = [] # tmp없애준다
#token shape
print("It's by len()")
print("train_tmp.len():" + str(len(train_tmp))+","+ str(len(train_tmp[0])))
print("val_tmp shape.len():" + str(len(val_tmp))+","+ str(len(val_tmp[0])))
print("test_tmp shape.len():" + str(len(test_tmp))+","+ str(len(test_tmp[0])))
print("max_count:"+str(max_count))
# 문장길이 100으로 맞춘다.
max_count = min(100, max_count)
print("Readding Embedding file")
# embeddings_index == dict(w2v's word: vector)
f = codecs.open('/home/minwookje/coding/ex1_w2v/embedding/1542954106final_total_pos.vec', encoding='utf-8')
for line in tqdm(f):
values = line.rstrip().rsplit(' ')
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Loaded %s word vectors.' % len(embeddings_index))
# word2index table 생성 {token:index}
# 모든 train val test에 사용되는 token별 index table
# train 문장 token 갯수
word_index = len(set_words)
word_tmp_dict = dict()
for i, word in enumerate(set_words):
word_tmp_dict[str(word).replace(" ", "")] = i
word_tmp_dict['0'] = word_index
#dict file 저장
with open('token2index.json','w') as dictionary_file:
json.dump(word_tmp_dict,dictionary_file)
## 문장별 토큰화시킨 녀석에 index를 집어 넣어준다. 이때 pad도 동시에 해준다.
word_vec = []
word_vec_test = []
word_vec_val = []
for sent in train_tmp:
sub = []
for word in sent:
if(len(sub)==max_count):
break
#print(word)
#print(type(str(word)))
#break
# word는 tuple 타입, embeddings_index는 str타입, tuple 타입을 str()화시키면
# 중간에 space가 생성되어 match가 되지 않았다. 이를 해결해주었다.
if(str(word).replace(" ", "") in word_tmp_dict):
sub.append(word_tmp_dict[str(word).replace(" ", "")])
else:
print("sentence index화 실패")
count = max_count - len(sub)
# padding
sub.extend([word_index]*count)
word_vec.append(sub)
## 테스트용 복사본
for sent in test_tmp:
sub = []
for word in sent:
if(len(sub)==max_count):
break
#print(word)
#print(type(str(word)))
#break
# word는 tuple 타입, embeddings_index는 str타입, tuple 타입을 str()화시키면
# 중간에 space가 생성되어 match가 되지 않았다. 이를 해결해주었다.
if(str(word).replace(" ", "") in word_tmp_dict):
sub.append(word_tmp_dict[str(word).replace(" ", "")])
else:
print("sentence index화 실패")
count = max_count - len(sub)
# padding
sub.extend([word_index]*count)
word_vec_test.append(sub)
## 검증용 복사본
for sent in val_tmp:
sub = []
for word in sent:
if(len(sub)==max_count):
break
#print(word)
#print(type(str(word)))
#break
# word는 tuple 타입, embeddings_index는 str타입, tuple 타입을 str()화시키면
# 중간에 space가 생성되어 match가 되지 않았다. 이를 해결해주었다.
if(str(word).replace(" ", "") in word_tmp_dict):
sub.append(word_tmp_dict[str(word).replace(" ", "")])
else:
print("sentence index화 실패")
count = max_count - len(sub)
# padding
sub.extend([word_index]*count)
word_vec_val.append(sub)
print("word_vec shape:" + str(len(word_vec))+","+ str(len(word_vec[0])))
print("word_vec_val shape:" + str(len(word_vec_val))+","+ str(len(word_vec_val[0])))
print("word_vec_test shape:" + str(len(word_vec_test))+","+ str(len(word_vec_test[0])))
# 4번쨰 matrix embedding_matrix {index: vector}
# vocab_size = min(MAX_NB_WORDS, word_index)
vocab_size = word_index
# create a weight matrix for words in training docs
embedding_matrix = np.zeros((vocab_size+1, EMB_DIM))
match_count = 0
unmatch_count = 0
for word, i in word_tmp_dict.items():
if word != '0':
if (word in embeddings_index):
match_count += 1
# embedding_matrix[i] = np.zeros(300)
embedding_matrix[i] = embeddings_index[word]
# embedding_matrix[i] = np.random.uniform(-0.25,0.25,300)
else:
unmatch_count += 1
# embedding_matrix[i] = np.zeros(300)
# embedding_matrix[i] = np.random.uniform(-0.25,0.25,300) ## used for OOV words
embedding_matrix[i] = np.random.uniform(-1.0,1.0,300).astype('float32')
print("match:" + str(match_count))
print("unmatch:" + str(unmatch_count))
embedding_matrix.tofile('index2vec.dat')
# 여기부터
# 1.앞서 단어당 벡터 테이블(v) // embeddings_index, {w2v_word: vector}
# train_word(str(word).replace(" ", "")) == embedding
# 2.train 단어별 index (v) // word_tmp_dict {train_word(str(word).replace(" ", "")):index}
# 3.sentence padding, sentence to index
# 4.index당 vector table (v) //embedding_matrix {index: vector} 이녀석을 embedding weight에 넣어주어야 한다.
# 문장 = [index들 나열 ]
# 즉 embedding_matrix로 index를 seq에 넣어준묹장들을 train에 넣어줘야한다.
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# np.array화
print("word_vec shape:" + str(len(word_vec))+","+ str(len(word_vec[0])))
print("word_vec_val shape:" + str(len(word_vec_val))+","+ str(len(word_vec_val[0])))
print("word_vec_test shape:" + str(len(word_vec_test))+","+ str(len(word_vec_test[0])))
# np.savetxt('word_vec1.txt', word_vec[:500], delimiter=" ", fmt="%s")
# np.savetxt('word_vec_val1.txt', word_vec_val[:500], delimiter=" ", fmt="%s")
# np.savetxt('word_vec_test1.txt', word_vec_test[:500], delimiter=" ", fmt="%s")
word_vec = np.array(word_vec)
word_vec_val = np.array(word_vec_val)
word_vec_test = np.array(word_vec_test)
embedding_matrix = np.matrix(embedding_matrix)
y_train = np.array(y_train)
y_val = np.array(y_val)
y_test = np.array(y_test)
print("word_vec shape:" + str(word_vec.shape))
print("word_vec_val shape:" + str(word_vec_val.shape))
print("word_vec_test shape:" + str(word_vec_test.shape))
print("y_train shape:" + str(y_train.shape))
print("y_val shape:" + str(y_val.shape))
print("y_test shape:" + str(y_test.shape))
print("embedding_matrix shape:" + str(embedding_matrix.shape))
# 확인용
np.savetxt('word_vec.txt', word_vec[:500], delimiter=" ", fmt="%s")
np.savetxt('word_vec_val.txt', word_vec_val[:500], delimiter=" ", fmt="%s")
np.savetxt('word_vec_test.txt', word_vec_test[:500], delimiter=" ", fmt="%s")
np.savetxt('embedding_matrix.txt', embedding_matrix[:600], delimiter=" ", fmt="%s")
# TODO
# 1.padding 23976이 너무 많이 들어간다. 이거 max_count를 정해주어야 한다. 100까지로 줄이자.
# 2.그 다음에는 matrix 사이즈를 맞춰주어야 한다.
# def fit_multilabel(model, X_train, X_val, y_train, y_val):
# y_val = np.array(y_val)
# y_train = np.array(y_train)
# predictions = np.zeros(y_val.shape)
# for i in range(y_val.shape[1]):
# model.fit(X_train, y_train[:, i])
# y_p = model.predict(X_val)
# predictions[:, i] = y_p
# return predictions
# from sklearn.metrics import jaccard_similarity_score
# def jaccard_distance_loss(y_true, y_pred, smooth=100):
# """
# Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
# = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
# The jaccard distance loss is usefull for unbalanced datasets. This has been
# shifted so it converges on 0 and is smoothed to avoid exploding or disapearing
# gradient.
# Ref: https://en.wikipedia.org/wiki/Jaccard_index
# @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
# @author: wassname
# """
# print("loss = %s, %s"%(y_true,y_pred))
# y_pred = K.cast_to_floatx(y_pred)
# y_true = K.cast_to_floatx(y_true)
# intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
# sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
# jac = (intersection + smooth) / (sum_ - intersection + smooth)
# print("loss = %s, %s, %s"%(intersection, sum, jac))
# np.zeros_like(y_true, dtype = object)
# np.zeros_like(y_pred, dtype = object)
# im1 = np.asarray(y_true).astype(np.bool)
# im2 = np.asarray(y_pred).astype(np.bool)
# intersection = np.logical_and(im1, im2)
# union = np.logical_or(im1, im2)
# # intersection = K.sum(np.absolute(y_true * y_pred), axis=-1)
# # sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
# # jac = (intersection + smooth) / (sum_ - intersection + smooth)
# jac = (intersection.sum()+smooth) / (float(union.sum()+smooth))
# print("loss = %s, %s, %s"%(intersection, sum, jac))
# print("intersection")
# print(intersection)
# # print(K.eval(intersection))
# print("sum")
# print(sum)
# # print(K.eval(sum))
# print("jac")
# print(jac)
# # print(K.eval(jac))
# print("K.eval(1-jac)*smooth")
# print((1 - jac) * smooth)
# print(type((1 - jac) * smooth))
# print(K.eval(1-jac)*smooth)
# returnimport tensorflow as tf (1 - jac) * smooth
# def jaccard_distance(y_true, y_pred, smooth=100):
# intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
# sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
# jac = (intersection + smooth) / (sum_ - intersection + smooth)
# return (1 - jac) * smooth
#####################################2. 모델 구성하기
# input_length : 단어의 수 즉 문장의 길이를 나타냅니다
# 임베딩 레이어의 출력 크기는 샘플 수 * output_dim * input_lenth가 됩니다
INPUT_DIM = 300 #wordvec사이즈
max_count = max_count #문장의 max길이
lstm_shape = 250
rate_drop_lstm = 0.3
rate_drop_dense = 0.3
# k_vec = K.variable(word_vec)
# k_vec_val = K.variable(word_vec_val)
# k_vec_test = K.variable(word_vec_test)
# k_y_train = K.variable(y_train)
# k_y_val = K.variable(y_val)
# k_y_test = K.variable(y_test)
# k_embedding_matrix = K.variable(embedding_matrix)
k_vec = word_vec
k_vec_val = word_vec_val
k_vec_test = word_vec_test
k_y_train = y_train
k_y_val = y_val
k_y_test = y_test
k_embedding_matrix = embedding_matrix
# nan delete
index = np.argwhere(np.isnan(k_y_train))[:,0]
index2 = np.argwhere(np.isnan(k_y_val))[:,0]
index3 = np.argwhere(np.isnan(k_y_test))[:,0]
k_y_train = np.delete(k_y_train,index,0)
k_vec = np.delete(k_vec,index,0)
k_y_val = np.delete(k_y_val,index2,0)
k_vec_val = np.delete(k_vec_val,index2,0)
k_vec_test = np.delete(k_vec_test,index3,0)
k_y_test = np.delete(k_y_test,index3,0)
# is nan check
print(np.any(np.isnan(k_vec)))
print(np.any(np.isnan(k_vec_val)))
print(np.any(np.isnan(k_vec_test)))
print(np.any(np.isnan(k_y_train)))
print(np.any(np.isnan(k_y_val)))
print(np.any(np.isnan(k_y_test)))
print(np.any(np.isnan(k_embedding_matrix)))
def jaccard_distance_loss(y_true, y_pred, smooth=100):
"""
Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
= sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
The jaccard distance loss is usefull for unbalanced datasets. This has been
shifted so it converges on 0 and is smoothed to avoid exploding or disapearing
gradient.
Ref: https://en.wikipedia.org/wiki/Jaccard_index
@url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
@author: wassname
"""
epsilon = tf.convert_to_tensor(1e-7, dtype='float32')
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return (1 - jac)
# matrix accu
def jaccard_distance_acc(y_true, y_pred):
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
jac = (intersection) / (sum_ - intersection)
return jac
# def scaled_binary_cross_entropy(y_true,y_pred):
# epsilon = tf.convert_to_tensor(1e-7, dtype='float32')
# r =K.binary_crossentropy(y_true,y_pred)
# return r/(r.max()+epsilon)
def scaled_binary_cross_entropy(y_true,y_pred):
epsilon = tf.convert_to_tensor(1e-7, dtype='float32')
loss =binary_crossentropy(y_true,y_pred)
max_t = K.max(loss)
return loss/(max_t+epsilon)
def fbeta(y_true, y_pred, threshold_shift=0):
    """F-beta score (beta=2, recall-weighted), averaged over the batch.

    Args:
        y_true: binary ground-truth tensor.
        y_pred: predicted probabilities (clipped to [0, 1] defensively).
        threshold_shift: added to predictions before rounding, shifting the
            decision threshold away from 0.5.

    Returns:
        Mean F-beta over the batch (scalar tensor).
    """
    beta = 2
    # just in case of hipster activation at the final layer
    y_pred = K.clip(y_pred, 0, 1)
    # shifting the prediction threshold from .5 if needed
    y_pred_bin = K.round(y_pred + threshold_shift)
    tp = K.sum(K.round(y_true * y_pred_bin), axis=1) + K.epsilon()
    fp = K.sum(K.round(K.clip(y_pred_bin - y_true, 0, 1)), axis=1)
    # Bug fix: false negatives must be counted against the *thresholded*
    # predictions (y_pred_bin), consistent with tp/fp above; the original
    # used the raw y_pred here, undercounting/overcounting fn whenever a
    # probability fell on the other side of the threshold.
    fn = K.sum(K.round(K.clip(y_true - y_pred_bin, 0, 1)), axis=1)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    beta_squared = beta ** 2
    return K.mean((beta_squared + 1) * (precision * recall)
                  / (beta_squared * precision + recall + K.epsilon()))
# Use the Jaccard metric defined above as the model's accuracy metric.
accu = jaccard_distance_acc
# NOTE(review): original Korean comment said training "dies" at batch size 32.
batch_size = 32
l2_reg = 0.0001
activity_l2 = 0.0001
# Define the model
# Earlier attempts at defining the input layer, kept for reference:
# inp = InputLayer(shape=(max_count,), dtype='float32',sparse=True)
# inp = InputLayer(input_shape=(max_count,),sparse=True)
# inp = Input(shape=(max_count,))
inp = Input(shape=(max_count,), dtype='float32')
# Frozen embedding layer initialized from the pre-trained embedding_matrix.
emb = Embedding(input_dim=vocab_size+1, output_dim=EMB_DIM,
trainable=False, weights=[embedding_matrix], input_length=max_count)(inp)
# max_features = vocab_size, maxlen=text_max_words, embed_size=EMB_DIM
# emb = Embedding(input_dim=max_features, input_length = maxlen, output_dim=embed_size)(inp)
# embedding dropout = 0.1
x = SpatialDropout1D(0.1)(emb)
# x = Bidirectional(LSTM(lstm_shape, return_sequences=True, dropout=rate_drop_lstm, recurrent_dropout=rate_drop_lstm, W_regularizer=l2(l2_reg)))(x)
# TODO (translated): try regularizing weight/bias/hidden state too; currently only the kernel is regularized.
x = Bidirectional(LSTM(lstm_shape, return_sequences=True, dropout=rate_drop_lstm, recurrent_dropout=rate_drop_lstm,kernel_regularizer=l2(l2_reg)))(x)
# Custom Attention layer — returns attended features and the attention weights.
x, attention = Attention()(x,rate_drop_dense,l2_reg)
# BatchNOrmalization 추가
# x = BatchNormalization()(x)
# 배치 노멀라이제이션 했을때 결과
# Train on 6474 samples, validate on 867 samples
# Epoch 1/50
# 16/6474 [..............................] - ETA: 20:46 - loss: 1.0745 - jaccard 32/6474 [..............................] - ETA: 11:52 - loss: 1.0172 - jaccard 48/6474 [..............................] - ETA: 8:53 - loss: 0.9787 - jaccard_ 64/6474 [..............................] - ETA: 7:24 - loss: 0.9671 - jaccard_ 80/6474 [..............................] - ETA: 6:30 - loss: 0.9672 - jaccard_ 96/6474 [..............................] - ETA: 5:55 - loss: 0.9630 - jaccard_ 112/6474 [..............................] - ETA: 5:29 - loss: 0.9638 - jaccard_ 128/6474 [..............................] - ETA: 5:10 - loss: 0.9557 - jaccard_ 144/6474 [..............................] - ETA: 4:54 - loss: 0.9461 - jaccard_ 160/6474 [..............................] - ETA: 4:42 - loss: 0.9401 - jaccard_ 176/6474 [..............................] - ETA: 4:31 - loss: 0.9331 - jaccard_ 192/6474 [..............................] - ETA: 4:23 - loss: 0.9287 - jaccard_ 208/6474 [..............................] - ETA: 4:15 - loss: 0.9296 - jaccard_ 224/6474 [>.............................] - ETA: 4:09 - loss: 0.9260 - jaccard_ 240/6474 [>.............................] - ETA: 4:03 - loss: 0.9206 - jaccard_ 256/6474 [>.............................] - ETA: 3:59 - loss: 0.9146 - jaccard_ 272/6474 [>.............................] - ETA: 3:54 - loss: 0.9106 - jaccard_ 288/6474 [>.............................] - ETA: 3:50 - loss: 0.9075 - jaccard_ 304/6474 [>.............................] - ETA: 3:47 - loss: 0.9047 - jaccard_ 320/6474 [>.............................] - ETA: 3:44 - loss: 0.9023 - jaccard_ 336/6474 [>.............................] - ETA: 3:41 - loss: 0.8991 - jaccard_ 352/6474 [>.............................] - ETA: 3:38 - loss: 0.8952 - jaccard_ 368/6474 [>.............................] - ETA: 3:36 - loss: 0.8920 - jaccard_ 384/6474 [>.............................] - ETA: 3:34 - loss: 0.8883 - jaccard_ 400/6474 [>.............................] 
- ETA: 3:32 - loss: 0.8852 - jaccard_ 416/6474 [>.............................] - ETA: 3:29 - loss: 0.8824 - jaccard_ 432/6474 [=>............................] - ETA: 3:28 - loss: 0.8802 - jaccard_ 448/6474 [=>............................] - ETA: 3:27 - loss: 0.8775 - jaccard_ 464/6474 [=>............................] - ETA: 3:25 - loss: 0.8795 - jaccard_ 480/6474 [=>............................] - ETA: 3:23 - loss: 0.8768 - jaccard_ 496/6474 [=>............................] - ETA: 3:22 - loss: 0.8741 - jaccard_ 512/6474 [=>............................] - ETA: 3:20 - loss: 0.8715 - jaccard_ 528/6474 [=>............................] - ETA: 3:18 - loss: 0.8688 - jaccard_ 544/6474 [=>............................] - ETA: 3:17 - loss: 0.8665 - jaccard_ 560/6474 [=>............................] - ETA: 3:15 - loss: 0.8641 - jaccard_ 576/6474 [=>............................] - ETA: 3:14 - loss: 0.8612 - jaccard_ 592/6474 [=>............................] - ETA: 3:13 - loss: 0.8591 - jaccard_ 608/6474 [=>............................] - ETA: 3:11 - loss: 0.8568 - jaccard_ 624/6474 [=>............................] - ETA: 3:10 - loss: 0.8547 - jaccard_ 640/6474 [=>............................] - ETA: 3:09 - loss: 0.8524 - jaccard_ 656/6474 [==>...........................] - ETA: 3:08 - loss: 0.8501 - jaccard_ 672/6474 [==>...........................] - ETA: 3:07 - loss: 0.8476 - jaccard_ 688/6474 [==>...........................] - ETA: 3:05 - loss: 0.8453 - jaccard_ 704/6474 [==>...........................] - ETA: 3:04 - loss: 0.8429 - jaccard_ 720/6474 [==>...........................] - ETA: 3:03 - loss: 0.84 912/6474 [===>..........................] - ETA: 2:53 - loss: 0.8130 - jaccard_distance_acc: 0.1665 - fbeta: 0.2083fbeta: 0.2358
# 6474/6474 [==============================] - 206s 32ms/step - loss: 0.5829 - jaccard_distance_acc: 0.1617 - fbeta: 0.0630 - val_loss: 0.5062 - val_jaccard_distance_acc: 0.1619 - val_fbeta: 0.0180
# Epoch 2/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.4948 - jaccard_distance_acc: 0.1611 - fbeta: 0.0348 - val_loss: 0.4914 - val_jaccard_distance_acc: 0.1796 - val_fbeta: 0.0161
# Epoch 3/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.4834 - jaccard_distance_acc: 0.1609 - fbeta: 0.0316 - val_loss: 0.4861 - val_jaccard_distance_acc: 0.1738 - val_fbeta: 0.0166
# Epoch 4/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.4809 - jaccard_distance_acc: 0.1612 - fbeta: 0.0326 - val_loss: 0.4925 - val_jaccard_distance_acc: 0.1617 - val_fbeta: 0.0176
# Epoch 5/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.4811 - jaccard_distance_acc: 0.1614 - fbeta: 0.0344 - val_loss: 0.4842 - val_jaccard_distance_acc: 0.1642 - val_fbeta: 0.0161
# Epoch 6/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.4808 - jaccard_distance_acc: 0.1606 - fbeta: 0.0327 - val_loss: 0.4809 - val_jaccard_distance_acc: 0.1715 - val_fbeta: 0.0172
# Epoch 7/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.4837 - jaccard_distance_acc: 0.1613 - fbeta: 0.0373 - val_loss: 3.1112 - val_jaccard_distance_acc: 0.2469 - val_fbeta: 0.5642
# Epoch 8/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.5343 - jaccard_distance_acc: 0.1631 - fbeta: 0.0678 - val_loss: 0.5607 - val_jaccard_distance_acc: 0.1851 - val_fbeta: 0.2194
# Epoch 9/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.4915 - jaccard_distance_acc: 0.1605 - fbeta: 0.0310 - val_loss: 0.4925 - val_jaccard_distance_acc: 0.1687 - val_fbeta: 0.0161
# Epoch 10/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.4810 - jaccard_distance_acc: 0.1611 - fbeta: 0.0325 - val_loss: 0.4936 - val_jaccard_distance_acc: 0.1533 - val_fbeta: 0.0161
# Epoch 11/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.4798 - jaccard_distance_acc: 0.1609 - fbeta: 0.0323 - val_loss: 0.4872 - val_jaccard_distance_acc: 0.1572 - val_fbeta: 0.0161
# Epoch 12/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.4754 - jaccard_distance_acc: 0.1606 - fbeta: 0.0309 - val_loss: 0.4837 - val_jaccard_distance_acc: 0.1626 - val_fbeta: 0.0161
# Epoch 13/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.4821 - jaccard_distance_acc: 0.1606 - fbeta: 0.0315 - val_loss: 0.4865 - val_jaccard_distance_acc: 0.1684 - val_fbeta: 0.0161
# Epoch 14/50
# 6474/6474 [==============================] - 194s 30ms/step - loss: 0.4783 - jaccard_distance_acc: 0.1612 - fbeta: 0.0325 - val_loss: 0.4824 - val_jaccard_distance_acc: 0.1702 - val_fbeta: 0.0161
# Epoch 15/50
# 6474/6474 [==============================] - 186s 29ms/step - loss: 0.4774 - jaccard_distance_acc: 0.1615 - fbeta: 0.0329 - val_loss: 0.4809 - val_jaccard_distance_acc: 0.1674 - val_fbeta: 0.0161
# Epoch 16/50
# 6474/6474 [==============================] - 186s 29ms/step - loss: 0.4771 - jaccard_distance_acc: 0.1609 - fbeta: 0.0309 - val_loss: 0.4808 - val_jaccard_distance_acc: 0.1681 - val_fbeta: 0.0161
# Epoch 17/50
# 6474/6474 [==============================] - 193s 30ms/step - loss: 0.4889 - jaccard_distance_acc: 0.1615 - fbeta: 0.0434 - val_loss: 0.4949 - val_jaccard_distance_acc: 0.1591 - val_fbeta: 0.0161
# Epoch 18/50
# 6474/6474 [==============================] - 186s 29ms/step - loss: 0.5120 - jaccard_distance_acc: 0.1625 - fbeta: 0.0496 - val_loss: 0.5535 - val_jaccard_distance_acc: 0.1604 - val_fbeta: 0.0180
# Epoch 19/50
# 6474/6474 [==============================] - 187s 29ms/step - loss: 0.4868 - jaccard_distance_acc: 0.1611 - fbeta: 0.0316 - val_loss: 0.4873 - val_jaccard_distance_acc: 0.1664 - val_fbeta: 0.0161
# Epoch 20/50
# 6474/6474 [==============================] - 190s 29ms/step - loss: 0.4776 - jaccard_distance_acc: 0.1608 - fbeta: 0.0317 - val_loss: 0.4806 - val_jaccard_distance_acc: 0.1640 - val_fbeta: 0.0161
# Epoch 21/50
# 6474/6474 [==============================] - 201s 31ms/step - loss: 0.4749 - jaccard_distance_acc: 0.1610 - fbeta: 0.0307 - val_loss: 0.4834 - val_jaccard_distance_acc: 0.1651 - val_fbeta: 0.0161
# Epoch 22/50
# 6474/6474 [==============================] - 190s 29ms/step - loss: 0.4760 - jaccard_distance_acc: 0.1608 - fbeta: 0.0307 - val_loss: 0.4805 - val_jaccard_distance_acc: 0.1689 - val_fbeta: 0.0161
# Epoch 23/50
# 144/6474 [..............................] - ETA: 2:58 - loss: 0.4965 - jaccard_distance_acc: 0.1596 - fbeta: 0.0278^Z
# Dense(11, activation="sigmoid",activity_regularizer=activity_l2(0.0001))
# Output head: 11 independent sigmoid units (multi-label classification).
x = Dense(11, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
# # 1
# model.compile(loss='binary_crossentropy',
# optimizer='adam',
# metrics=[accu])
# Epoch 1/10
# 32/6474 [..............................] - ETA: 9:22 - loss: 0.7045 - jaccard_ 64/6474 [..............................] - ETA: 6:18 - loss: 0.6864 - jaccard_ 96/6474 [..............................] - ETA: 5:04 - loss: 0.6675 - jaccard_ 128/6474 [..............................] - ETA: 4:24 - loss: 0.6393 - jaccard_ 160/6474 [..............................] - ETA: 3:59 - loss: 0.6099 - jaccard_ 192/6474 [..............................] - ETA: 3:44 - loss: 0.5952 - jaccard_ 224/6474 [>.............................] - ETA: 3:31 - loss: 0.5786 - jaccard_ 256/6474 [>.............................] - ETA: 3:22 - loss: 0.5659 - jaccard_ 288/6474 [>.............................] - ETA: 3:15 - loss: 0.5540 - jaccard_ 320/6474 [>.............................] - ETA: 3:09 - loss: 0.5526 - jaccard_ 352/6474 [>.............................] - ETA: 3:04 - loss: 0.5446 - jaccard_ 384/6474 [>.............................] - ETA: 3:00 - loss: 0.5385 - jaccard_ 416/6474 [>.............................] - ETA: 2:56 - loss: 0.5341 - jaccard_ 448/6474 [=>............................] - ETA: 2:53 - loss: 0.5309 - jaccard_ 480/6474 [=>............................] - ETA: 2:50 - loss: 0.5289 - jaccard_ 512/6474 [=>............................] - ETA: 2:47 - loss: 0.5280 - jaccard_ 544/6474 [=>............................] - ETA: 2:45 - loss: 0.5263 - jaccard_ 576/6474 [=>............................] - ETA: 2:43 - loss: 0.5224 - jaccard_ 608/6474 [=>............................] - ETA: 2:41 - loss: 0.5201 - jaccard_ 640/6474 [=>............................] - ETA: 2:39 - loss: 0.5217 - jaccard_ 672/6474 [==>...........................] - ETA: 2:38 - loss: 0.5178 - jaccard_ 704/6474 [==>...........................] - ETA: 2:37 - loss: 0.5154 - jaccard_ 736/6474 [==>...........................] - ETA: 2:35 - loss: 0.5115 - jaccard_ 768/6474 [==>...........................] - ETA: 2:34 - loss: 0.5113 - jaccard_ 800/6474 [==>...........................] 
- ETA: 2:32 - loss: 0.5100 - jaccard_ 832/6474 [==>...........................] - ETA: 2:31 - loss: 0.5091 - jaccard_ 864/6474 [===>..........................] - ETA: 2:29 - loss: 0.5059 - jaccard_ 896/6474 [===>..........................] - ETA: 2:28 - loss: 0.5033 - jaccard_ 928/6474 [===>..........................] - ETA: 2:27 - loss: 0.5028 - jaccard_ 2496/6474 [==========>...................] - ETA: 1:39 - loss: 0.4840 - jaccard_distance_acc: 2592/6474 [===========>..................] - ETA: 1:36 - loss: 0.2624/6474 [===========>..................] - ETA: 1:36 - loss: 0.4848 - jaccard_2656/6474 [===========>..................] - ETA: 1:35 - loss: 0.4852 - jaccard_2688/6474 [===========>..................] - ETA: 1:34 - loss: 0.4850 - jaccard_2720/6474 [===========>..................] - ETA: 1:33 - loss: 0.4851 - jaccard_2752/6474 [===========>..................] - ETA: 1:32 - loss: 0.4851 - jaccard_2784/6474 [===========>..................] - ETA: 1:31 - loss: 0.4850 - jaccard_2816/6474 [============>.................] - ETA: 1:31 - loss: 0.4847 - jaccard_2848/6474 [============>.................] - ETA: 1:30 - loss: 0.4848 - jaccard_2880/6474 [============>.................] - ETA: 1:29 - loss: 0.4846 - jaccard_2912/6474 [============>.................] - ETA: 1:28 - loss: 0.4849 - jaccard_2944/6474 [============>.................] - ETA: 1:27 - loss: 0.4845 - jaccard_2976/6474 [=====6474/6474 [==============================] - 165s 26ms/step - loss: 0.4775 - jaccard_distance_acc: 0.1631 - val_loss: 0.4660 - val_jaccard_distance_acc: 0.1825.1623
# Epoch 2/10
# 1408/6474 [=====>........................] - ETA: 2:01 - loss: 0.4571 - jaccard_1440/6474 [=====>........................] - ETA: 2:01 - loss: 0.4582 - jaccard_1472/6474 [=====>........................] - ETA: 2:00 - loss: 0.4585 - jaccard_1504/6474 [=====>........................] - ETA: 1:59 - loss: 0.4579 - jaccard_1536/6474 [======>.......................] - ETA: 1:59 - loss: 0.4578 - jaccard_1568/6474 [======>.......................] - ETA: 1:58 - loss: 0.4573 - jaccard_1600/6474 [======>.......................] - ETA: 1:57 - loss: 0.4581 - jaccard_1632/6474 [======>.......................] - ETA: 1:56 - loss: 0.4579 - jaccard_1664/6474 [======>.......................] - ETA: 1:56 - loss: 0.4582 - jaccard_1696/6474 [======>.......................] - ETA: 1:55 - loss: 0.4581 - jaccard_1728/6474 [=======>......................] - ETA: 1:54 - loss: 0.4584 - jaccard_1760/6474 [=======>......................] - ETA: 1:53 - loss: 0.4583 - jaccard_1792/6474 [=======>......................] - ETA: 1:53 - loss: 0.4582 - jaccard_1824/6474 [=======>......................] - ETA: 1:52 - loss: 0.4584 - jaccard_1856/6474 [=======>......................] - ETA: 1:51 - loss: 0.4581 - jaccard_1888/6474 [=======>......................] - ETA: 1:50 - loss: 0.4575 - jaccard_1920/6474 [=======>......................] - ETA: 1:50 - loss: 0.4587 - jaccard_1952/6474 [========>.....................] - ETA: 1:49 - loss: 0.4585 - jaccard_1984/6474 [========>.....................] - ETA: 1:48 - loss: 0.4582 - jaccard_2016/6474 [========>.....................] - ETA: 1:47 - loss: 0.4582 - jaccard_2048/6474 [========>.....................] - ETA: 1:46 - loss: 0.4579 - jaccard_2080/6474 [========>.....................] - ETA: 1:46 - loss: 0.4578 - jaccard_2112/6474 [========>.....................] - ETA: 1:45 - loss: 0.4576 - jaccard_2144/6474 [========>.....................] - ETA: 1:44 - loss: 0.4573 - jaccard_2176/6474 [=========>....................] 
- ETA: 1:43 - loss: 0.4570 - jaccard_2208/6474 [=========>....................] - ETA: 1:43 - loss: 0.4568 - jaccard_2240/6474 [=========>.........4608/6474 [====================>.........] - ETA: 45s - loss: 0.4567 - jaccard_distance_acc: 0.6474/6474 [==============================] - 165s 26ms/step - loss: 0.4560 - jaccard_distance_acc: 0.1827 - val_loss: 0.4383 - val_jaccard_distance_acc: 0.2132
# Epoch 3/10
# 6474/6474 [==============================] - 162s 25ms/step - loss: 0.4378 - jaccard_distance_acc: 0.2064 - val_loss: 0.4245 - val_jaccard_distance_acc: 0.2289
# Epoch 4/10
# 6474/6474 [==============================] - 163s 25ms/step - loss: 0.4251 - jaccard_distance_acc: 0.2249 - val_loss: 0.4217 - val_jaccard_distance_acc: 0.2414
# Epoch 5/10
# 6474/6474 [==============================] - 2968s 458ms/step - loss: 0.4117 - jaccard_distance_acc: 0.2425 - val_loss: 0.4122 - val_jaccard_distance_acc: 0.2415
# Epoch 6/10
# 6474/6474 [==============================] - 168s 26ms/step - loss: 0.4013 - jaccard_distance_acc: 0.2553 - val_loss: 0.4052 - val_jaccard_distance_acc: 0.2642
# Epoch 7/10
# 6474/6474 [==============================] - 169s 26ms/step - loss: 0.3919 - jaccard_distance_acc: 0.2667 - val_loss: 0.4075 - val_jaccard_distance_acc: 0.2673
# Epoch 8/10
# 6474/6474 [==============================] - 163s 25ms/step - loss: 0.3826 - jaccard_distance_acc: 0.2808 - val_loss: 0.4040 - val_jaccard_distance_acc: 0.2709
# Epoch 9/10
# 6474/6474 [==============================] - 163s 25ms/step - loss: 0.3721 - jaccard_distance_acc: 0.2928 - val_loss: 0.4054 - val_jaccard_distance_acc: 0.2911
# Epoch 10/10
# 6474/6474 [==============================] - 163s 25ms/step - loss: 0.3616 - jaccard_distance_acc: 0.3084 - val_loss: 0.4056 - val_jaccard_distance_acc: 0.2884
# 3151/3151 [==============================] - 26s 8ms/step
# 2
# model.compile(loss=jaccard_distance_loss,
# optimizer='adam',
# metrics=[accu])
# Train on 6474 samples, validate on 867 samples
# Epoch 1/10
# 6474/6474 [==============================] - 164s 25ms/step - loss: 0.6940 - jaccard_distance_acc: 0.3060 - val_loss: 0.6702 - val_jaccard_distance_acc: 0.3298
# Epoch 2/10
# 6474/6474 [==============================] - 162s 25ms/step - loss: 0.6902 - jaccard_distance_acc: 0.3098 - val_loss: 0.6702 - val_jaccard_distance_acc: 0.3298
# Epoch 3/10
# 6474/6474 [==============================] - 162s 25ms/step - loss: 0.6902 - jaccard_distance_acc: 0.3098 - val_loss: 0.6702 - val_jaccard_distance_acc: 0.3298
# Epoch 4/10
# 2144/6474 [========>.....................] - ETA: 1:43 - loss: 0.6901 - jaccard_distance_acc: 0.3099^Z
# [1]+ 정지됨 python3 181120_preprocessing.py
# 3
# model.compile(loss='binary_crossentropy',
# optimizer='adam',
# metrics=['accracy'])
# Train on 6474 samples, validate on 867 samples
# Epoch 1/10
# 6474/6474 [==============================] - 163s 25ms/step - loss: 0.4774 - acc: 0.7840 - val_loss: 0.4684 - val_acc: 0.7794
# Epoch 2/10
# 6474/6474 [==============================] - 161s 25ms/step - loss: 0.4563 - acc: 0.7935 - val_loss: 0.4335 - val_acc: 0.8041
# Epoch 3/10
# 6474/6474 [==============================] - 162s 25ms/step - loss: 0.4369 - acc: 0.8056 - val_loss: 0.4264 - val_acc: 0.8064
# Epoch 4/10
# 6474/6474 [==============================] - 162s 25ms/step - loss: 0.4242 - acc: 0.8110 - val_loss: 0.4200 - val_acc: 0.8087
# Epoch 5/10
# 6474/6474 [==============================] - 162s 25ms/step - loss: 0.4104 - acc: 0.8174 - val_loss: 0.4119 - val_acc: 0.8171
# Epoch 6/10
# 6474/6474 [==============================] - 162s 25ms/step - loss: 0.4017 - acc: 0.8241 - val_loss: 0.4063 - val_acc: 0.8188
# Epoch 7/10
# 6474/6474 [==============================] - 730s 113ms/step - loss: 0.3942 - acc: 0.8273 - val_loss: 0.4063 - val_acc: 0.8184
# Epoch 8/10
# 6474/6474 [==============================] - 168s 26ms/step - loss: 0.3857 - acc: 0.8318 - val_loss: 0.4036 - val_acc: 0.8199
# Epoch 9/10
# 6474/6474 [==============================] - 164s 25ms/step - loss: 0.3777 - acc: 0.8355 - val_loss: 0.4021 - val_acc: 0.8219
# Epoch 10/10
# 6474/6474 [==============================] - 179s 28ms/step - loss: 0.3676 - acc: 0.8384 - val_loss: 0.4051 - val_acc: 0.8204
# 3151/3151 [==============================] - 27s 9ms/step
# score : 0.406278005236acc : 0.823230730148
# 4
# model.compile(loss='binary_crossentropy',
# optimizer='rmsprop',
# metrics=['acc', accu])
# Epoch 1/50
# 16/6474 [..............................] - ETA: 31:06 - loss: 0.6962 - jaccard_distance_acc: 0.1575 - f 32/6474 [..............................] - ETA: 18:30 - loss: 0.6798 - jaccard_distance_acc: 0.1541 - f 48/6474 [..............................] - ETA: 14:12 - loss: 0.6345 - jaccard_distance_acc: 0.1517 - f 64/6474 [..............................] - ETA: 12:08 - loss: 0.6052 - jaccard_distance_acc: 0.1490 - f 80/6474 [..............................] - ETA: 10:58 - loss: 0.5777 - jaccard_distance_acc: 0.1464 - f 96/6474 [..............................] - ETA: 10:07 - loss: 0.5617 - jaccard_distance_acc: 0.1479 - f 112/6474 [..............................] - ETA: 9:29 - loss: 0.5565 - jaccard_distance_acc: 0.1488 - fb 128/6474 [..............................] - ETA: 9:03 - loss: 0.5461 - jaccard_distance_acc: 0.1512 - fb 144/6474 [..............................] - ETA: 8:39 - loss: 0.5464 - jaccard_distance_acc: 0.1553 - fb 160/6474 [..............................] - ETA: 8:21 - loss: 0.5346 - jaccard_distance_acc: 0.1541 - fb 176/6474 [..............................] - ETA: 8:07 - loss: 0.5358 - jaccard_distance_acc: 0.1555 - fb 192/6474 [..............................] - ETA: 7:53 - loss: 0.5330 - jaccard_distance_acc: 0.1548 - fb 208/6474 [..............................] - ETA: 7:41 - loss: 0.5300 - jaccard_distance_acc: 0.1539 - fb 224/6474 [>.............................] - ETA: 7:31 - loss: 0.5278 - jaccard_distance_acc: 0.1524 - fb 240/6474 [>.............................] - ETA: 7:22 - loss: 0.5257 - jaccard_distance_acc: 0.1515 - fb 256/6474 [>.............................] - ETA: 7:16 - loss: 0.5219 - jaccard_distance_acc: 0.1515 - fb 272/6476474/6474 [==============================] - 377s 58ms/step - loss: 0.4761 - jaccard_distance_acc: 0.1626 - fbeta: 0.0427 - val_loss: 0.4718 - val_jaccard_distance_acc: 0.1743 - val_fbeta: 0.0150
# Epoch 2/50
# 6474/6474 [==============================] - 367s 57ms/step - loss: 0.4506 - jaccard_distance_acc: 0.1899 - fbeta: 0.1640 - val_loss: 0.4286 - val_jaccard_distance_acc: 0.2238 - val_fbeta: 0.2165
# Epoch 3/50
# 6474/6474 [==============================] - 364s 56ms/step - loss: 0.4303 - jaccard_distance_acc: 0.2180 - fbeta: 0.2692 - val_loss: 0.4274 - val_jaccard_distance_acc: 0.2380 - val_fbeta: 0.2963
# Epoch 4/50
# 6474/6474 [==============================] - 364s 56ms/step - loss: 0.4167 - jaccard_distance_acc: 0.2362 - fbeta: 0.3153 - val_loss: 0.4138 - val_jaccard_distance_acc: 0.2498 - val_fbeta: 0.3165
# Epoch 5/50
# 6474/6474 [==============================] - 364s 56ms/step - loss: 0.4004 - jaccard_distance_acc: 0.2586 - fbeta: 0.3697 - val_loss: 0.4156 - val_jaccard_distance_acc: 0.2428 - val_fbeta: 0.2902
# Epoch 6/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.3892 - jaccard_distance_acc: 0.2726 - fbeta: 0.3920 - val_loss: 0.4030 - val_jaccard_distance_acc: 0.2778 - val_fbeta: 0.3690
# Epoch 7/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.3776 - jaccard_distance_acc: 0.2876 - fbeta: 0.4294 - val_loss: 0.4071 - val_jaccard_distance_acc: 0.2727 - val_fbeta: 0.3754
# Epoch 8/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.3661 - jaccard_distance_acc: 0.3027 - fbeta: 0.4526 - val_loss: 0.4072 - val_jaccard_distance_acc: 0.2715 - val_fbeta: 0.3536
# Epoch 9/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.3536 - jaccard_distance_acc: 0.3184 - fbeta: 0.4749 - val_loss: 0.4045 - val_jaccard_distance_acc: 0.2980 - val_fbeta: 0.4100
# Epoch 10/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.3456 - jaccard_distance_acc: 0.3320 - fbeta: 0.4995 - val_loss: 0.4084 - val_jaccard_distance_acc: 0.3023 - val_fbeta: 0.4141
# Epoch 11/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.3350 - jaccard_distance_acc: 0.3447 - fbeta: 0.5130 - val_loss: 0.4093 - val_jaccard_distance_acc: 0.2999 - val_fbeta: 0.4063
# Epoch 12/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.3227 - jaccard_distance_acc: 0.3613 - fbeta: 0.5413 - val_loss: 0.4135 - val_jaccard_distance_acc: 0.3017 - val_fbeta: 0.4054
# Epoch 13/50
# 6474/6474 [==============================] - 364s 56ms/step - loss: 0.3143 - jaccard_distance_acc: 0.3726 - fbeta: 0.5565 - val_loss: 0.4175 - val_jaccard_distance_acc: 0.3082 - val_fbeta: 0.4300
# Epoch 14/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.3070 - jaccard_distance_acc: 0.3844 - fbeta: 0.5686 - val_loss: 0.4289 - val_jaccard_distance_acc: 0.3154 - val_fbeta: 0.4311
# Epoch 15/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.2980 - jaccard_distance_acc: 0.3942 - fbeta: 0.5858 - val_loss: 0.4412 - val_jaccard_distance_acc: 0.3166 - val_fbeta: 0.4379
# Epoch 16/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.2850 - jaccard_distance_acc: 0.4098 - fbeta: 0.6062 - val_loss: 0.4378 - val_jaccard_distance_acc: 0.3181 - val_fbeta: 0.4349
# Epoch 17/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.2803 - jaccard_distance_acc: 0.4209 - fbeta: 0.6202 - val_loss: 0.4379 - val_jaccard_distance_acc: 0.3192 - val_fbeta: 0.4295
# Epoch 18/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.2729 - jaccard_distance_acc: 0.4319 - fbeta: 0.6358 - val_loss: 0.4504 - val_jaccard_distance_acc: 0.3201 - val_fbeta: 0.4383
# Epoch 19/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.2633 - jaccard_distance_acc: 0.4444 - fbeta: 0.6461 - val_loss: 0.4479 - val_jaccard_distance_acc: 0.3229 - val_fbeta: 0.4409
# Epoch 20/50
# 6474/6474 [==============================] - 364s 56ms/step - loss: 0.2576 - jaccard_distance_acc: 0.4531 - fbeta: 0.6581 - val_loss: 0.4578 - val_jaccard_distance_acc: 0.3308 - val_fbeta: 0.4555
# Epoch 21/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.2511 - jaccard_distance_acc: 0.4639 - fbeta: 0.6706 - val_loss: 0.4673 - val_jaccard_distance_acc: 0.3209 - val_fbeta: 0.4305
# Epoch 22/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.2436 - jaccard_distance_acc: 0.4743 - fbeta: 0.6839 - val_loss: 0.4847 - val_jaccard_distance_acc: 0.3328 - val_fbeta: 0.4618
# Epoch 23/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.2365 - jaccard_distance_acc: 0.4849 - fbeta: 0.6919 - val_loss: 0.4871 - val_jaccard_distance_acc: 0.3315 - val_fbeta: 0.4600
# Epoch 24/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.2320 - jaccard_distance_acc: 0.4910 - fbeta: 0.7011 - val_loss: 0.4853 - val_jaccard_distance_acc: 0.3275 - val_fbeta: 0.4493
# Epoch 25/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.2269 - jaccard_distance_acc: 0.4989 - fbeta: 0.7053 - val_loss: 0.4953 - val_jaccard_distance_acc: 0.3331 - val_fbeta: 0.4499
# Epoch 26/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.2174 - jaccard_distance_acc: 0.5130 - fbeta: 0.7232 - val_loss: 0.4959 - val_jaccard_distance_acc: 0.3457 - val_fbeta: 0.4814
# Epoch 27/50
# 6474/6474 [==============================] - 364s 56ms/step - loss: 0.2148 - jaccard_distance_acc: 0.5194 - fbeta: 0.7295 - val_loss: 0.5119 - val_jaccard_distance_acc: 0.3323 - val_fbeta: 0.4494
# Epoch 28/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.2082 - jaccard_distance_acc: 0.5294 - fbeta: 0.7392 - val_loss: 0.5138 - val_jaccard_distance_acc: 0.3390 - val_fbeta: 0.4622
# Epoch 29/50
# 6474/6474 [==============================] - 361s 56ms/step - loss: 0.2031 - jaccard_distance_acc: 0.5345 - fbeta: 0.7428 - val_loss: 0.5134 - val_jaccard_distance_acc: 0.3390 - val_fbeta: 0.4613
# Epoch 30/50
# 6474/6474 [==============================] - 361s 56ms/step - loss: 0.1986 - jaccard_distance_acc: 0.5456 - fbeta: 0.7544 - val_loss: 0.5245 - val_jaccard_distance_acc: 0.3289 - val_fbeta: 0.4362
# Epoch 31/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.1950 - jaccard_distance_acc: 0.5508 - fbeta: 0.7570 - val_loss: 0.5260 - val_jaccard_distance_acc: 0.3360 - val_fbeta: 0.4513
# Epoch 32/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.1901 - jaccard_distance_acc: 0.5582 - fbeta: 0.7635 - val_loss: 0.5358 - val_jaccard_distance_acc: 0.3401 - val_fbeta: 0.4495
# Epoch 33/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.1874 - jaccard_distance_acc: 0.5636 - fbeta: 0.7708 - val_loss: 0.5402 - val_jaccard_distance_acc: 0.3318 - val_fbeta: 0.4381
# Epoch 34/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.1830 - jaccard_distance_acc: 0.5722 - fbeta: 0.7789 - val_loss: 0.5433 - val_jaccard_distance_acc: 0.3342 - val_fbeta: 0.4462
# Epoch 35/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.1787 - jaccard_distance_acc: 0.5776 - fbeta: 0.7838 - val_loss: 0.5424 - val_jaccard_distance_acc: 0.3500 - val_fbeta: 0.4689
# Epoch 36/50
# 6474/6474 [==============================] - 341s 53ms/step - loss: 0.1774 - jaccard_distance_acc: 0.5819 - fbeta: 0.7876 - val_loss: 0.5405 - val_jaccard_distance_acc: 0.3441 - val_fbeta: 0.4530
# Epoch 37/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1704 - jaccard_distance_acc: 0.5920 - fbeta: 0.7924 - val_loss: 0.5666 - val_jaccard_distance_acc: 0.3288 - val_fbeta: 0.4324
# Epoch 38/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1692 - jaccard_distance_acc: 0.5960 - fbeta: 0.7988 - val_loss: 0.5631 - val_jaccard_distance_acc: 0.3399 - val_fbeta: 0.4492
# Epoch 39/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1657 - jaccard_distance_acc: 0.6000 - fbeta: 0.8028 - val_loss: 0.5748 - val_jaccard_distance_acc: 0.3386 - val_fbeta: 0.4434
# Epoch 40/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1635 - jaccard_distance_acc: 0.6059 - fbeta: 0.8055 - val_loss: 0.5664 - val_jaccard_distance_acc: 0.3453 - val_fbeta: 0.4482
# Epoch 41/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1621 - jaccard_distance_acc: 0.6110 - fbeta: 0.8102 - val_loss: 0.5700 - val_jaccard_distance_acc: 0.3384 - val_fbeta: 0.4450
# Epoch 42/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1555 - jaccard_distance_acc: 0.6179 - fbeta: 0.8133 - val_loss: 0.5807 - val_jaccard_distance_acc: 0.3462 - val_fbeta: 0.4560
# Epoch 43/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1540 - jaccard_distance_acc: 0.6219 - fbeta: 0.8187 - val_loss: 0.5937 - val_jaccard_distance_acc: 0.3437 - val_fbeta: 0.4520
# Epoch 44/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1526 - jaccard_distance_acc: 0.6263 - fbeta: 0.8194 - val_loss: 0.5912 - val_jaccard_distance_acc: 0.3377 - val_fbeta: 0.4440
# Epoch 45/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1491 - jaccard_distance_acc: 0.6319 - fbeta: 0.8246 - val_loss: 0.6023 - val_jaccard_distance_acc: 0.3425 - val_fbeta: 0.4602
# Epoch 46/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1453 - jaccard_distance_acc: 0.6380 - fbeta: 0.8269 - val_loss: 0.5966 - val_jaccard_distance_acc: 0.3441 - val_fbeta: 0.4441
# Epoch 47/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1446 - jaccard_distance_acc: 0.6409 - fbeta: 0.8328 - val_loss: 0.5986 - val_jaccard_distance_acc: 0.3503 - val_fbeta: 0.4610
# Epoch 48/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1432 - jaccard_distance_acc: 0.6418 - fbeta: 0.8332 - val_loss: 0.6044 - val_jaccard_distance_acc: 0.3531 - val_fbeta: 0.4591
# Epoch 49/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1396 - jaccard_distance_acc: 0.6516 - fbeta: 0.8384 - val_loss: 0.6242 - val_jaccard_distance_acc: 0.3518 - val_fbeta: 0.4558
# Epoch 50/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1382 - jaccard_distance_acc: 0.6537 - fbeta: 0.8399 - val_loss: 0.6139 - val_jaccard_distance_acc: 0.3450 - val_fbeta: 0.4513
# 3151/3151 [==============================] - 27s 8ms/step
# attention_model = Model(inputs=inp, outputs=attention) # Model to print out the attention data
# model.summary()
# verbose = ?; validation_split should be replaced with an explicit validation file.
# 5
# model.compile(loss='binary_crossentropy',
# optimizer=Adam(clipnorm=1, lr=0.001),
# metrics=[accu, fbeta])
# Epoch 1/50
# 16/6474 [..............................] - ETA: 31:06 - loss: 0.6962 - jaccard_distance_acc: 0.1575 - f 32/6474 [..............................] - ETA: 18:30 - loss: 0.6798 - jaccard_distance_acc: 0.1541 - f 48/6474 [..............................] - ETA: 14:12 - loss: 0.6345 - jaccard_distance_acc: 0.1517 - f 64/6474 [..............................] - ETA: 12:08 - loss: 0.6052 - jaccard_distance_acc: 0.1490 - f 80/6474 [..............................] - ETA: 10:58 - loss: 0.5777 - jaccard_distance_acc: 0.1464 - f 96/6474 [..............................] - ETA: 10:07 - loss: 0.5617 - jaccard_distance_acc: 0.1479 - f 112/6474 [..............................] - ETA: 9:29 - loss: 0.5565 - jaccard_distance_acc: 0.1488 - fb 128/6474 [..............................] - ETA: 9:03 - loss: 0.5461 - jaccard_distance_acc: 0.1512 - fb 144/6474 [..............................] - ETA: 8:39 - loss: 0.5464 - jaccard_distance_acc: 0.1553 - fb 160/6474 [..............................] - ETA: 8:21 - loss: 0.5346 - jaccard_distance_acc: 0.1541 - fb 176/6474 [..............................] - ETA: 8:07 - loss: 0.5358 - jaccard_distance_acc: 0.1555 - fb 192/6474 [..............................] - ETA: 7:53 - loss: 0.5330 - jaccard_distance_acc: 0.1548 - fb 208/6474 [..............................] - ETA: 7:41 - loss: 0.5300 - jaccard_distance_acc: 0.1539 - fb 224/6474 [>.............................] - ETA: 7:31 - loss: 0.5278 - jaccard_distance_acc: 0.1524 - fb 240/6474 [>.............................] - ETA: 7:22 - loss: 0.5257 - jaccard_distance_acc: 0.1515 - fb 256/6474 [>.............................] - ETA: 7:16 - loss: 0.5219 - jaccard_distance_acc: 0.1515 - fb 272/6476474/6474 [==============================] - 377s 58ms/step - loss: 0.4761 - jaccard_distance_acc: 0.1626 - fbeta: 0.0427 - val_loss: 0.4718 - val_jaccard_distance_acc: 0.1743 - val_fbeta: 0.0150
# Epoch 2/50
# 6474/6474 [==============================] - 367s 57ms/step - loss: 0.4506 - jaccard_distance_acc: 0.1899 - fbeta: 0.1640 - val_loss: 0.4286 - val_jaccard_distance_acc: 0.2238 - val_fbeta: 0.2165
# Epoch 3/50
# 6474/6474 [==============================] - 364s 56ms/step - loss: 0.4303 - jaccard_distance_acc: 0.2180 - fbeta: 0.2692 - val_loss: 0.4274 - val_jaccard_distance_acc: 0.2380 - val_fbeta: 0.2963
# Epoch 4/50
# 6474/6474 [==============================] - 364s 56ms/step - loss: 0.4167 - jaccard_distance_acc: 0.2362 - fbeta: 0.3153 - val_loss: 0.4138 - val_jaccard_distance_acc: 0.2498 - val_fbeta: 0.3165
# Epoch 5/50
# 6474/6474 [==============================] - 364s 56ms/step - loss: 0.4004 - jaccard_distance_acc: 0.2586 - fbeta: 0.3697 - val_loss: 0.4156 - val_jaccard_distance_acc: 0.2428 - val_fbeta: 0.2902
# Epoch 6/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.3892 - jaccard_distance_acc: 0.2726 - fbeta: 0.3920 - val_loss: 0.4030 - val_jaccard_distance_acc: 0.2778 - val_fbeta: 0.3690
# Epoch 7/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.3776 - jaccard_distance_acc: 0.2876 - fbeta: 0.4294 - val_loss: 0.4071 - val_jaccard_distance_acc: 0.2727 - val_fbeta: 0.3754
# Epoch 8/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.3661 - jaccard_distance_acc: 0.3027 - fbeta: 0.4526 - val_loss: 0.4072 - val_jaccard_distance_acc: 0.2715 - val_fbeta: 0.3536
# Epoch 9/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.3536 - jaccard_distance_acc: 0.3184 - fbeta: 0.4749 - val_loss: 0.4045 - val_jaccard_distance_acc: 0.2980 - val_fbeta: 0.4100
# Epoch 10/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.3456 - jaccard_distance_acc: 0.3320 - fbeta: 0.4995 - val_loss: 0.4084 - val_jaccard_distance_acc: 0.3023 - val_fbeta: 0.4141
# Epoch 11/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.3350 - jaccard_distance_acc: 0.3447 - fbeta: 0.5130 - val_loss: 0.4093 - val_jaccard_distance_acc: 0.2999 - val_fbeta: 0.4063
# Epoch 12/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.3227 - jaccard_distance_acc: 0.3613 - fbeta: 0.5413 - val_loss: 0.4135 - val_jaccard_distance_acc: 0.3017 - val_fbeta: 0.4054
# Epoch 13/50
# 6474/6474 [==============================] - 364s 56ms/step - loss: 0.3143 - jaccard_distance_acc: 0.3726 - fbeta: 0.5565 - val_loss: 0.4175 - val_jaccard_distance_acc: 0.3082 - val_fbeta: 0.4300
# Epoch 14/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.3070 - jaccard_distance_acc: 0.3844 - fbeta: 0.5686 - val_loss: 0.4289 - val_jaccard_distance_acc: 0.3154 - val_fbeta: 0.4311
# Epoch 15/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.2980 - jaccard_distance_acc: 0.3942 - fbeta: 0.5858 - val_loss: 0.4412 - val_jaccard_distance_acc: 0.3166 - val_fbeta: 0.4379
# Epoch 16/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.2850 - jaccard_distance_acc: 0.4098 - fbeta: 0.6062 - val_loss: 0.4378 - val_jaccard_distance_acc: 0.3181 - val_fbeta: 0.4349
# Epoch 17/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.2803 - jaccard_distance_acc: 0.4209 - fbeta: 0.6202 - val_loss: 0.4379 - val_jaccard_distance_acc: 0.3192 - val_fbeta: 0.4295
# Epoch 18/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.2729 - jaccard_distance_acc: 0.4319 - fbeta: 0.6358 - val_loss: 0.4504 - val_jaccard_distance_acc: 0.3201 - val_fbeta: 0.4383
# Epoch 19/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.2633 - jaccard_distance_acc: 0.4444 - fbeta: 0.6461 - val_loss: 0.4479 - val_jaccard_distance_acc: 0.3229 - val_fbeta: 0.4409
# Epoch 20/50
# 6474/6474 [==============================] - 364s 56ms/step - loss: 0.2576 - jaccard_distance_acc: 0.4531 - fbeta: 0.6581 - val_loss: 0.4578 - val_jaccard_distance_acc: 0.3308 - val_fbeta: 0.4555
# Epoch 21/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.2511 - jaccard_distance_acc: 0.4639 - fbeta: 0.6706 - val_loss: 0.4673 - val_jaccard_distance_acc: 0.3209 - val_fbeta: 0.4305
# Epoch 22/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.2436 - jaccard_distance_acc: 0.4743 - fbeta: 0.6839 - val_loss: 0.4847 - val_jaccard_distance_acc: 0.3328 - val_fbeta: 0.4618
# Epoch 23/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.2365 - jaccard_distance_acc: 0.4849 - fbeta: 0.6919 - val_loss: 0.4871 - val_jaccard_distance_acc: 0.3315 - val_fbeta: 0.4600
# Epoch 24/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.2320 - jaccard_distance_acc: 0.4910 - fbeta: 0.7011 - val_loss: 0.4853 - val_jaccard_distance_acc: 0.3275 - val_fbeta: 0.4493
# Epoch 25/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.2269 - jaccard_distance_acc: 0.4989 - fbeta: 0.7053 - val_loss: 0.4953 - val_jaccard_distance_acc: 0.3331 - val_fbeta: 0.4499
# Epoch 26/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.2174 - jaccard_distance_acc: 0.5130 - fbeta: 0.7232 - val_loss: 0.4959 - val_jaccard_distance_acc: 0.3457 - val_fbeta: 0.4814
# Epoch 27/50
# 6474/6474 [==============================] - 364s 56ms/step - loss: 0.2148 - jaccard_distance_acc: 0.5194 - fbeta: 0.7295 - val_loss: 0.5119 - val_jaccard_distance_acc: 0.3323 - val_fbeta: 0.4494
# Epoch 28/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.2082 - jaccard_distance_acc: 0.5294 - fbeta: 0.7392 - val_loss: 0.5138 - val_jaccard_distance_acc: 0.3390 - val_fbeta: 0.4622
# Epoch 29/50
# 6474/6474 [==============================] - 361s 56ms/step - loss: 0.2031 - jaccard_distance_acc: 0.5345 - fbeta: 0.7428 - val_loss: 0.5134 - val_jaccard_distance_acc: 0.3390 - val_fbeta: 0.4613
# Epoch 30/50
# 6474/6474 [==============================] - 361s 56ms/step - loss: 0.1986 - jaccard_distance_acc: 0.5456 - fbeta: 0.7544 - val_loss: 0.5245 - val_jaccard_distance_acc: 0.3289 - val_fbeta: 0.4362
# Epoch 31/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.1950 - jaccard_distance_acc: 0.5508 - fbeta: 0.7570 - val_loss: 0.5260 - val_jaccard_distance_acc: 0.3360 - val_fbeta: 0.4513
# Epoch 32/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.1901 - jaccard_distance_acc: 0.5582 - fbeta: 0.7635 - val_loss: 0.5358 - val_jaccard_distance_acc: 0.3401 - val_fbeta: 0.4495
# Epoch 33/50
# 6474/6474 [==============================] - 362s 56ms/step - loss: 0.1874 - jaccard_distance_acc: 0.5636 - fbeta: 0.7708 - val_loss: 0.5402 - val_jaccard_distance_acc: 0.3318 - val_fbeta: 0.4381
# Epoch 34/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.1830 - jaccard_distance_acc: 0.5722 - fbeta: 0.7789 - val_loss: 0.5433 - val_jaccard_distance_acc: 0.3342 - val_fbeta: 0.4462
# Epoch 35/50
# 6474/6474 [==============================] - 363s 56ms/step - loss: 0.1787 - jaccard_distance_acc: 0.5776 - fbeta: 0.7838 - val_loss: 0.5424 - val_jaccard_distance_acc: 0.3500 - val_fbeta: 0.4689
# Epoch 36/50
# 6474/6474 [==============================] - 341s 53ms/step - loss: 0.1774 - jaccard_distance_acc: 0.5819 - fbeta: 0.7876 - val_loss: 0.5405 - val_jaccard_distance_acc: 0.3441 - val_fbeta: 0.4530
# Epoch 37/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1704 - jaccard_distance_acc: 0.5920 - fbeta: 0.7924 - val_loss: 0.5666 - val_jaccard_distance_acc: 0.3288 - val_fbeta: 0.4324
# Epoch 38/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1692 - jaccard_distance_acc: 0.5960 - fbeta: 0.7988 - val_loss: 0.5631 - val_jaccard_distance_acc: 0.3399 - val_fbeta: 0.4492
# Epoch 39/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1657 - jaccard_distance_acc: 0.6000 - fbeta: 0.8028 - val_loss: 0.5748 - val_jaccard_distance_acc: 0.3386 - val_fbeta: 0.4434
# Epoch 40/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1635 - jaccard_distance_acc: 0.6059 - fbeta: 0.8055 - val_loss: 0.5664 - val_jaccard_distance_acc: 0.3453 - val_fbeta: 0.4482
# Epoch 41/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1621 - jaccard_distance_acc: 0.6110 - fbeta: 0.8102 - val_loss: 0.5700 - val_jaccard_distance_acc: 0.3384 - val_fbeta: 0.4450
# Epoch 42/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1555 - jaccard_distance_acc: 0.6179 - fbeta: 0.8133 - val_loss: 0.5807 - val_jaccard_distance_acc: 0.3462 - val_fbeta: 0.4560
# Epoch 43/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1540 - jaccard_distance_acc: 0.6219 - fbeta: 0.8187 - val_loss: 0.5937 - val_jaccard_distance_acc: 0.3437 - val_fbeta: 0.4520
# Epoch 44/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1526 - jaccard_distance_acc: 0.6263 - fbeta: 0.8194 - val_loss: 0.5912 - val_jaccard_distance_acc: 0.3377 - val_fbeta: 0.4440
# Epoch 45/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1491 - jaccard_distance_acc: 0.6319 - fbeta: 0.8246 - val_loss: 0.6023 - val_jaccard_distance_acc: 0.3425 - val_fbeta: 0.4602
# Epoch 46/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1453 - jaccard_distance_acc: 0.6380 - fbeta: 0.8269 - val_loss: 0.5966 - val_jaccard_distance_acc: 0.3441 - val_fbeta: 0.4441
# Epoch 47/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1446 - jaccard_distance_acc: 0.6409 - fbeta: 0.8328 - val_loss: 0.5986 - val_jaccard_distance_acc: 0.3503 - val_fbeta: 0.4610
# Epoch 48/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1432 - jaccard_distance_acc: 0.6418 - fbeta: 0.8332 - val_loss: 0.6044 - val_jaccard_distance_acc: 0.3531 - val_fbeta: 0.4591
# Epoch 49/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1396 - jaccard_distance_acc: 0.6516 - fbeta: 0.8384 - val_loss: 0.6242 - val_jaccard_distance_acc: 0.3518 - val_fbeta: 0.4558
# Epoch 50/50
# 6474/6474 [==============================] - 185s 29ms/step - loss: 0.1382 - jaccard_distance_acc: 0.6537 - fbeta: 0.8399 - val_loss: 0.6139 - val_jaccard_distance_acc: 0.3450 - val_fbeta: 0.4513
# 3151/3151 [==============================] - 27s 8ms/step
# 6
# model.compile(loss=scaled_binary_cross_entropy,
# optimizer=Adam(clipnorm=1, lr=0.001),
# metrics=[accu, fbeta])
# 6': Unclear whether clipnorm=1 actually helps or hurts; the evaluation criterion
# is not well defined, so it is hard to tell whether training/evaluation is done correctly.
# model.compile(loss=scaled_binary_cross_entropy,
# optimizer=Adam(lr=0.001),
# metrics=[accu, fbeta])
#7
# Variant 7: compile with plain binary cross-entropy and default Adam (lr=0.001,
# no gradient clipping, unlike variants 5/6 above which used clipnorm=1).
# `accu` and `fbeta` are project-defined metrics — presumably the Jaccard-distance
# accuracy and F-beta seen in the logs above; TODO confirm against their definitions.
model.compile(loss='binary_crossentropy',
optimizer=Adam(lr=0.001),
metrics=[accu, fbeta])
# Secondary model sharing the same input `inp` but outputting the attention
# weights, used to inspect what the attention layer focuses on.
attention_model = Model(inputs=inp, outputs=attention) # Model to print out the attention data
model.summary()
# STAMP = './First_AttentionNLP_%.2f_%.2f'%(rate_drop_lstm,rate_drop_dense)
# print(STAMP)
# early_stopping =EarlyStopping(monitor='val_loss', patience=5)
# bst_model_path = STAMP + '.h5'
# model_checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=True)
# # model.fit(X_t, y, validation_data=(x_val,y_val), epochs=3, verbose=1, batch_size=512)
# epoch = 50
# Checkpoint filename template: Keras substitutes the epoch number and the
# training loss at save time.
filepath = "weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
# NOTE(review): this monitors training 'loss', not 'val_loss', so checkpoints
# track fit to the training set rather than generalization — confirm intended.
checkpoint = ModelCheckpoint(
filepath, monitor='loss',
verbose=0,
save_best_only=True,
mode='min'
)
callbacks_list = [checkpoint]
# Train on in-memory arrays with an explicit validation set; checkpoints are
# written via the callback above.
model.fit(k_vec, k_y_train, validation_data=(k_vec_val,k_y_val), epochs=30, verbose=1, batch_size=batch_size,callbacks=callbacks_list)
# model.fit(k_vec, k_y_train, validation_data=(k_vec_val,k_y_val), epochs=3, verbose=1, steps_per_epoch=int(6619/batch_size)+1, validation_steps = int(883/batch_size)+1)
# 5. Evaluate the model on the held-out test set.
test_score = model.evaluate(k_vec_test, k_y_test, batch_size=batch_size)
print('')
print(str(test_score))
# 6. Save the trained model.
# NOTE(review): `datetime` is only used by the commented-out timestamp line below.
from datetime import datetime
# now = datetime.now()
model.save('Attention.h5')
def test_return():
    """Expose the held-out test inputs and labels as a (features, labels) tuple."""
    return k_vec_test, k_y_test
# # 7. View the model architecture
# from IPython.display import SVG
# from keras.utils.vis_utils import model_to_dot
# # %matplotlib inline
# # str(now.day)+"/"+str(now.hour)+":"+str(now.minute)+":"+str(now.second)+
# SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
# #######################################
# ## make the submission
# ########################################
# print('Start making the submission before fine-tuning')
# y_test = model.predict(word_vec_test,y_test_size=32, verbose=1)
# sample_submission = pd.read_csv("../input/sample_submission.csv")
# sample_submission[list_classes] = y_test
# sample_submission.to_csv('%.4f_'%(bst_val_score)+STAMP+'.csv', index=False)
# # Code for testing
# def get_word_importances(text):
# lt = tokenizer.texts_to_sequences([text])
# x = pad_sequences(lt, maxlen=maxlen)
# p = model.predict(x)
# att = attention_model.predict(x)
# return p, [(reverse_token_map.get(word), importance) for word, importance in zip(x[0], att[0]) if word in reverse_token_map]
# emb = Embedding(input_dim=vocab_size, output_dim=EMB_DIM,
# trainable=False, weights=[embedding_matrix], input_length=text_max_words)
# # Embedding dropout
# # GaussianNoise(mean=0.0, stddev=0.2)
# keras.layers.GaussianNoise(stddev=0.2)
# keras.layers.GaussianDropout(rate)
# # LSTM(310, 250, num_layers=2, batch_first=True, dropout=0.3, bidirectional=True)
# # Dropout(p=0.3)
# # n_timesteps = text_max_words
# # return_sequences=True makes the LSTM output the hidden state at every timestep.
# =Bidirectional(LSTM(250, return_sequences=True), input_shape=(text_max_words, EMB_DIM))(emb)
# # SelfAttention(
# # Sequential(
# # Linear(in_features=500, out_features=1, bias=True)
# # Tahh()
# # Dropout(0.3)f
# # Linear(in_features=500, out_features=1, bias=True)
# # Tanh()
# # Dropout(p=0.3)
# # )
# # Softmax()
# # )
# # )
# # Linear(in_features=500, out_features=11, bias=True) binary classification
# # )
# model.compile()
# word_vec = []
# for sent in tmp:
# sub = []
# for word in sent:
# if(word in embeddings_index.keys()):
# sub.append(embeddings_index[word])
# else:
# sub.append(np.random.uniform(-0.25,0.25,300)) ## used for OOV words
# word_vec.append(sub)
# return np.array(word_vec)
| 66.351
| 3,718
| 0.573465
| 9,546
| 66,351
| 3.793002
| 0.150115
| 0.060208
| 0.153612
| 0.160572
| 0.6548
| 0.599564
| 0.565151
| 0.542283
| 0.511351
| 0.499972
| 0
| 0.167563
| 0.140134
| 66,351
| 1,000
| 3,719
| 66.351
| 0.467074
| 0.813085
| 0
| 0.172414
| 0
| 0
| 0.100691
| 0.026448
| 0
| 0
| 0
| 0.001
| 0
| 1
| 0.019157
| false
| 0
| 0.114943
| 0.003831
| 0.153257
| 0.187739
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7c39a0225a53a4903ba1e5f3e2520ad263e09bd9
| 55
|
py
|
Python
|
pylibs/hej/__main__.py
|
srittau/hej
|
87f2e577205895a988813abc057cd62234c87379
|
[
"MIT"
] | null | null | null |
pylibs/hej/__main__.py
|
srittau/hej
|
87f2e577205895a988813abc057cd62234c87379
|
[
"MIT"
] | 90
|
2018-07-31T22:50:13.000Z
|
2022-03-29T06:29:48.000Z
|
pylibs/hej/__main__.py
|
srittau/hej
|
87f2e577205895a988813abc057cd62234c87379
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Package entry point: running ``python -m <package>`` delegates straight to main().
from .main import main
main()
| 9.166667
| 22
| 0.690909
| 9
| 55
| 4.222222
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 0.163636
| 55
| 5
| 23
| 11
| 0.804348
| 0.381818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
7c50853e04f52f8b7c864cc192d121b7a52f4861
| 117
|
py
|
Python
|
exceptions/user_exception.py
|
iseoluwaYN/eden
|
aa11f14ae41bb752f424abbcb6b8b9037a01c1b0
|
[
"MIT"
] | null | null | null |
exceptions/user_exception.py
|
iseoluwaYN/eden
|
aa11f14ae41bb752f424abbcb6b8b9037a01c1b0
|
[
"MIT"
] | 1
|
2021-09-09T10:19:14.000Z
|
2021-09-09T10:19:14.000Z
|
exceptions/user_exception.py
|
iseoluwaYN/eden
|
aa11f14ae41bb752f424abbcb6b8b9037a01c1b0
|
[
"MIT"
] | 19
|
2021-08-19T13:01:18.000Z
|
2021-08-19T15:08:05.000Z
|
class UserException(Exception):
    """Exception raised for user-related errors.

    Carries a human-readable ``message``, available via ``str(exc)``
    and ``exc.args[0]`` like any standard exception.
    """

    def __init__(self, message):
        # Python 3 zero-argument super() replaces the legacy
        # two-argument form super(UserException, self).
        super().__init__(message)
| 39
| 52
| 0.735043
| 12
| 117
| 6.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 117
| 3
| 52
| 39
| 0.787879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
7c56bad3cfe0b9d1f85c61ac0034fe4ca5fb750f
| 18,040
|
py
|
Python
|
x-dump/utilities/miscellaneous_tester.py
|
eda-ricercatore/python-sandbox
|
741d23e15f22239cb5df8af6e695cd8e3574be50
|
[
"MIT"
] | null | null | null |
x-dump/utilities/miscellaneous_tester.py
|
eda-ricercatore/python-sandbox
|
741d23e15f22239cb5df8af6e695cd8e3574be50
|
[
"MIT"
] | null | null | null |
x-dump/utilities/miscellaneous_tester.py
|
eda-ricercatore/python-sandbox
|
741d23e15f22239cb5df8af6e695cd8e3574be50
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python3
###!/Users/zhiyang/anaconda3/bin/python3
"""
This Python script is written by Zhiyang Ong to test miscellaneous
methods in the miscellaneous class.
Synopsis:
Perform a subset of the methods in the miscellaneous class.
Revision History:
July 31, 2018 Version 0.1, initial build.
"""
__author__ = 'Zhiyang Ong'
__version__ = '1.0'
__date__ = 'July 31, 2018'
# The MIT License (MIT)
# Copyright (c) <2018> <Zhiyang Ong>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Email address: echo "cukj -wb- 23wU4X5M589 TROJANS cqkH wiuz2y 0f Mw Stanford" | awk '{ sub("23wU4X5M589","F.d_c_b. ") sub("Stanford","d0mA1n"); print $5, $2, $8; for (i=1; i<=1; i++) print "6\b"; print $9, $7, $6 }' | sed y/kqcbuHwM62z/gnotrzadqmC/ | tr 'q' ' ' | tr -d [:cntrl:] | tr -d 'ir' | tr y "\n" Che cosa significa?
###############################################################
"""
Import modules from The Python Standard Library.
sys Get access to any command-line arguments.
os Use any operating system dependent functionality.
os.path For pathname manipulations.
subprocess -> call
To make system calls.
time To measure elapsed time.
warnings Raise warnings.
re Use regular expressions.
calendar For checking if given year is a leap year.
"""
import sys
#import os
import os.path
#from subprocess import call
import subprocess
#import time
import warnings
#import re
import calendar
###############################################################
# Import Custom Python Modules
"""
Package and module to print statistics of software testing
results.
"""
from statistic_pkg.test_statistics import statistical_analysis
# Package and module to perform file I/O operations.
from utilities.file_io import file_io_operations
"""
Package and module to configure the software application's
parameters.
"""
from utilities.configuration_manager import config_manager
"""
Package and module to perform methods in the miscellaneous
class.
"""
from utilities.miscellaneous import misc
"""
Module to test if the generated filename (based on the
then-current time stamp) conforms to the specified
format.
"""
from utilities.generate_results_filename_tester import generate_filename_tester
###############################################################
## Module that tests methods that perform miscellaneous tasks.
class misc_tester:
## =========================================================
# Method to test the methods that perform file I/O operations
# with an invalid file.
# @param - Nothing
# @return - Nothing.
# O(1) method.
@staticmethod
def test_check_filename_format():
print(" Testing the filename format checker.")
prompt = " ... Test: incorrect file extension is '.txt'. {}"
statistical_analysis.increment_number_test_cases_used()
if misc.check_filename_format("tyuw.iew"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename no file extension has 6 tokens. {}"
statistical_analysis.increment_number_test_cases_used()
if misc.check_filename_format("HH-MM-SS-uS.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with -ve DD/day. {}"
statistical_analysis.increment_number_test_cases_used()
if misc.check_filename_format("-5-MM-YY-HH-MM-SS-uS.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with DD/day >29, Feb. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("35-2-2016-00-00-00-00.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with DD/day=29, Feb, leap year. {}"
statistical_analysis.increment_number_test_cases_used()
if misc.check_filename_format("29-2-2016-00-00-00-00.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: filename with DD/day=28, Feb, not leap year. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("28-2-2017-00-00-00-00.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: filename with DD/day = 34. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("34-6-2017-00-00-00-00.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: filename with DD/day=31, 31 day mth. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("31-7-2017-00-00-00-00.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: filename with DD/day=30, 30 day mth. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("30-9-2017-00-00-00-00.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: filename with MM/month = 0. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("30-0-2017-00-00-00-00.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with MM/month = -4. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("30--4-2017-00-00-00-00.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with MM/month = 15. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("30-15-2017-00-00-00-00.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with MM/month = 9. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("30-9-2017-00-00-00-00.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: filename with YY/year = 1582. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("30-11-1582-00-00-00-00.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with YY/year = 2083. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("2-2-2083-00-00-00-00.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: filename with HH/hour = -3. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("30-5-2015--3-00-00-00.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with HH/hour = 25. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("3-5-2017-25-00-00-00.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with HH/hour = 17. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("12-1-2013-17-00-00-00.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: filename with MM/minute = -8. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("7-4-2012-2--8-00-00.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with MM/minute = 73. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("25-1-2020-5-73-00-00.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with MM/minute = 59. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("25-1-2020-5-59-00-00.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: filename with MM/minute = 0. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("25-1-2020-5-0-00-00.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: filename with SS/second = -4. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("25-1-2020-5-8--4-00.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with SS/second = 81. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("25-1-2020-5-8-81-00.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with SS/second = 36. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("25-1-2020-5-8-36-00.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: filename with uS/microsecond = -129. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("25-1-2020-5-8-4--129.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with uS/microsecond = 16534785929. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("25-1-2020-5-8-32-16534785929.txt"):
print(prompt .format("FAIL!!!"))
else:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
prompt = " ... Test: filename with uS/microsecond = 0. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("25-1-2020-5-8-32-0.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: filename with uS/microsecond = 999999. {}"
statistical_analysis.increment_number_test_cases_used()
"""
All fields/tokens need to be numbers, else an exception
would be thrown.
"""
if misc.check_filename_format("25-1-2020-5-8-51-999999.txt"):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
## =========================================================
# Method to test the miscellaneous method that determines
# where to store the results of experimental, simulation,
# verification, and testing runs.
# This does not correctly check whether the results file is placed
# in the correct subdirectory of the results repository.
# #### TO BE COMPLETED
# Test if the subdirectory is correct. This is busywork.
# @param - Nothing
# @return a string representing the location to store the
# aforementioned results.
# O(1) method.
@staticmethod
def test_find_desired_location_for_results():
incorrect_format_result = "'filename' needs to have the format: DD-MM-YY-HH-MM-SS-uS.txt."
print("== Test: test_find_desired_location_for_results().")
test_filename = "25-3-2010-5-8-51-9994073289.dwq"
results_location = misc.find_desired_location_for_results(test_filename)
prompt = " ... Test: filename is 25-3-2010-5-8-51-9994073289.dwq. {}"
statistical_analysis.increment_number_test_cases_used()
if misc.find_desired_location_for_results(test_filename) == incorrect_format_result:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
print("== Test: test_find_desired_location_for_results().")
test_filename = "25-3-2010-5-8-51-9407.txt"
results_location = misc.find_desired_location_for_results(test_filename)
prompt = " ... Test: filename 25-3-2010-5-8-51-9407.txt included. {}"
statistical_analysis.increment_number_test_cases_used()
if misc.check_absolute_path_to_store_results(results_location,test_filename):
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
prompt = " ... Test: 25-3-2010-5-8-51-9407.txt, correct path. {}"
statistical_analysis.increment_number_test_cases_used()
if misc.get_absolute_path_to_store_results() in results_location:
print(prompt .format("OK"))
statistical_analysis.increment_number_test_cases_passed()
else:
print(prompt .format("FAIL!!!"))
"""
print("results_location:",results_location,"=")
print("misc.get_absolute_path_to_store_results():",misc.get_absolute_path_to_store_results(),"=")
print(results_location.find(misc.get_absolute_path_to_store_results()))
"""
f_obj = misc.store_results(results_location)
f_obj.write("Storage of experimental, simulation, verification, and testing results work.")
file_io_operations.close_file_object(f_obj)
## =========================================================
# Method to test the miscellaneous methods.
# @param - Nothing
# @return - Nothing.
# O(1) method.
@staticmethod
def test_miscellaneous_methods():
print("")
print("")
print("== Testing class: misc.")
misc_tester.test_check_filename_format()
misc_tester.test_find_desired_location_for_results()
| 38.382979
| 462
| 0.707705
| 2,469
| 18,040
| 4.968813
| 0.140543
| 0.100668
| 0.146071
| 0.177372
| 0.736143
| 0.714868
| 0.697587
| 0.687154
| 0.67778
| 0.667835
| 0
| 0.038319
| 0.147949
| 18,040
| 469
| 463
| 38.464819
| 0.759807
| 0.153326
| 0
| 0.657692
| 0
| 0.015385
| 0.244296
| 0.077235
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011538
| false
| 0.123077
| 0.038462
| 0
| 0.053846
| 0.269231
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
7c5a635ec2fdb6b840f73c0a5f173226e68df761
| 2,392
|
py
|
Python
|
kubernetes/client/apis/__init__.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/apis/__init__.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/apis/__init__.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
# import apis into api package
from .admissionregistration_api import AdmissionregistrationApi
from .admissionregistration_v1alpha1_api import AdmissionregistrationV1alpha1Api
from .apiextensions_api import ApiextensionsApi
from .apiextensions_v1beta1_api import ApiextensionsV1beta1Api
from .apiregistration_api import ApiregistrationApi
from .apiregistration_v1beta1_api import ApiregistrationV1beta1Api
from .apis_api import ApisApi
from .apps_api import AppsApi
from .apps_v1beta1_api import AppsV1beta1Api
from .apps_v1beta2_api import AppsV1beta2Api
from .authentication_api import AuthenticationApi
from .authentication_v1_api import AuthenticationV1Api
from .authentication_v1beta1_api import AuthenticationV1beta1Api
from .authorization_api import AuthorizationApi
from .authorization_v1_api import AuthorizationV1Api
from .authorization_v1beta1_api import AuthorizationV1beta1Api
from .autoscaling_api import AutoscalingApi
from .autoscaling_v1_api import AutoscalingV1Api
from .autoscaling_v2beta1_api import AutoscalingV2beta1Api
from .batch_api import BatchApi
from .batch_v1_api import BatchV1Api
from .batch_v1beta1_api import BatchV1beta1Api
from .batch_v2alpha1_api import BatchV2alpha1Api
from .certificates_api import CertificatesApi
from .certificates_v1beta1_api import CertificatesV1beta1Api
from .core_api import CoreApi
from .core_v1_api import CoreV1Api
from .custom_objects_api import CustomObjectsApi
from .extensions_api import ExtensionsApi
from .extensions_v1beta1_api import ExtensionsV1beta1Api
from .logs_api import LogsApi
from .networking_api import NetworkingApi
from .networking_v1_api import NetworkingV1Api
from .policy_api import PolicyApi
from .policy_v1beta1_api import PolicyV1beta1Api
from .rbac_authorization_api import RbacAuthorizationApi
from .rbac_authorization_v1_api import RbacAuthorizationV1Api
from .rbac_authorization_v1alpha1_api import RbacAuthorizationV1alpha1Api
from .rbac_authorization_v1beta1_api import RbacAuthorizationV1beta1Api
from .scheduling_api import SchedulingApi
from .scheduling_v1alpha1_api import SchedulingV1alpha1Api
from .settings_api import SettingsApi
from .settings_v1alpha1_api import SettingsV1alpha1Api
from .storage_api import StorageApi
from .storage_v1_api import StorageV1Api
from .storage_v1beta1_api import StorageV1beta1Api
from .version_api import VersionApi
| 46.901961
| 80
| 0.896739
| 276
| 2,392
| 7.471014
| 0.297101
| 0.205141
| 0.085354
| 0.023278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040128
| 0.083194
| 2,392
| 50
| 81
| 47.84
| 0.900137
| 0.011706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
7c77680fd062cc2598a075d374892a1667353a63
| 249
|
py
|
Python
|
python-design/code-examples/Section 2/Section 2 Code/5428_02_ CODE_FD_MD_1stdraft_ACC/singleton.py
|
vermuz/mani-professional-notes
|
896328e81e376bc113553c81d38ad6c1781b8e0b
|
[
"CC-BY-3.0"
] | 11
|
2018-06-07T15:54:40.000Z
|
2021-07-24T19:08:52.000Z
|
python-design/code-examples/Section 2/Section 2 Code/5428_02_ CODE_FD_MD_1stdraft_ACC/singleton.py
|
vermuz/mani-professional-notes
|
896328e81e376bc113553c81d38ad6c1781b8e0b
|
[
"CC-BY-3.0"
] | null | null | null |
python-design/code-examples/Section 2/Section 2 Code/5428_02_ CODE_FD_MD_1stdraft_ACC/singleton.py
|
vermuz/mani-professional-notes
|
896328e81e376bc113553c81d38ad6c1781b8e0b
|
[
"CC-BY-3.0"
] | 8
|
2018-02-19T16:12:09.000Z
|
2021-12-24T09:20:26.000Z
|
class Singleton:
    """Classic singleton: every call to Singleton(...) yields one shared instance.

    Note that ``val`` is (re)assigned on *every* construction call, so the
    most recent caller's value wins — including the ``None`` default.
    """

    __instance = None  # the single shared instance, lazily created

    def __new__(cls, val=None):
        instance = Singleton.__instance
        if instance is None:
            # First construction: allocate the one-and-only instance.
            instance = object.__new__(cls)
            Singleton.__instance = instance
        # Deliberately overwrite val on each call (original behavior).
        instance.val = val
        return instance
| 31.125
| 55
| 0.64257
| 26
| 249
| 5.461538
| 0.461538
| 0.598592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285141
| 249
| 7
| 56
| 35.571429
| 0.797753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
7c8dc6e2a1b1abf82f396132d37090de977197f4
| 243
|
py
|
Python
|
xai/explainer/__init__.py
|
ngpgn/contextual-ai
|
deb119395ced5242f243b2b31c074507e96646c0
|
[
"Apache-2.0"
] | 83
|
2020-06-17T04:07:29.000Z
|
2022-03-12T13:45:24.000Z
|
xai/explainer/__init__.py
|
ngpgn/contextual-ai
|
deb119395ced5242f243b2b31c074507e96646c0
|
[
"Apache-2.0"
] | 15
|
2020-06-30T09:22:19.000Z
|
2021-11-11T10:52:40.000Z
|
xai/explainer/__init__.py
|
ngpgn/contextual-ai
|
deb119395ced5242f243b2b31c074507e96646c0
|
[
"Apache-2.0"
] | 11
|
2020-06-17T17:01:24.000Z
|
2022-02-27T18:53:03.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved
# ============================================================================
from .explainer_factory import ExplainerFactory
| 34.714286
| 78
| 0.506173
| 23
| 243
| 5.304348
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023041
| 0.106996
| 243
| 6
| 79
| 40.5
| 0.539171
| 0.765432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
7c9febbc7b0fa16b8d184bf8c1e9bab60512d404
| 285
|
py
|
Python
|
main_console.py
|
TPei/jawbone_visualizer
|
dadefbddb47450c3c43d474abaf9ae8d1317e03b
|
[
"MIT"
] | null | null | null |
main_console.py
|
TPei/jawbone_visualizer
|
dadefbddb47450c3c43d474abaf9ae8d1317e03b
|
[
"MIT"
] | 1
|
2015-05-11T07:21:21.000Z
|
2015-05-11T07:21:21.000Z
|
main_console.py
|
TPei/jawbone_visualizer
|
dadefbddb47450c3c43d474abaf9ae8d1317e03b
|
[
"MIT"
] | null | null | null |
# Console entry point: plots Jawbone tracker data using data.manager helpers.
__author__ = 'TPei'
# NOTE(review): star import is assumed to provide plot_all and
# get_all_the_data (and the commented-out helpers below) -- confirm.
from data.manager import *
#visualize_sleep_per_weekday()
#plot_sleep()
#plot_step_graph()
# Plot every available metric from the collected data.
plot_all(get_all_the_data())
#print(get_all_the_data('awake_time'))
#compareDicts(get_all_the_data(), get_all_the_data('awake_time'))
#coffee_effect_sleep(get_all_the_data())
| 31.666667
| 65
| 0.810526
| 46
| 285
| 4.369565
| 0.478261
| 0.149254
| 0.223881
| 0.323383
| 0.218905
| 0.218905
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049123
| 285
| 9
| 66
| 31.666667
| 0.741697
| 0.694737
| 0
| 0
| 0
| 0
| 0.04878
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
7ca6d61751dd8b252eb5706b6b419e29608de0b6
| 158
|
py
|
Python
|
ehpi_action_recognition/configurator.py
|
steuwe/ehpi_action_recognition
|
4318e82e541c9b42bf0af7976815229ed6261c39
|
[
"MIT"
] | 100
|
2019-04-16T17:18:02.000Z
|
2022-02-23T08:59:51.000Z
|
ehpi_action_recognition/configurator.py
|
steuwe/ehpi_action_recognition
|
4318e82e541c9b42bf0af7976815229ed6261c39
|
[
"MIT"
] | 15
|
2019-06-14T13:30:12.000Z
|
2022-02-17T12:16:07.000Z
|
ehpi_action_recognition/configurator.py
|
steuwe/ehpi_action_recognition
|
4318e82e541c9b42bf0af7976815229ed6261c39
|
[
"MIT"
] | 21
|
2019-05-08T03:29:12.000Z
|
2022-03-05T05:51:00.000Z
|
def setup_application():
    """Apply application-wide configuration overrides.

    Intended to be called once, before everything else. Currently a
    no-op placeholder; uncomment/extend the example below to override
    configuration values.

    Returns:
        None.
    """
    # Example to change config stuff, call this method before everything else.
    # config.cache_config.cache_dir = "abc"
| 31.6
| 78
| 0.708861
| 22
| 158
| 4.954545
| 0.863636
| 0.201835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008
| 0.208861
| 158
| 4
| 79
| 39.5
| 0.864
| 0.696203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7cb9993a81a5d7f79507310f093d2f6a43fbee19
| 1,508
|
py
|
Python
|
main.py
|
TiffanyXQZ/tf26
|
52e4779dee0a5f398da62d9ffbd3bdc55b3d463d
|
[
"MIT"
] | null | null | null |
main.py
|
TiffanyXQZ/tf26
|
52e4779dee0a5f398da62d9ffbd3bdc55b3d463d
|
[
"MIT"
] | null | null | null |
main.py
|
TiffanyXQZ/tf26
|
52e4779dee0a5f398da62d9ffbd3bdc55b3d463d
|
[
"MIT"
] | 1
|
2022-01-31T21:59:52.000Z
|
2022-01-31T21:59:52.000Z
|
# Smoke check: print the physical devices (CPUs/GPUs) visible to TensorFlow.
import tensorflow as tf
print(tf.config.list_physical_devices())
# import matplotlib.pyplot as plt
# import numpy as np
# import tensorflow as tf
#
# points_n = 200
# clusters_n = 3
# iteration_n = 100
#
# points = tf.constant(np.random.uniform(0, 10, (points_n, 2)))
# centroids = tf.Variable(tf.slice(tf.random.shuffle(points), [0, 0], [clusters_n, -1]))
#
# points_expanded = tf.expand_dims(points, 0)
# centroids_expanded = tf.expand_dims(centroids, 1)
#
# distances = tf.reduce_sum(tf.square(tf.subtract(points_expanded, centroids_expanded)), 2)
# assignments = tf.argmin(distances, 0)
#
# means = []
# for c in range(clusters_n):
# means.append(tf.reduce_mean(
# tf.gather(points,
# tf.reshape(
# tf.where(
# tf.equal(assignments, c)
# ), [1, -1])
# ), reduction_indices=[1]))
#
# new_centroids = tf.concat(means, 0)
#
# update_centroids = tf.assign(centroids, new_centroids)
# init = tf.global_variables_initializer()
#
# with tf.Session() as sess:
# sess.run(init)
# for step in range(iteration_n):
# [_, centroid_values, points_values, assignment_values] = sess.run(
# [update_centroids, centroids, points, assignments])
#
# print("centroids", centroid_values)
#
# plt.scatter(points_values[:, 0], points_values[:, 1], c=assignment_values, s=50, alpha=0.5)
# plt.plot(centroid_values[:, 0], centroid_values[:, 1], 'kx', markersize=15)
# plt.show()
| 32.085106
| 93
| 0.639257
| 195
| 1,508
| 4.769231
| 0.415385
| 0.060215
| 0.03871
| 0.043011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026801
| 0.208223
| 1,508
| 47
| 94
| 32.085106
| 0.752094
| 0.898541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 4
|
7cc53f31b06a151312323ad2083c3fd2f9de05b8
| 927
|
py
|
Python
|
moto/efs/urls.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | null | null | null |
moto/efs/urls.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | 1
|
2022-03-07T07:39:03.000Z
|
2022-03-07T07:39:03.000Z
|
moto/efs/urls.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | null | null | null |
from .responses import EFSResponse

# Host patterns handled by this mock: regional and global EFS endpoints.
# NOTE(review): the dot in "amazonaws.com" is unescaped and so matches any
# character -- likely harmless for a mock, but confirm before tightening.
url_bases = [
    r"https?://elasticfilesystem\.(.+)\.amazonaws.com",
    r"https?://elasticfilesystem\.amazonaws.com",
]

# A single response object dispatches every route below.
response = EFSResponse()

# URL path patterns of the 2015-02-01 EFS API, all routed to the shared
# dispatcher, which selects the handler from method + path.
url_paths = {
    "{0}/.*?$": response.dispatch,
    "/2015-02-01/access-points": response.dispatch,
    "/2015-02-01/access-points/<access_point_id>": response.dispatch,
    "/2015-02-01/file-systems": response.dispatch,
    "/2015-02-01/file-systems/<file_system_id>": response.dispatch,
    "/2015-02-01/file-systems/<file_system_id>/backup-policy": response.dispatch,
    "/2015-02-01/file-systems/<file_system_id>/lifecycle-configuration": response.dispatch,
    "/2015-02-01/mount-targets": response.dispatch,
    "/2015-02-01/mount-targets/<mount_target_id>": response.dispatch,
    "/2015-02-01/mount-targets/<mount_target_id>/security-groups": response.dispatch,
    "/2015-02-01/resource-tags/<resource_id>": response.dispatch,
}
| 37.08
| 91
| 0.707659
| 119
| 927
| 5.386555
| 0.302521
| 0.274571
| 0.312012
| 0.343214
| 0.74571
| 0.599064
| 0.599064
| 0.430577
| 0.372855
| 0.372855
| 0
| 0.097473
| 0.10356
| 927
| 24
| 92
| 38.625
| 0.673887
| 0
| 0
| 0
| 0
| 0
| 0.555556
| 0.546926
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7ccb809f234d6084d3fba2fa3c9122e2a9a0cb1c
| 57
|
py
|
Python
|
tests/checks/__init__.py
|
aidanmelen/website_checker
|
f2324a67cb8e0288bcbee8ecdae9b836bab23f8e
|
[
"MIT"
] | 1
|
2021-05-18T18:19:45.000Z
|
2021-05-18T18:19:45.000Z
|
tests/checks/__init__.py
|
aidanmelen/website_checker
|
f2324a67cb8e0288bcbee8ecdae9b836bab23f8e
|
[
"MIT"
] | null | null | null |
tests/checks/__init__.py
|
aidanmelen/website_checker
|
f2324a67cb8e0288bcbee8ecdae9b836bab23f8e
|
[
"MIT"
] | null | null | null |
"""Test suite for the website_checker.checks package."""
| 28.5
| 56
| 0.754386
| 8
| 57
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 57
| 1
| 57
| 57
| 0.823529
| 0.877193
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7ce5fb2da678eac7006b6e95ceba3b54b072463f
| 42,915
|
py
|
Python
|
tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py
|
tianyapiaozi/tensorflow
|
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
|
[
"Apache-2.0"
] | 522
|
2016-06-08T02:15:50.000Z
|
2022-03-02T05:30:36.000Z
|
tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 133
|
2017-04-26T16:49:49.000Z
|
2019-10-15T11:39:26.000Z
|
tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 108
|
2016-06-16T15:34:05.000Z
|
2022-03-12T13:23:11.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functional style sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import random
import numpy as np
from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as seq2seq_lib
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class Seq2SeqTest(test.TestCase):
  def testRNNDecoder(self):
    """rnn_decoder fed a GRU encoder state: check output and state shapes."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
        _, enc_state = rnn.static_rnn(
            rnn_cell.GRUCell(2), inp, dtype=dtypes.float32)
        dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
        cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
        dec, mem = seq2seq_lib.rnn_decoder(dec_inp, enc_state, cell)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        # 3 decoder steps, each projected to width 4 for batch size 2.
        self.assertEqual(3, len(res))
        self.assertEqual((2, 4), res[0].shape)
        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].shape)
  def testBasicRNNSeq2Seq(self):
    """basic_rnn_seq2seq: check decoder output and final-state shapes."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
        dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
        cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
        dec, mem = seq2seq_lib.basic_rnn_seq2seq(inp, dec_inp, cell)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 4), res[0].shape)
        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].shape)
  def testTiedRNNSeq2Seq(self):
    """tied_rnn_seq2seq (shared encoder/decoder weights): check shapes."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
        dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
        cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
        dec, mem = seq2seq_lib.tied_rnn_seq2seq(inp, dec_inp, cell)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 4), res[0].shape)
        res = sess.run([mem])
        self.assertEqual(1, len(res))
        self.assertEqual((2, 2), res[0].shape)
  def testEmbeddingRNNDecoder(self):
    """embedding_rnn_decoder with an LSTM cell: check output/state shapes."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
        cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
        cell = cell_fn()
        _, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
        # Decoder inputs are int32 symbol ids, one tensor per time step.
        dec_inp = [
            constant_op.constant(
                i, dtypes.int32, shape=[2]) for i in range(3)
        ]
        # Use a new cell instance since the attention decoder uses a
        # different variable scope.
        dec, mem = seq2seq_lib.embedding_rnn_decoder(
            dec_inp, enc_state, cell_fn(), num_symbols=4, embedding_size=2)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 2), res[0].shape)
        res = sess.run([mem])
        self.assertEqual(1, len(res))
        # LSTM state is an (c, h) tuple; check both halves.
        self.assertEqual((2, 2), res[0].c.shape)
        self.assertEqual((2, 2), res[0].h.shape)
  def testEmbeddingRNNSeq2Seq(self):
    """embedding_rnn_seq2seq: shapes, tuple/non-tuple state, output
    projection, and that feed_previous ignores later decoder inputs."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        enc_inp = [
            constant_op.constant(
                1, dtypes.int32, shape=[2]) for i in range(2)
        ]
        dec_inp = [
            constant_op.constant(
                i, dtypes.int32, shape=[2]) for i in range(3)
        ]
        cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
        cell = cell_fn()
        dec, mem = seq2seq_lib.embedding_rnn_seq2seq(
            enc_inp,
            dec_inp,
            cell,
            num_encoder_symbols=2,
            num_decoder_symbols=5,
            embedding_size=2)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 5), res[0].shape)
        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].c.shape)
        self.assertEqual((2, 2), res[0].h.shape)
        # Test with state_is_tuple=False.
        with variable_scope.variable_scope("no_tuple"):
          cell_nt = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
          dec, mem = seq2seq_lib.embedding_rnn_seq2seq(
              enc_inp,
              dec_inp,
              cell_nt,
              num_encoder_symbols=2,
              num_decoder_symbols=5,
              embedding_size=2)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 5), res[0].shape)
        res = sess.run([mem])
        # Non-tuple LSTM state concatenates c and h: width 2 + 2 = 4.
        self.assertEqual((2, 4), res[0].shape)
        # Test externally provided output projection.
        w = variable_scope.get_variable("proj_w", [2, 5])
        b = variable_scope.get_variable("proj_b", [5])
        with variable_scope.variable_scope("proj_seq2seq"):
          dec, _ = seq2seq_lib.embedding_rnn_seq2seq(
              enc_inp,
              dec_inp,
              cell_fn(),
              num_encoder_symbols=2,
              num_decoder_symbols=5,
              embedding_size=2,
              output_projection=(w, b))
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        # With an external projection the raw cell output (width 2) is
        # returned; the projection is applied by the caller.
        self.assertEqual((2, 2), res[0].shape)
        # Test that previous-feeding model ignores inputs after the first.
        dec_inp2 = [
            constant_op.constant(
                0, dtypes.int32, shape=[2]) for _ in range(3)
        ]
        with variable_scope.variable_scope("other"):
          d3, _ = seq2seq_lib.embedding_rnn_seq2seq(
              enc_inp,
              dec_inp2,
              cell_fn(),
              num_encoder_symbols=2,
              num_decoder_symbols=5,
              embedding_size=2,
              feed_previous=constant_op.constant(True))
        with variable_scope.variable_scope("other_2"):
          d1, _ = seq2seq_lib.embedding_rnn_seq2seq(
              enc_inp,
              dec_inp,
              cell_fn(),
              num_encoder_symbols=2,
              num_decoder_symbols=5,
              embedding_size=2,
              feed_previous=True)
        with variable_scope.variable_scope("other_3"):
          d2, _ = seq2seq_lib.embedding_rnn_seq2seq(
              enc_inp,
              dec_inp2,
              cell_fn(),
              num_encoder_symbols=2,
              num_decoder_symbols=5,
              embedding_size=2,
              feed_previous=True)
        sess.run([variables.global_variables_initializer()])
        res1 = sess.run(d1)
        res2 = sess.run(d2)
        res3 = sess.run(d3)
        # d1/d2/d3 differ only in decoder inputs after step 0, so with
        # feed_previous their outputs must coincide.
        self.assertAllClose(res1, res2)
        self.assertAllClose(res1, res3)
  def testEmbeddingTiedRNNSeq2Seq(self):
    """embedding_tied_rnn_seq2seq: shapes, num_decoder_symbols override,
    output projection, and feed_previous equivalence."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        enc_inp = [
            constant_op.constant(
                1, dtypes.int32, shape=[2]) for i in range(2)
        ]
        dec_inp = [
            constant_op.constant(
                i, dtypes.int32, shape=[2]) for i in range(3)
        ]
        cell = functools.partial(rnn_cell.BasicLSTMCell, 2, state_is_tuple=True)
        dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq(
            enc_inp, dec_inp, cell(), num_symbols=5, embedding_size=2)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 5), res[0].shape)
        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].c.shape)
        self.assertEqual((2, 2), res[0].h.shape)
        # Test when num_decoder_symbols is provided, the size of decoder output
        # is num_decoder_symbols.
        with variable_scope.variable_scope("decoder_symbols_seq2seq"):
          dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq(
              enc_inp,
              dec_inp,
              cell(),
              num_symbols=5,
              num_decoder_symbols=3,
              embedding_size=2)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 3), res[0].shape)
        # Test externally provided output projection.
        w = variable_scope.get_variable("proj_w", [2, 5])
        b = variable_scope.get_variable("proj_b", [5])
        with variable_scope.variable_scope("proj_seq2seq"):
          dec, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
              enc_inp,
              dec_inp,
              cell(),
              num_symbols=5,
              embedding_size=2,
              output_projection=(w, b))
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        # Raw cell output (width 2): external projection is applied by caller.
        self.assertEqual((2, 2), res[0].shape)
        # Test that previous-feeding model ignores inputs after the first.
        dec_inp2 = [constant_op.constant(0, dtypes.int32, shape=[2])] * 3
        with variable_scope.variable_scope("other"):
          d3, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
              enc_inp,
              dec_inp2,
              cell(),
              num_symbols=5,
              embedding_size=2,
              feed_previous=constant_op.constant(True))
        with variable_scope.variable_scope("other_2"):
          d1, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
              enc_inp,
              dec_inp,
              cell(),
              num_symbols=5,
              embedding_size=2,
              feed_previous=True)
        with variable_scope.variable_scope("other_3"):
          d2, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
              enc_inp,
              dec_inp2,
              cell(),
              num_symbols=5,
              embedding_size=2,
              feed_previous=True)
        sess.run([variables.global_variables_initializer()])
        res1 = sess.run(d1)
        res2 = sess.run(d2)
        res3 = sess.run(d3)
        # Decoder inputs after step 0 differ, so outputs must still match.
        self.assertAllClose(res1, res2)
        self.assertAllClose(res1, res3)
  def testAttentionDecoder1(self):
    """attention_decoder, single attention head, static_rnn encoder."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        cell_fn = lambda: rnn_cell.GRUCell(2)
        cell = cell_fn()
        inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
        enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
        # Stack per-step encoder outputs into a [batch, steps, size] tensor.
        attn_states = array_ops.concat([
            array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
        ], 1)
        dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
        # Create a new cell instance for the decoder, since it uses a
        # different variable scope
        dec, mem = seq2seq_lib.attention_decoder(
            dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 4), res[0].shape)
        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].shape)
  def testAttentionDecoder2(self):
    """attention_decoder with num_heads=2, static_rnn encoder."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        cell_fn = lambda: rnn_cell.GRUCell(2)
        cell = cell_fn()
        inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
        enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
        # Stack per-step encoder outputs into a [batch, steps, size] tensor.
        attn_states = array_ops.concat([
            array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
        ], 1)
        dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
        # Use a new cell instance since the attention decoder uses a
        # different variable scope.
        dec, mem = seq2seq_lib.attention_decoder(
            dec_inp, enc_state, attn_states, cell_fn(),
            output_size=4, num_heads=2)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 4), res[0].shape)
        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].shape)
  def testDynamicAttentionDecoder1(self):
    """attention_decoder over dynamic_rnn encoder outputs (single head)."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        cell_fn = lambda: rnn_cell.GRUCell(2)
        cell = cell_fn()
        inp = constant_op.constant(0.5, shape=[2, 2, 2])
        enc_outputs, enc_state = rnn.dynamic_rnn(
            cell, inp, dtype=dtypes.float32)
        # dynamic_rnn already emits [batch, steps, size]; no reshaping needed.
        attn_states = enc_outputs
        dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
        # Use a new cell instance since the attention decoder uses a
        # different variable scope.
        dec, mem = seq2seq_lib.attention_decoder(
            dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 4), res[0].shape)
        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].shape)
  def testDynamicAttentionDecoder2(self):
    """attention_decoder over dynamic_rnn encoder outputs, num_heads=2."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        cell_fn = lambda: rnn_cell.GRUCell(2)
        cell = cell_fn()
        inp = constant_op.constant(0.5, shape=[2, 2, 2])
        enc_outputs, enc_state = rnn.dynamic_rnn(
            cell, inp, dtype=dtypes.float32)
        # dynamic_rnn already emits [batch, steps, size]; no reshaping needed.
        attn_states = enc_outputs
        dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
        # Use a new cell instance since the attention decoder uses a
        # different variable scope.
        dec, mem = seq2seq_lib.attention_decoder(
            dec_inp, enc_state, attn_states, cell_fn(),
            output_size=4, num_heads=2)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 4), res[0].shape)
        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].shape)
  def testAttentionDecoderStateIsTuple(self):
    """attention_decoder with a 2-layer MultiRNNCell of tuple-state LSTMs."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        single_cell = lambda: rnn_cell.BasicLSTMCell(  # pylint: disable=g-long-lambda
            2, state_is_tuple=True)
        cell_fn = lambda: rnn_cell.MultiRNNCell(  # pylint: disable=g-long-lambda
            cells=[single_cell() for _ in range(2)], state_is_tuple=True)
        cell = cell_fn()
        inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
        enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
        # Stack per-step encoder outputs into a [batch, steps, size] tensor.
        attn_states = array_ops.concat([
            array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
        ], 1)
        dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
        # Use a new cell instance since the attention decoder uses a
        # different variable scope.
        dec, mem = seq2seq_lib.attention_decoder(
            dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 4), res[0].shape)
        res = sess.run([mem])
        # One (c, h) LSTMStateTuple per layer of the MultiRNNCell.
        self.assertEqual(2, len(res[0]))
        self.assertEqual((2, 2), res[0][0].c.shape)
        self.assertEqual((2, 2), res[0][0].h.shape)
        self.assertEqual((2, 2), res[0][1].c.shape)
        self.assertEqual((2, 2), res[0][1].h.shape)
def testDynamicAttentionDecoderStateIsTuple(self):
  """attention_decoder with a default (tuple-state) MultiRNNCell of LSTMs.

  NOTE(review): despite "Dynamic" in the name, this builds the encoder with
  rnn.static_rnn, same as the test above — confirm the intended difference.
  """
  with self.test_session() as sess:
    with variable_scope.variable_scope(
        "root", initializer=init_ops.constant_initializer(0.5)):
      cell_fn = lambda: rnn_cell.MultiRNNCell(  # pylint: disable=g-long-lambda
          cells=[rnn_cell.BasicLSTMCell(2) for _ in range(2)])
      cell = cell_fn()
      inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
      enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
      # [batch, attn_length, attn_size] attention states.
      attn_states = array_ops.concat([
          array_ops.reshape(e, [-1, 1, cell.output_size])
          for e in enc_outputs
      ], 1)
      dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
      # Use a new cell instance since the attention decoder uses a
      # different variable scope.
      dec, mem = seq2seq_lib.attention_decoder(
          dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
      sess.run([variables.global_variables_initializer()])
      res = sess.run(dec)
      self.assertEqual(3, len(res))
      self.assertEqual((2, 4), res[0].shape)
      # Final state: 2-tuple of LSTMStateTuples with [2, 2] c/h tensors.
      res = sess.run([mem])
      self.assertEqual(2, len(res[0]))
      self.assertEqual((2, 2), res[0][0].c.shape)
      self.assertEqual((2, 2), res[0][0].h.shape)
      self.assertEqual((2, 2), res[0][1].c.shape)
      self.assertEqual((2, 2), res[0][1].h.shape)
def testEmbeddingAttentionDecoder(self):
  """embedding_attention_decoder: GRU cell, symbol-id decoder inputs."""
  with self.test_session() as sess:
    with variable_scope.variable_scope(
        "root", initializer=init_ops.constant_initializer(0.5)):
      inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
      cell_fn = lambda: rnn_cell.GRUCell(2)
      cell = cell_fn()
      enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
      # [batch, attn_length, attn_size] attention states.
      attn_states = array_ops.concat([
          array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
      ], 1)
      # Decoder inputs are integer symbol ids; the decoder embeds them.
      dec_inp = [
          constant_op.constant(
              i, dtypes.int32, shape=[2]) for i in range(3)
      ]
      # Use a new cell instance since the attention decoder uses a
      # different variable scope.
      dec, mem = seq2seq_lib.embedding_attention_decoder(
          dec_inp,
          enc_state,
          attn_states,
          cell_fn(),
          num_symbols=4,
          embedding_size=2,
          output_size=3)
      sess.run([variables.global_variables_initializer()])
      # 3 decoder steps, projected to output_size=3.
      res = sess.run(dec)
      self.assertEqual(3, len(res))
      self.assertEqual((2, 3), res[0].shape)
      # GRU final state is a single [2, 2] tensor.
      res = sess.run([mem])
      self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionSeq2Seq(self):
  """embedding_attention_seq2seq: tuple state, flat state, and external
  output projection variants."""
  with self.test_session() as sess:
    with variable_scope.variable_scope(
        "root", initializer=init_ops.constant_initializer(0.5)):
      enc_inp = [
          constant_op.constant(
              1, dtypes.int32, shape=[2]) for i in range(2)
      ]
      dec_inp = [
          constant_op.constant(
              i, dtypes.int32, shape=[2]) for i in range(3)
      ]
      cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
      cell = cell_fn()
      dec, mem = seq2seq_lib.embedding_attention_seq2seq(
          enc_inp,
          dec_inp,
          cell,
          num_encoder_symbols=2,
          num_decoder_symbols=5,
          embedding_size=2)
      sess.run([variables.global_variables_initializer()])
      # 3 decoder steps, one logit row per decoder symbol (5).
      res = sess.run(dec)
      self.assertEqual(3, len(res))
      self.assertEqual((2, 5), res[0].shape)
      # Tuple LSTM state: c and h are each [2, 2].
      res = sess.run([mem])
      self.assertEqual((2, 2), res[0].c.shape)
      self.assertEqual((2, 2), res[0].h.shape)

      # Test with state_is_tuple=False.
      with variable_scope.variable_scope("no_tuple"):
        cell_fn = functools.partial(
            rnn_cell.BasicLSTMCell, 2, state_is_tuple=False)
        cell_nt = cell_fn()
        dec, mem = seq2seq_lib.embedding_attention_seq2seq(
            enc_inp,
            dec_inp,
            cell_nt,
            num_encoder_symbols=2,
            num_decoder_symbols=5,
            embedding_size=2)
      sess.run([variables.global_variables_initializer()])
      res = sess.run(dec)
      self.assertEqual(3, len(res))
      self.assertEqual((2, 5), res[0].shape)
      # Flat LSTM state concatenates c and h: [2, 4].
      res = sess.run([mem])
      self.assertEqual((2, 4), res[0].shape)

      # Test externally provided output projection.
      w = variable_scope.get_variable("proj_w", [2, 5])
      b = variable_scope.get_variable("proj_b", [5])
      with variable_scope.variable_scope("proj_seq2seq"):
        dec, _ = seq2seq_lib.embedding_attention_seq2seq(
            enc_inp,
            dec_inp,
            cell_fn(),
            num_encoder_symbols=2,
            num_decoder_symbols=5,
            embedding_size=2,
            output_projection=(w, b))
      sess.run([variables.global_variables_initializer()])
      res = sess.run(dec)
      self.assertEqual(3, len(res))
      # With an external projection the decoder returns the unprojected
      # cell outputs (size 2), not the 5-way logits.
      self.assertEqual((2, 2), res[0].shape)

      # TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse
      # within a variable scope that already has a weights tensor.
      #
      # # Test that previous-feeding model ignores inputs after the first.
      # dec_inp2 = [
      #     constant_op.constant(
      #         0, dtypes.int32, shape=[2]) for _ in range(3)
      # ]
      # with variable_scope.variable_scope("other"):
      #   d3, _ = seq2seq_lib.embedding_attention_seq2seq(
      #       enc_inp,
      #       dec_inp2,
      #       cell_fn(),
      #       num_encoder_symbols=2,
      #       num_decoder_symbols=5,
      #       embedding_size=2,
      #       feed_previous=constant_op.constant(True))
      # sess.run([variables.global_variables_initializer()])
      # variable_scope.get_variable_scope().reuse_variables()
      # cell = cell_fn()
      # d1, _ = seq2seq_lib.embedding_attention_seq2seq(
      #     enc_inp,
      #     dec_inp,
      #     cell,
      #     num_encoder_symbols=2,
      #     num_decoder_symbols=5,
      #     embedding_size=2,
      #     feed_previous=True)
      # d2, _ = seq2seq_lib.embedding_attention_seq2seq(
      #     enc_inp,
      #     dec_inp2,
      #     cell,
      #     num_encoder_symbols=2,
      #     num_decoder_symbols=5,
      #     embedding_size=2,
      #     feed_previous=True)
      # res1 = sess.run(d1)
      # res2 = sess.run(d2)
      # res3 = sess.run(d3)
      # self.assertAllClose(res1, res2)
      # self.assertAllClose(res1, res3)
def testOne2ManyRNNSeq2Seq(self):
  """one2many_rnn_seq2seq: one encoder feeding two decoders ("0" and "1")."""
  with self.test_session() as sess:
    with variable_scope.variable_scope(
        "root", initializer=init_ops.constant_initializer(0.5)):
      enc_inp = [
          constant_op.constant(
              1, dtypes.int32, shape=[2]) for i in range(2)
      ]
      # Two decoders with different sequence lengths (3 and 4 steps) and
      # different vocabulary sizes (5 and 6).
      dec_inp_dict = {}
      dec_inp_dict["0"] = [
          constant_op.constant(
              i, dtypes.int32, shape=[2]) for i in range(3)
      ]
      dec_inp_dict["1"] = [
          constant_op.constant(
              i, dtypes.int32, shape=[2]) for i in range(4)
      ]
      dec_symbols_dict = {"0": 5, "1": 6}

      def EncCellFn():
        # Fresh encoder cell per model construction.
        return rnn_cell.BasicLSTMCell(2, state_is_tuple=True)

      def DecCellsFn():
        # One fresh decoder cell per decoder key.
        return dict((k, rnn_cell.BasicLSTMCell(2, state_is_tuple=True))
                    for k in dec_symbols_dict)

      outputs_dict, state_dict = (seq2seq_lib.one2many_rnn_seq2seq(
          enc_inp, dec_inp_dict, EncCellFn(), DecCellsFn(),
          2, dec_symbols_dict, embedding_size=2))
      sess.run([variables.global_variables_initializer()])
      res = sess.run(outputs_dict["0"])
      self.assertEqual(3, len(res))
      self.assertEqual((2, 5), res[0].shape)
      res = sess.run(outputs_dict["1"])
      self.assertEqual(4, len(res))
      self.assertEqual((2, 6), res[0].shape)
      res = sess.run([state_dict["0"]])
      self.assertEqual((2, 2), res[0].c.shape)
      self.assertEqual((2, 2), res[0].h.shape)
      res = sess.run([state_dict["1"]])
      self.assertEqual((2, 2), res[0].c.shape)
      self.assertEqual((2, 2), res[0].h.shape)

      # Test that previous-feeding model ignores inputs after the first, i.e.
      # dec_inp_dict2 has different inputs from dec_inp_dict after the first
      # time-step.
      dec_inp_dict2 = {}
      dec_inp_dict2["0"] = [
          constant_op.constant(
              0, dtypes.int32, shape=[2]) for _ in range(3)
      ]
      dec_inp_dict2["1"] = [
          constant_op.constant(
              0, dtypes.int32, shape=[2]) for _ in range(4)
      ]
      with variable_scope.variable_scope("other"):
        outputs_dict3, _ = seq2seq_lib.one2many_rnn_seq2seq(
            enc_inp,
            dec_inp_dict2,
            EncCellFn(),
            DecCellsFn(),
            2,
            dec_symbols_dict,
            embedding_size=2,
            feed_previous=constant_op.constant(True))
      with variable_scope.variable_scope("other_2"):
        outputs_dict1, _ = seq2seq_lib.one2many_rnn_seq2seq(
            enc_inp,
            dec_inp_dict,
            EncCellFn(),
            DecCellsFn(),
            2,
            dec_symbols_dict,
            embedding_size=2,
            feed_previous=True)
      with variable_scope.variable_scope("other_3"):
        outputs_dict2, _ = seq2seq_lib.one2many_rnn_seq2seq(
            enc_inp,
            dec_inp_dict2,
            EncCellFn(),
            DecCellsFn(),
            2,
            dec_symbols_dict,
            embedding_size=2,
            feed_previous=True)
      sess.run([variables.global_variables_initializer()])
      # All three runs must agree because only the first decoder input is
      # consumed when feed_previous is on.
      res1 = sess.run(outputs_dict1["0"])
      res2 = sess.run(outputs_dict2["0"])
      res3 = sess.run(outputs_dict3["0"])
      self.assertAllClose(res1, res2)
      self.assertAllClose(res1, res3)
def testSequenceLoss(self):
  """sequence_loss under the three timestep/batch averaging modes."""
  with self.test_session() as sess:
    # Constant logits -> uniform softmax over 5 classes, so the
    # cross-entropy per example per step is log(5) ~= 1.60944.
    logits = [constant_op.constant(i + 0.5, shape=[2, 5]) for i in range(3)]
    targets = [
        constant_op.constant(
            i, dtypes.int32, shape=[2]) for i in range(3)
    ]
    weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]

    # Averaged over both timesteps and batch: log(5).
    average_loss_per_example = seq2seq_lib.sequence_loss(
        logits,
        targets,
        weights,
        average_across_timesteps=True,
        average_across_batch=True)
    res = sess.run(average_loss_per_example)
    self.assertAllClose(1.60944, res)

    # Summed over 3 timesteps, averaged over batch: 3 * log(5).
    average_loss_per_sequence = seq2seq_lib.sequence_loss(
        logits,
        targets,
        weights,
        average_across_timesteps=False,
        average_across_batch=True)
    res = sess.run(average_loss_per_sequence)
    self.assertAllClose(4.828314, res)

    # No averaging: 2 examples * 3 timesteps * log(5).
    total_loss = seq2seq_lib.sequence_loss(
        logits,
        targets,
        weights,
        average_across_timesteps=False,
        average_across_batch=False)
    res = sess.run(total_loss)
    self.assertAllClose(9.656628, res)
def testSequenceLossByExample(self):
  """sequence_loss_by_example returns a per-batch-element loss vector."""
  with self.test_session() as sess:
    output_classes = 5
    # Constant logits -> uniform softmax, cross-entropy log(5) per step.
    logits = [
        constant_op.constant(
            i + 0.5, shape=[2, output_classes]) for i in range(3)
    ]
    targets = [
        constant_op.constant(
            i, dtypes.int32, shape=[2]) for i in range(3)
    ]
    weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]

    # Averaged over 3 timesteps: log(5) per batch element.
    average_loss_per_example = (seq2seq_lib.sequence_loss_by_example(
        logits, targets, weights, average_across_timesteps=True))
    res = sess.run(average_loss_per_example)
    self.assertAllClose(np.asarray([1.609438, 1.609438]), res)

    # Summed over timesteps: 3 * log(5) per batch element.
    loss_per_sequence = seq2seq_lib.sequence_loss_by_example(
        logits, targets, weights, average_across_timesteps=False)
    res = sess.run(loss_per_sequence)
    self.assertAllClose(np.asarray([4.828314, 4.828314]), res)
# TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse
# within a variable scope that already has a weights tensor.
#
# def testModelWithBucketsScopeAndLoss(self):
# """Test variable scope reuse is not reset after model_with_buckets."""
# classes = 10
# buckets = [(4, 4), (8, 8)]
# with self.test_session():
# # Here comes a sample Seq2Seq model using GRU cells.
# def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
# """Example sequence-to-sequence model that uses GRU cells."""
# def GRUSeq2Seq(enc_inp, dec_inp):
# cell = rnn_cell.MultiRNNCell(
# [rnn_cell.GRUCell(24) for _ in range(2)])
# return seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp,
# cell,
# num_encoder_symbols=classes,
# num_decoder_symbols=classes,
# embedding_size=24)
# targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]
# return seq2seq_lib.model_with_buckets(
# enc_inp,
# dec_inp,
# targets,
# weights,
# buckets,
# GRUSeq2Seq,
# per_example_loss=per_example_loss)
# # Now we construct the copy model.
# inp = [
# array_ops.placeholder(
# dtypes.int32, shape=[None]) for _ in range(8)
# ]
# out = [
# array_ops.placeholder(
# dtypes.int32, shape=[None]) for _ in range(8)
# ]
# weights = [
# array_ops.ones_like(
# inp[0], dtype=dtypes.float32) for _ in range(8)
# ]
# with variable_scope.variable_scope("root"):
# _, losses1 = SampleGRUSeq2Seq(
# inp, out, weights, per_example_loss=False)
# # Now check that we did not accidentally set reuse.
# self.assertEqual(False, variable_scope.get_variable_scope().reuse)
# with variable_scope.variable_scope("new"):
#     _, losses2 = SampleGRUSeq2Seq(
#         inp, out, weights, per_example_loss=True)
# # First loss is scalar, the second one is a 1-dimensional tensor.
# self.assertEqual([], losses1[0].get_shape().as_list())
# self.assertEqual([None], losses2[0].get_shape().as_list())
def testModelWithBuckets(self):
  """Larger tests that does full sequence-to-sequence model training."""
  # We learn to copy 10 symbols in 2 buckets: length 4 and length 8.
  classes = 10
  buckets = [(4, 4), (8, 8)]
  perplexities = [[], []]  # Results for each bucket.
  # Seed every RNG used below so the short training run is deterministic.
  random_seed.set_random_seed(111)
  random.seed(111)
  np.random.seed(111)
  with self.test_session() as sess:
    # We use sampled softmax so we keep output projection separate.
    w = variable_scope.get_variable("proj_w", [24, classes])
    w_t = array_ops.transpose(w)
    b = variable_scope.get_variable("proj_b", [classes])

    # Here comes a sample Seq2Seq model using GRU cells.
    def SampleGRUSeq2Seq(enc_inp, dec_inp, weights):
      """Example sequence-to-sequence model that uses GRU cells."""

      def GRUSeq2Seq(enc_inp, dec_inp):
        cell = rnn_cell.MultiRNNCell(
            [rnn_cell.GRUCell(24) for _ in range(2)], state_is_tuple=True)
        return seq2seq_lib.embedding_attention_seq2seq(
            enc_inp,
            dec_inp,
            cell,
            num_encoder_symbols=classes,
            num_decoder_symbols=classes,
            embedding_size=24,
            output_projection=(w, b))

      # Targets are decoder inputs shifted left by one step.
      targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]

      def SampledLoss(labels, logits):
        labels = array_ops.reshape(labels, [-1, 1])
        return nn_impl.sampled_softmax_loss(
            weights=w_t,
            biases=b,
            labels=labels,
            inputs=logits,
            num_sampled=8,
            num_classes=classes)

      return seq2seq_lib.model_with_buckets(
          enc_inp,
          dec_inp,
          targets,
          weights,
          buckets,
          GRUSeq2Seq,
          softmax_loss_function=SampledLoss)

    # Now we construct the copy model.
    batch_size = 8
    inp = [
        array_ops.placeholder(
            dtypes.int32, shape=[None]) for _ in range(8)
    ]
    out = [
        array_ops.placeholder(
            dtypes.int32, shape=[None]) for _ in range(8)
    ]
    weights = [
        array_ops.ones_like(
            inp[0], dtype=dtypes.float32) for _ in range(8)
    ]
    with variable_scope.variable_scope("root"):
      _, losses = SampleGRUSeq2Seq(inp, out, weights)
      # One clipped-gradient Adam update op per bucket.
      updates = []
      params = variables.global_variables()
      optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5)
      for i in range(len(buckets)):
        full_grads = gradients_impl.gradients(losses[i], params)
        grads, _ = clip_ops.clip_by_global_norm(full_grads, 30.0)
        update = optimizer.apply_gradients(zip(grads, params))
        updates.append(update)
    sess.run([variables.global_variables_initializer()])
    steps = 6
    for _ in range(steps):
      # Train on a random bucket with random symbols in 1..9.
      bucket = random.choice(np.arange(len(buckets)))
      length = buckets[bucket][0]
      i = [
          np.array(
              [np.random.randint(9) + 1 for _ in range(batch_size)],
              dtype=np.int32) for _ in range(length)
      ]
      # 0 is our "GO" symbol here.
      o = [np.array([0] * batch_size, dtype=np.int32)] + i
      feed = {}
      for i1, i2, o1, o2 in zip(inp[:length], i[:length], out[:length],
                                o[:length]):
        feed[i1.name] = i2
        feed[o1.name] = o2
      if length < 8:  # For the 4-bucket, we need the 5th as target.
        feed[out[length].name] = o[length]
      res = sess.run([updates[bucket], losses[bucket]], feed)
      perplexities[bucket].append(math.exp(float(res[1])))
    for bucket in range(len(buckets)):
      if len(perplexities[bucket]) > 1:  # Assert that perplexity went down.
        self.assertLess(perplexities[bucket][-1],  # 20% margin of error.
                        1.2 * perplexities[bucket][0])
def testModelWithBooleanFeedPrevious(self):
  """Test the model behavior when feed_previous is True.

  For example, the following two cases have the same effect:

  - Train `embedding_rnn_seq2seq` with `feed_previous=True`, which contains
    a `embedding_rnn_decoder` with `feed_previous=True` and
    `update_embedding_for_previous=True`. The decoder is fed with "<Go>"
    and outputs "A, B, C".

  - Train `embedding_rnn_seq2seq` with `feed_previous=False`. The decoder
    is fed with "<Go>, A, B".
  """
  num_encoder_symbols = 3
  num_decoder_symbols = 5
  batch_size = 2
  num_enc_timesteps = 2
  num_dec_timesteps = 3

  def TestModel(seq2seq):
    # Builds two copies of `seq2seq` (feed_previous True/False), syncs
    # their weights, performs one update on each, and checks the weights
    # still match.
    with self.test_session(graph=ops.Graph()) as sess:
      random_seed.set_random_seed(111)
      random.seed(111)
      np.random.seed(111)
      enc_inp = [
          constant_op.constant(
              i + 1, dtypes.int32, shape=[batch_size])
          for i in range(num_enc_timesteps)
      ]
      dec_inp_fp_true = [
          constant_op.constant(
              i, dtypes.int32, shape=[batch_size])
          for i in range(num_dec_timesteps)
      ]
      # The feed_previous=False model gets its decoder inputs through
      # placeholders so we can feed the symbols the other model generated.
      dec_inp_holder_fp_false = [
          array_ops.placeholder(
              dtypes.int32, shape=[batch_size])
          for _ in range(num_dec_timesteps)
      ]
      targets = [
          constant_op.constant(
              i + 1, dtypes.int32, shape=[batch_size])
          for i in range(num_dec_timesteps)
      ]
      weights = [
          constant_op.constant(
              1.0, shape=[batch_size]) for i in range(num_dec_timesteps)
      ]

      def ForwardBackward(enc_inp, dec_inp, feed_previous):
        # Builds the model in its own scope and returns the decoder op,
        # the Adam update op, and the variables the scope created.
        scope_name = "fp_{}".format(feed_previous)
        with variable_scope.variable_scope(scope_name):
          dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
          net_variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                                             scope_name)
        optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5)
        update_op = optimizer.minimize(
            seq2seq_lib.sequence_loss(dec_op, targets, weights),
            var_list=net_variables)
        return dec_op, update_op, net_variables

      dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
          enc_inp, dec_inp_fp_true, feed_previous=True)
      _, update_fp_false, variables_fp_false = ForwardBackward(
          enc_inp, dec_inp_holder_fp_false, feed_previous=False)
      sess.run(variables.global_variables_initializer())

      # We only check consistencies between the variables existing in both
      # the models with True and False feed_previous. Variables created by
      # the loop_function in the model with True feed_previous are ignored.
      # Variables are matched by their name after the scope prefix.
      v_false_name_dict = {
          v.name.split("/", 1)[-1]: v
          for v in variables_fp_false
      }
      matched_variables = [(v, v_false_name_dict[v.name.split("/", 1)[-1]])
                           for v in variables_fp_true]
      for v_true, v_false in matched_variables:
        sess.run(state_ops.assign(v_false, v_true))

      # Take the symbols generated by the decoder with feed_previous=True as
      # the true input symbols for the decoder with feed_previous=False.
      dec_fp_true = sess.run(dec_op_fp_true)
      output_symbols_fp_true = np.argmax(dec_fp_true, axis=2)
      dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(),
                                    output_symbols_fp_true[:-1]))
      sess.run(update_fp_true)
      sess.run(update_fp_false, {
          holder: inp
          for holder, inp in zip(dec_inp_holder_fp_false, dec_inp_fp_false)
      })
      # After one equivalent step each, the matched variables must agree.
      for v_true, v_false in matched_variables:
        self.assertAllClose(v_true.eval(), v_false.eval())

  # Model factories covering tuple/flat state and the three seq2seq APIs.
  def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
    cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
    return seq2seq_lib.embedding_rnn_seq2seq(
        enc_inp,
        dec_inp,
        cell,
        num_encoder_symbols,
        num_decoder_symbols,
        embedding_size=2,
        feed_previous=feed_previous)

  def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
    cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
    return seq2seq_lib.embedding_rnn_seq2seq(
        enc_inp,
        dec_inp,
        cell,
        num_encoder_symbols,
        num_decoder_symbols,
        embedding_size=2,
        feed_previous=feed_previous)

  def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
    cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
    return seq2seq_lib.embedding_tied_rnn_seq2seq(
        enc_inp,
        dec_inp,
        cell,
        num_decoder_symbols,
        embedding_size=2,
        feed_previous=feed_previous)

  def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
    cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
    return seq2seq_lib.embedding_tied_rnn_seq2seq(
        enc_inp,
        dec_inp,
        cell,
        num_decoder_symbols,
        embedding_size=2,
        feed_previous=feed_previous)

  def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
    cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
    return seq2seq_lib.embedding_attention_seq2seq(
        enc_inp,
        dec_inp,
        cell,
        num_encoder_symbols,
        num_decoder_symbols,
        embedding_size=2,
        feed_previous=feed_previous)

  def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
    cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
    return seq2seq_lib.embedding_attention_seq2seq(
        enc_inp,
        dec_inp,
        cell,
        num_encoder_symbols,
        num_decoder_symbols,
        embedding_size=2,
        feed_previous=feed_previous)

  for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
                EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple,
                EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple):
    TestModel(model)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| 38.978202
| 86
| 0.601981
| 5,334
| 42,915
| 4.609861
| 0.07799
| 0.049697
| 0.035138
| 0.019521
| 0.783887
| 0.749319
| 0.721095
| 0.700761
| 0.687137
| 0.675058
| 0
| 0.032571
| 0.288873
| 42,915
| 1,100
| 87
| 39.013636
| 0.77315
| 0.154212
| 0
| 0.668241
| 0
| 0
| 0.007538
| 0.000637
| 0
| 0
| 0
| 0.000909
| 0.107438
| 1
| 0.03778
| false
| 0
| 0.029516
| 0.002361
| 0.082645
| 0.001181
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7ce819ce9237fcd0822279c4b4ad2383ea8a5407
| 1,616
|
py
|
Python
|
ossdbtoolsservice/query/data_storage/__init__.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 33
|
2019-05-27T13:04:35.000Z
|
2022-03-17T13:33:05.000Z
|
ossdbtoolsservice/query/data_storage/__init__.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 31
|
2019-06-10T01:55:47.000Z
|
2022-03-09T07:27:49.000Z
|
ossdbtoolsservice/query/data_storage/__init__.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 25
|
2019-05-13T18:39:24.000Z
|
2021-11-16T03:07:33.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Re-export the package's public classes so callers can import them directly
# from `ossdbtoolsservice.query.data_storage`.
from ossdbtoolsservice.query.data_storage.storage_data_reader import StorageDataReader
from ossdbtoolsservice.query.data_storage.service_buffer_file_stream_writer import ServiceBufferFileStreamWriter
from ossdbtoolsservice.query.data_storage.service_buffer_file_stream_reader import ServiceBufferFileStreamReader
from ossdbtoolsservice.query.data_storage.file_stream_factory import FileStreamFactory
from ossdbtoolsservice.query.data_storage.save_as_csv_writer import SaveAsCsvWriter
from ossdbtoolsservice.query.data_storage.save_as_csv_file_stream_factory import SaveAsCsvFileStreamFactory
from ossdbtoolsservice.query.data_storage.save_as_json_writer import SaveAsJsonWriter
from ossdbtoolsservice.query.data_storage.save_as_json_file_stream_factory import SaveAsJsonFileStreamFactory
from ossdbtoolsservice.query.data_storage.save_as_excel_writer import SaveAsExcelWriter
from ossdbtoolsservice.query.data_storage.save_as_excel_writer_factory import SaveAsExcelFileStreamFactory

# Explicit public API of the data_storage package.
__all__ = [
    'FileStreamFactory', 'SaveAsCsvWriter', 'SaveAsJsonWriter', 'SaveAsExcelWriter', 'SaveAsExcelFileStreamFactory',
    'SaveAsJsonFileStreamFactory', 'SaveAsCsvFileStreamFactory', 'ServiceBufferFileStreamWriter',
    'ServiceBufferFileStreamReader', 'StorageDataReader'
]
| 73.454545
| 116
| 0.782178
| 148
| 1,616
| 8.209459
| 0.317568
| 0.17284
| 0.213992
| 0.246914
| 0.401646
| 0.340741
| 0.340741
| 0.340741
| 0.187654
| 0
| 0
| 0
| 0.058787
| 1,616
| 21
| 117
| 76.952381
| 0.798817
| 0.207921
| 0
| 0
| 0
| 0
| 0.173333
| 0.10902
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
7cee0a9ba7f29bf6898fd8366b5ada8790465a30
| 104
|
py
|
Python
|
Python Basics/While Loop/Lab/Task03.py
|
DonikaChervenkova/SoftUni
|
bff579c037ec48f39ed193b34bc3502a32e90732
|
[
"MIT"
] | 1
|
2022-03-16T10:23:04.000Z
|
2022-03-16T10:23:04.000Z
|
Python Basics/While Loop/Lab/Task03.py
|
IvanTodorovBG/SoftUni
|
7b667f6905d9f695ab1484efbb02b6715f6d569e
|
[
"MIT"
] | null | null | null |
Python Basics/While Loop/Lab/Task03.py
|
IvanTodorovBG/SoftUni
|
7b667f6905d9f695ab1484efbb02b6715f6d569e
|
[
"MIT"
] | 1
|
2021-12-04T12:30:57.000Z
|
2021-12-04T12:30:57.000Z
|
# Read a target value, then keep reading integers and accumulating them
# until the running total reaches (or exceeds) the target; print the total.
target = int(input())
total = 0
while total < target:
    total += int(input())
print(total)
| 10.4
| 20
| 0.567308
| 15
| 104
| 3.933333
| 0.533333
| 0.271186
| 0.508475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013158
| 0.269231
| 104
| 9
| 21
| 11.555556
| 0.763158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
6b17667c63ee699f67543db9de9de99bc805f59c
| 122
|
py
|
Python
|
gui/apps.py
|
narsi84/digilib
|
eec9632e7b7d3cb64a9832e313f47719621219c6
|
[
"MIT"
] | null | null | null |
gui/apps.py
|
narsi84/digilib
|
eec9632e7b7d3cb64a9832e313f47719621219c6
|
[
"MIT"
] | null | null | null |
gui/apps.py
|
narsi84/digilib
|
eec9632e7b7d3cb64a9832e313f47719621219c6
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.apps import AppConfig
class GuiConfig(AppConfig):
    """Django application configuration for the ``gui`` app."""

    # App label used by Django's application registry.
    name = 'gui'
| 15.25
| 39
| 0.778689
| 15
| 122
| 6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163934
| 122
| 7
| 40
| 17.428571
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0.02459
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
6b3abb8f30388b08a82b7a065add3a309e439bc7
| 106
|
py
|
Python
|
assignments/essential/jinpackage/count.py
|
JinTang96/cv
|
530a3082bf20a9d624d09a1ace1bce6703e524ac
|
[
"MIT"
] | null | null | null |
assignments/essential/jinpackage/count.py
|
JinTang96/cv
|
530a3082bf20a9d624d09a1ace1bce6703e524ac
|
[
"MIT"
] | null | null | null |
assignments/essential/jinpackage/count.py
|
JinTang96/cv
|
530a3082bf20a9d624d09a1ace1bce6703e524ac
|
[
"MIT"
] | null | null | null |
# 2. Write a function that count 1 to n in the module.
def count(n):
    """Print the list of integers from 1 to n inclusive."""
    print(list(range(1, n + 1)))
| 26.5
| 54
| 0.641509
| 24
| 106
| 2.833333
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04878
| 0.226415
| 106
| 3
| 55
| 35.333333
| 0.780488
| 0.490566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
6b45128f58ded7b150dcb763a0e88c5175ac0e32
| 89
|
py
|
Python
|
checkin/apps.py
|
mahoyen/web
|
1d190a86e3277315804bfcc0b8f9abd4f9c1d780
|
[
"MIT"
] | 1
|
2020-07-16T02:41:35.000Z
|
2020-07-16T02:41:35.000Z
|
checkin/apps.py
|
borsezf2/dscatc-webapp
|
68867cb528cdd1698b251156d26049b384357c7d
|
[
"MIT"
] | 22
|
2021-02-26T20:32:50.000Z
|
2021-10-21T06:21:20.000Z
|
checkin/apps.py
|
borsezf2/dscatc-webapp
|
68867cb528cdd1698b251156d26049b384357c7d
|
[
"MIT"
] | 9
|
2019-09-25T08:22:52.000Z
|
2019-10-02T14:11:33.000Z
|
from django.apps import AppConfig
class CheckinConfig(AppConfig):
    """Django application configuration for the ``checkin`` app."""

    # App label used by Django's application registry.
    name = 'checkin'
| 14.833333
| 33
| 0.752809
| 10
| 89
| 6.7
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168539
| 89
| 5
| 34
| 17.8
| 0.905405
| 0
| 0
| 0
| 0
| 0
| 0.078652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
86099710b6f8db2187636f8e6f63fb2cb177b87d
| 5,475
|
py
|
Python
|
project-euler/13.py
|
hydroo/coding-and-math-exercises
|
c0c9b8ae48e043b0809e4c592444f3e4bc3222d8
|
[
"CC0-1.0"
] | 2
|
2015-05-29T21:03:26.000Z
|
2019-11-11T02:10:53.000Z
|
project-euler/13.py
|
hydroo/coding-and-math-exercises
|
c0c9b8ae48e043b0809e4c592444f3e4bc3222d8
|
[
"CC0-1.0"
] | null | null | null |
project-euler/13.py
|
hydroo/coding-and-math-exercises
|
c0c9b8ae48e043b0809e4c592444f3e4bc3222d8
|
[
"CC0-1.0"
] | null | null | null |
#! /usr/bin/python
nums = (37107287533902102798797998220837590246510135740250
,46376937677490009712648124896970078050417018260538
,74324986199524741059474233309513058123726617309629
,91942213363574161572522430563301811072406154908250
,23067588207539346171171980310421047513778063246676
,89261670696623633820136378418383684178734361726757
,28112879812849979408065481931592621691275889832738
,44274228917432520321923589422876796487670272189318
,47451445736001306439091167216856844588711603153276
,70386486105843025439939619828917593665686757934951
,62176457141856560629502157223196586755079324193331
,64906352462741904929101432445813822663347944758178
,92575867718337217661963751590579239728245598838407
,58203565325359399008402633568948830189458628227828
,80181199384826282014278194139940567587151170094390
,35398664372827112653829987240784473053190104293586
,86515506006295864861532075273371959191420517255829
,71693888707715466499115593487603532921714970056938
,54370070576826684624621495650076471787294438377604
,53282654108756828443191190634694037855217779295145
,36123272525000296071075082563815656710885258350721
,45876576172410976447339110607218265236877223636045
,17423706905851860660448207621209813287860733969412
,81142660418086830619328460811191061556940512689692
,51934325451728388641918047049293215058642563049483
,62467221648435076201727918039944693004732956340691
,15732444386908125794514089057706229429197107928209
,55037687525678773091862540744969844508330393682126
,18336384825330154686196124348767681297534375946515
,80386287592878490201521685554828717201219257766954
,78182833757993103614740356856449095527097864797581
,16726320100436897842553539920931837441497806860984
,48403098129077791799088218795327364475675590848030
,87086987551392711854517078544161852424320693150332
,59959406895756536782107074926966537676326235447210
,69793950679652694742597709739166693763042633987085
,41052684708299085211399427365734116182760315001271
,65378607361501080857009149939512557028198746004375
,35829035317434717326932123578154982629742552737307
,94953759765105305946966067683156574377167401875275
,88902802571733229619176668713819931811048770190271
,25267680276078003013678680992525463401061632866526
,36270218540497705585629946580636237993140746255962
,24074486908231174977792365466257246923322810917141
,91430288197103288597806669760892938638285025333403
,34413065578016127815921815005561868836468420090470
,23053081172816430487623791969842487255036638784583
,11487696932154902810424020138335124462181441773470
,63783299490636259666498587618221225225512486764533
,67720186971698544312419572409913959008952310058822
,95548255300263520781532296796249481641953868218774
,76085327132285723110424803456124867697064507995236
,37774242535411291684276865538926205024910326572967
,23701913275725675285653248258265463092207058596522
,29798860272258331913126375147341994889534765745501
,18495701454879288984856827726077713721403798879715
,38298203783031473527721580348144513491373226651381
,34829543829199918180278916522431027392251122869539
,40957953066405232632538044100059654939159879593635
,29746152185502371307642255121183693803580388584903
,41698116222072977186158236678424689157993532961922
,62467957194401269043877107275048102390895523597457
,23189706772547915061505504953922979530901129967519
,86188088225875314529584099251203829009407770775672
,11306739708304724483816533873502340845647058077308
,82959174767140363198008187129011875491310547126581
,97623331044818386269515456334926366572897563400500
,42846280183517070527831839425882145521227251250327
,55121603546981200581762165212827652751691296897789
,32238195734329339946437501907836945765883352399886
,75506164965184775180738168837861091527357929701337
,62177842752192623401942399639168044983993173312731
,32924185707147349566916674687634660915035914677504
,99518671430235219628894890102423325116913619626622
,73267460800591547471830798392868535206946944540724
,76841822524674417161514036427982273348055556214818
,97142617910342598647204516893989422179826088076852
,87783646182799346313767754307809363333018982642090
,10848802521674670883215120185883543223812876952786
,71329612474782464538636993009049310363619763878039
,62184073572399794223406235393808339651327408011116
,66627891981488087797941876876144230030984490851411
,60661826293682836764744779239180335110989069790714
,85786944089552990653640447425576083659976645795096
,66024396409905389607120198219976047599490197230297
,64913982680032973156037120041377903785566085089252
,16730939319872750275468906903707539413042652315011
,94809377245048795150954100921645863754710598436791
,78639167021187492431995700641917969777599028300699
,15368713711936614952811305876380278410754449733078
,40789923115535562561142322423255033685442488917353
,44889911501440648020369068063960672322193204149535
,41503128880339536053299340368006977710650566631954
,81234880673210146739058568557934581403627822703280
,82616570773948327592232845941706525094512325230608
,22918802058777319719839450180888072429661980811197
,77158542502016545090413245809786882778948721859617
,72107838435069186155435662884062257473692284509516
,20849603980134001723930671666823555245252804609722
,53503534226472524250874054075591789781264330331690 )
# Sum all one hundred 50-digit numbers (Project Euler #13 asks for the
# first ten digits of this total). `sum()` replaces the manual loop, and
# `print(res)` is valid in both Python 2 and 3, unlike the original
# Python-2-only `print res` statement.
res = sum(nums)
print(res)
| 49.324324
| 58
| 0.920913
| 114
| 5,475
| 44.22807
| 0.964912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.970503
| 0.058813
| 5,475
| 110
| 59
| 49.772727
| 0.007957
| 0.003105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.009615
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
861aefe5d6b193c96144e3a1d5c3d6871cc34e1f
| 3,565
|
py
|
Python
|
multiworld/envs/pygame/__init__.py
|
szk9876/multiworld
|
c90149d2913280298d5c54236ac40b793b0c2632
|
[
"MIT"
] | null | null | null |
multiworld/envs/pygame/__init__.py
|
szk9876/multiworld
|
c90149d2913280298d5c54236ac40b793b0c2632
|
[
"MIT"
] | null | null | null |
multiworld/envs/pygame/__init__.py
|
szk9876/multiworld
|
c90149d2913280298d5c54236ac40b793b0c2632
|
[
"MIT"
] | null | null | null |
from gym.envs.registration import register
import logging
LOGGER = logging.getLogger(__name__)
_REGISTERED = False
def register_custom_envs():
    """Register the multiworld pygame environments with gym.

    Safe to call repeatedly: a module-level flag turns repeat calls into
    no-ops so gym never sees a duplicate environment id.
    """
    global _REGISTERED
    if _REGISTERED:
        return
    _REGISTERED = True
    LOGGER.info("Registering multiworld pygame gym environments")

    point2d_entry = 'multiworld.envs.pygame.point2d:Point2DEnv'
    point2d_wall_entry = 'multiworld.envs.pygame.point2d:Point2DWallEnv'

    # One spec per environment: (id, entry_point, tags, kwargs),
    # registered below in this exact order.
    env_specs = [
        (
            'Point2DLargeEnv-offscreen-v0',
            point2d_entry,
            {'git-commit-hash': '166f0f3', 'author': 'Vitchyr'},
            {
                'images_are_rgb': True,
                'target_radius': 1,
                'ball_radius': 1,
                'render_onscreen': False,
            },
        ),
        (
            'Point2DLargeEnv-onscreen-v0',
            point2d_entry,
            {'git-commit-hash': '166f0f3', 'author': 'Vitchyr'},
            {
                'images_are_rgb': True,
                'target_radius': 1,
                'ball_radius': 1,
                'render_onscreen': True,
            },
        ),
        (
            'Point2DWalls-corner-v0',
            point2d_wall_entry,
            {'author': 'Kyle'},
            {
                'wall_shape': 'maze',
                'initial_position': (-8, -8),
                'images_are_rgb': True,
                'target_radius': 0,
                'ball_radius': 0.25,
                'render_onscreen': False,
                'fixed_goal': False,
                'randomize_position_on_reset': False,
                'render_size': 84,
                'boundary_dist': 10,
                'action_limit': 1.0,
                'show_goal': False,
            },
        ),
        (
            'Point2D-center-v0',
            point2d_entry,
            {'author': 'Kyle'},
            {
                'initial_position': (0, 0),
                'images_are_rgb': True,
                'target_radius': 0,
                'ball_radius': 0.25,
                'render_onscreen': False,
                'fixed_goal': False,
                'randomize_position_on_reset': False,
                'render_size': 84,
                'boundary_dist': 100,
                'action_limit': 1.0,
                'show_goal': False,
            },
        ),
        (
            'Point2D-center-v2',
            point2d_entry,
            {'author': 'Saurabh'},
            {
                'initial_position': (0, 0),
                'images_are_rgb': True,
                'target_radius': 0.5,
                'ball_radius': 0.25,
                'render_onscreen': False,
                'fixed_goal': (3.5, 3.5),
                'randomize_position_on_reset': False,
                'render_size': 84,
                'boundary_dist': 4,
                'action_limit': 1.0,
                'show_goal': False,
            },
        ),
        (
            'Point2DWalls-corner-v2',
            point2d_wall_entry,
            {'author': 'Saurabh'},
            {
                'wall_shape': '--',
                'initial_position': (0, -4),
                'images_are_rgb': True,
                'target_radius': 0.5,
                'ball_radius': 0.25,
                'render_onscreen': False,
                'fixed_goal': (0, 3.5),
                'randomize_position_on_reset': False,
                'render_size': 84,
                'boundary_dist': 4,
                'action_limit': 1.0,
                'show_goal': False,
            },
        ),
    ]
    for env_id, entry_point, tags, kwargs in env_specs:
        register(id=env_id, entry_point=entry_point, tags=tags, kwargs=kwargs)


register_custom_envs()
| 27.007576
| 68
| 0.501543
| 333
| 3,565
| 5.111111
| 0.24024
| 0.032902
| 0.070505
| 0.084606
| 0.740306
| 0.740306
| 0.740306
| 0.73678
| 0.73678
| 0.59577
| 0
| 0.041871
| 0.370266
| 3,565
| 131
| 69
| 27.21374
| 0.716258
| 0
| 0
| 0.636364
| 0
| 0
| 0.360449
| 0.129313
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008264
| false
| 0
| 0.016529
| 0
| 0.033058
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
861ffd1f8c52997d9ec4325c68e4d23aec753915
| 343
|
py
|
Python
|
spearmint/sampling/__init__.py
|
fernandezdaniel/Spearmint
|
3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84
|
[
"RSA-MD"
] | 1,590
|
2015-01-02T19:11:29.000Z
|
2022-03-31T13:36:16.000Z
|
spearmint/sampling/__init__.py
|
fernandezdaniel/Spearmint
|
3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84
|
[
"RSA-MD"
] | 99
|
2015-02-20T06:45:49.000Z
|
2021-12-06T13:28:44.000Z
|
spearmint/sampling/__init__.py
|
fernandezdaniel/Spearmint
|
3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84
|
[
"RSA-MD"
] | 366
|
2015-01-17T20:29:49.000Z
|
2022-02-21T16:22:31.000Z
|
# Explicit relative imports: the implicit form ("from abstract_sampler
# import ...") is Python-2-only and breaks under Python 3; the explicit
# form (PEP 328) works on Python 2.6+ and 3.
from .abstract_sampler import AbstractSampler
from .slice_sampler import SliceSampler
from .whitened_prior_slice_sampler import WhitenedPriorSliceSampler
from .elliptical_slice_sampler import EllipticalSliceSampler

# Public API of the sampling package.
__all__ = ["AbstractSampler", "SliceSampler", "WhitenedPriorSliceSampler", "EllipticalSliceSampler"]
| 57.166667
| 100
| 0.804665
| 28
| 343
| 9.464286
| 0.464286
| 0.196226
| 0.203774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154519
| 343
| 6
| 100
| 57.166667
| 0.913793
| 0
| 0
| 0
| 0
| 0
| 0.215116
| 0.136628
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
8658813448afef752d768bb87e84d83a145883b7
| 79
|
py
|
Python
|
encrypt/reverse_encrypt.py
|
edgells/python-commons
|
38c0aa0ec10304a4147ea231c92c9e34da462052
|
[
"MIT"
] | null | null | null |
encrypt/reverse_encrypt.py
|
edgells/python-commons
|
38c0aa0ec10304a4147ea231c92c9e34da462052
|
[
"MIT"
] | null | null | null |
encrypt/reverse_encrypt.py
|
edgells/python-commons
|
38c0aa0ec10304a4147ea231c92c9e34da462052
|
[
"MIT"
] | null | null | null |
# Demo: "encrypt" a string by reversing it.
target_str = "hello python world"

# A full negative-step slice walks the string back to front;
# [::-1] is equivalent to the explicit [-1::-1] form.
print(target_str[::-1])
| 15.8
| 33
| 0.721519
| 12
| 79
| 4.583333
| 0.75
| 0.327273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028986
| 0.126582
| 79
| 4
| 34
| 19.75
| 0.768116
| 0.189873
| 0
| 0
| 0
| 0
| 0.290323
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
869724dbb9d849f8284d2b7696d35a28fb3d893d
| 2,811
|
py
|
Python
|
z2/part3/updated_part2_batch/jm/parser_errors_2/193497097.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 1
|
2020-04-16T12:13:47.000Z
|
2020-04-16T12:13:47.000Z
|
z2/part3/updated_part2_batch/jm/parser_errors_2/193497097.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:50:15.000Z
|
2020-05-19T14:58:30.000Z
|
z2/part3/updated_part2_batch/jm/parser_errors_2/193497097.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:45:13.000Z
|
2020-06-09T19:18:31.000Z
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 193497097
"""
"""
random actions, total chaos
"""
# Auto-generated regression trace for the `part1` gamma game module.
# Every assert pins the exact return value recorded for this scenario,
# so statement order must not change.
# NOTE(review): gamma_move(board, a, b, c) presumably takes a player id
# followed by field coordinates — confirm against part1's signature.
board = gamma_new(3, 4, 3, 5)
assert board is not None
# Opening moves and capability probes for players 1-3.
assert gamma_move(board, 1, 0, 3) == 1
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_move(board, 2, 2, 0) == 1
assert gamma_free_fields(board, 2) == 9
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 2, 1) == 1
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 2, 0, 1) == 1
assert gamma_move(board, 2, 1, 2) == 1
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 3, 0, 0) == 1
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 1, 0) == 1
assert gamma_busy_fields(board, 1) == 3
# Player 1 spends its golden move; later attempts on taken fields fail.
assert gamma_golden_move(board, 1, 1, 2) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 1, 2, 0) == 0
# Snapshot 1: textual board state after the golden move.
board142535448 = gamma_board(board)
assert board142535448 is not None
assert board142535448 == ("1..\n" ".12\n" "213\n" "312\n")
del board142535448
board142535448 = None
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_golden_possible(board, 2) == 1
# Snapshot 2: no legal moves happened, so the board is unchanged.
board838097404 = gamma_board(board)
assert board838097404 is not None
assert board838097404 == ("1..\n" ".12\n" "213\n" "312\n")
del board838097404
board838097404 = None
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_free_fields(board, 2) == 3
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_golden_possible(board, 3) == 1
# The only successful move in this stretch: player 1 takes (1, 3).
assert gamma_move(board, 1, 1, 3) == 1
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 3, 1, 2) == 0
# Snapshot 3: board reflects player 1's new field in the top row.
board284563281 = gamma_board(board)
assert board284563281 is not None
assert board284563281 == ("11.\n" ".12\n" "213\n" "312\n")
del board284563281
board284563281 = None
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_move(board, 2, 3, 2) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_golden_possible(board, 3) == 1
# Player 3's golden move is rejected (out-of-range/illegal target).
assert gamma_golden_move(board, 3, 3, 1) == 0
assert gamma_move(board, 1, 3, 2) == 0
assert gamma_free_fields(board, 2) == 2
assert gamma_move(board, 3, 1, 0) == 0
# Snapshot 4: final state, identical to snapshot 3.
board886503188 = gamma_board(board)
assert board886503188 is not None
assert board886503188 == ("11.\n" ".12\n" "213\n" "312\n")
del board886503188
board886503188 = None
# Release the underlying game object.
gamma_delete(board)
| 28.683673
| 58
| 0.695126
| 483
| 2,811
| 3.886128
| 0.086957
| 0.257858
| 0.247736
| 0.330314
| 0.626532
| 0.582312
| 0.540757
| 0.336708
| 0.291955
| 0.256793
| 0
| 0.163352
| 0.159374
| 2,811
| 97
| 59
| 28.979381
| 0.630978
| 0
| 0
| 0.181818
| 0
| 0
| 0.029379
| 0
| 0
| 0
| 0
| 0
| 0.688312
| 1
| 0
| false
| 0
| 0.012987
| 0
| 0.012987
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
86cf5ff1f1bec5e1c0d650d428c95182f9c4dc59
| 295
|
py
|
Python
|
Participants/Whos_Rem/main/__init__.py
|
python-discord/game-jam-2020
|
cdcb1c2c9246825518a29b05cc39a679b36d666e
|
[
"MIT"
] | 15
|
2020-04-17T12:02:14.000Z
|
2022-03-16T03:01:34.000Z
|
Participants/Whos_Rem/main/__init__.py
|
python-discord/game-jam-2020
|
cdcb1c2c9246825518a29b05cc39a679b36d666e
|
[
"MIT"
] | 9
|
2020-04-25T01:57:16.000Z
|
2020-04-29T11:42:34.000Z
|
Participants/Whos_Rem/main/__init__.py
|
python-discord/game-jam-2020
|
cdcb1c2c9246825518a29b05cc39a679b36d666e
|
[
"MIT"
] | 55
|
2020-04-17T12:01:11.000Z
|
2021-12-28T10:14:02.000Z
|
from .settings import Settings
from .main_menu import MainMenu
from .song_select import SongSelection
from .perspective_objects import Shape, ShapeManager
from .display.input_tools import Button, Slider
from .display.utility import ListFunctions, ColourBlend
from .play_screen import GameScreen
| 36.875
| 55
| 0.854237
| 38
| 295
| 6.5
| 0.631579
| 0.089069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105085
| 295
| 7
| 56
| 42.142857
| 0.935606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.