hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
486f0a731365bfc5a6b660c0054e3908325c503d
22
py
Python
tests/gtrtest/__init__.py
plewis/phycas
9f5a4d9b2342dab907d14a46eb91f92ad80a5605
[ "MIT" ]
3
2015-09-24T23:12:57.000Z
2021-04-12T07:07:01.000Z
tests/gtrtest/__init__.py
plewis/phycas
9f5a4d9b2342dab907d14a46eb91f92ad80a5605
[ "MIT" ]
null
null
null
tests/gtrtest/__init__.py
plewis/phycas
9f5a4d9b2342dab907d14a46eb91f92ad80a5605
[ "MIT" ]
1
2015-11-23T10:35:43.000Z
2015-11-23T10:35:43.000Z
from gtrtest import *
11
21
0.772727
3
22
5.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.181818
22
1
22
22
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
6fa18635fb3f62309855602f9018e6aa8f8fb09f
187
py
Python
app/source/geo_django_rf/restapi/views/__init__.py
JanNash/geo-django-rf-server
57a2d12204cdd8abceaa1c46c22f2947a8d45c20
[ "BSD-3-Clause" ]
null
null
null
app/source/geo_django_rf/restapi/views/__init__.py
JanNash/geo-django-rf-server
57a2d12204cdd8abceaa1c46c22f2947a8d45c20
[ "BSD-3-Clause" ]
null
null
null
app/source/geo_django_rf/restapi/views/__init__.py
JanNash/geo-django-rf-server
57a2d12204cdd8abceaa1c46c22f2947a8d45c20
[ "BSD-3-Clause" ]
null
null
null
from .user_view_set import UserViewSet from .group_view_set import GroupViewSet from .profile_view_set import ProfileViewSet __all__ = ['UserViewSet', 'GroupViewSet', 'ProfileViewSet']
26.714286
59
0.823529
22
187
6.545455
0.5
0.145833
0.270833
0
0
0
0
0
0
0
0
0
0.101604
187
6
60
31.166667
0.857143
0
0
0
0
0
0.197861
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
6fe5d3fc1678d65fd18e8ebb5876204bf458458b
46
py
Python
script.py
jonmabale/automate-the-boring-stuff
e9057ca38c8c59a423fcea88f362140bd40ebf5e
[ "MIT" ]
null
null
null
script.py
jonmabale/automate-the-boring-stuff
e9057ca38c8c59a423fcea88f362140bd40ebf5e
[ "MIT" ]
null
null
null
script.py
jonmabale/automate-the-boring-stuff
e9057ca38c8c59a423fcea88f362140bd40ebf5e
[ "MIT" ]
null
null
null
#! /usr/local/bin/env python3 # Sandbox file
11.5
29
0.695652
7
46
4.571429
1
0
0
0
0
0
0
0
0
0
0
0.025641
0.152174
46
3
30
15.333333
0.794872
0.891304
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
6ff2d7e0b80ef42bea493ae5d57e41f4e2684a01
132
py
Python
project/learning/admin.py
DmitrySevostianov/learning_test_project
f41eb38283a572ee6d11ee3b99da8eebab039d89
[ "MIT" ]
null
null
null
project/learning/admin.py
DmitrySevostianov/learning_test_project
f41eb38283a572ee6d11ee3b99da8eebab039d89
[ "MIT" ]
null
null
null
project/learning/admin.py
DmitrySevostianov/learning_test_project
f41eb38283a572ee6d11ee3b99da8eebab039d89
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Course, Lesson admin.site.register(Course) admin.site.register(Lesson)
18.857143
35
0.772727
18
132
5.666667
0.555556
0.176471
0.333333
0
0
0
0
0
0
0
0
0
0.143939
132
6
36
22
0.902655
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b5002230c75bb8e0da487ac3ad22219ca2331baa
8,740
py
Python
data.py
pkumar0508/project-euler
ffe91e7d02173142d01b45dded487b13582010fb
[ "Apache-2.0" ]
null
null
null
data.py
pkumar0508/project-euler
ffe91e7d02173142d01b45dded487b13582010fb
[ "Apache-2.0" ]
null
null
null
data.py
pkumar0508/project-euler
ffe91e7d02173142d01b45dded487b13582010fb
[ "Apache-2.0" ]
null
null
null
def parse_grid(s): return [[int(x) for x in row.split()] for row in s.strip().split('\n')] def parse_long_string(s): return ''.join(s.strip().split('\n')) def parse_number_list(s): return [int(x) for x in s.strip().split('\n')] problem0008 = ''' 73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450 ''' problem0011 = ''' 08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 
29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48 ''' problem0013 = ''' 37107287533902102798797998220837590246510135740250 46376937677490009712648124896970078050417018260538 74324986199524741059474233309513058123726617309629 91942213363574161572522430563301811072406154908250 23067588207539346171171980310421047513778063246676 89261670696623633820136378418383684178734361726757 28112879812849979408065481931592621691275889832738 44274228917432520321923589422876796487670272189318 47451445736001306439091167216856844588711603153276 70386486105843025439939619828917593665686757934951 62176457141856560629502157223196586755079324193331 64906352462741904929101432445813822663347944758178 92575867718337217661963751590579239728245598838407 58203565325359399008402633568948830189458628227828 80181199384826282014278194139940567587151170094390 35398664372827112653829987240784473053190104293586 86515506006295864861532075273371959191420517255829 71693888707715466499115593487603532921714970056938 54370070576826684624621495650076471787294438377604 53282654108756828443191190634694037855217779295145 36123272525000296071075082563815656710885258350721 45876576172410976447339110607218265236877223636045 17423706905851860660448207621209813287860733969412 81142660418086830619328460811191061556940512689692 51934325451728388641918047049293215058642563049483 62467221648435076201727918039944693004732956340691 15732444386908125794514089057706229429197107928209 55037687525678773091862540744969844508330393682126 18336384825330154686196124348767681297534375946515 
80386287592878490201521685554828717201219257766954 78182833757993103614740356856449095527097864797581 16726320100436897842553539920931837441497806860984 48403098129077791799088218795327364475675590848030 87086987551392711854517078544161852424320693150332 59959406895756536782107074926966537676326235447210 69793950679652694742597709739166693763042633987085 41052684708299085211399427365734116182760315001271 65378607361501080857009149939512557028198746004375 35829035317434717326932123578154982629742552737307 94953759765105305946966067683156574377167401875275 88902802571733229619176668713819931811048770190271 25267680276078003013678680992525463401061632866526 36270218540497705585629946580636237993140746255962 24074486908231174977792365466257246923322810917141 91430288197103288597806669760892938638285025333403 34413065578016127815921815005561868836468420090470 23053081172816430487623791969842487255036638784583 11487696932154902810424020138335124462181441773470 63783299490636259666498587618221225225512486764533 67720186971698544312419572409913959008952310058822 95548255300263520781532296796249481641953868218774 76085327132285723110424803456124867697064507995236 37774242535411291684276865538926205024910326572967 23701913275725675285653248258265463092207058596522 29798860272258331913126375147341994889534765745501 18495701454879288984856827726077713721403798879715 38298203783031473527721580348144513491373226651381 34829543829199918180278916522431027392251122869539 40957953066405232632538044100059654939159879593635 29746152185502371307642255121183693803580388584903 41698116222072977186158236678424689157993532961922 62467957194401269043877107275048102390895523597457 23189706772547915061505504953922979530901129967519 86188088225875314529584099251203829009407770775672 11306739708304724483816533873502340845647058077308 82959174767140363198008187129011875491310547126581 97623331044818386269515456334926366572897563400500 42846280183517070527831839425882145521227251250327 
55121603546981200581762165212827652751691296897789 32238195734329339946437501907836945765883352399886 75506164965184775180738168837861091527357929701337 62177842752192623401942399639168044983993173312731 32924185707147349566916674687634660915035914677504 99518671430235219628894890102423325116913619626622 73267460800591547471830798392868535206946944540724 76841822524674417161514036427982273348055556214818 97142617910342598647204516893989422179826088076852 87783646182799346313767754307809363333018982642090 10848802521674670883215120185883543223812876952786 71329612474782464538636993009049310363619763878039 62184073572399794223406235393808339651327408011116 66627891981488087797941876876144230030984490851411 60661826293682836764744779239180335110989069790714 85786944089552990653640447425576083659976645795096 66024396409905389607120198219976047599490197230297 64913982680032973156037120041377903785566085089252 16730939319872750275468906903707539413042652315011 94809377245048795150954100921645863754710598436791 78639167021187492431995700641917969777599028300699 15368713711936614952811305876380278410754449733078 40789923115535562561142322423255033685442488917353 44889911501440648020369068063960672322193204149535 41503128880339536053299340368006977710650566631954 81234880673210146739058568557934581403627822703280 82616570773948327592232845941706525094512325230608 22918802058777319719839450180888072429661980811197 77158542502016545090413245809786882778948721859617 72107838435069186155435662884062257473692284509516 20849603980134001723930671666823555245252804609722 53503534226472524250874054075591789781264330331690 ''' problem0018 = ''' 75 95 64 17 47 82 18 35 87 10 20 04 82 47 65 19 01 23 75 03 34 88 02 77 73 07 63 67 99 65 04 28 06 16 70 92 41 41 26 56 83 40 80 70 33 41 48 72 33 47 32 37 16 94 29 53 71 44 65 25 43 91 52 97 51 14 70 11 33 28 77 73 17 78 39 68 17 57 91 71 52 38 17 14 91 43 58 50 27 29 48 63 66 04 68 89 53 67 30 73 16 69 87 40 31 04 62 98 27 23 09 70 98 73 93 
38 53 60 04 23 ''' def readCipher1(): with open('cipher1.txt') as f: lines = f.readlines() return [int(x) for x in lines[0].strip().split(',')] def readRoman(): with open('roman.txt') as f: lines = f.readlines() return [x.strip() for x in lines] def readkeylog(): with open('keylog.txt') as f: lines = f.readlines() return [x.strip() for x in lines] def readpoker(): with open('poker.txt') as f: lines = f.readlines() return [(x.split()[:5], x.split()[5:]) for x in lines]
44.365482
60
0.861899
768
8,740
9.802083
0.33724
0.003188
0.004782
0.005845
0.032811
0.030951
0.024309
0.016206
0.012487
0.012487
0
0.915942
0.117963
8,740
196
61
44.591837
0.060579
0
0
0.053763
0
0
0.904728
0.702247
0
1
0
0
0
1
0.037634
false
0
0
0.016129
0.075269
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
5
b5158a4cc4c39f7f232fb11d618525122361b4ff
19,645
py
Python
tests/integration/list_submission_tests.py
dael-victoria-reyes/data-act-broker-backend
f83c7cad29cac24d95f45a262710dc1564de7dc1
[ "CC0-1.0" ]
1
2019-06-22T21:53:16.000Z
2019-06-22T21:53:16.000Z
tests/integration/list_submission_tests.py
dael-victoria-reyes/data-act-broker-backend
f83c7cad29cac24d95f45a262710dc1564de7dc1
[ "CC0-1.0" ]
null
null
null
tests/integration/list_submission_tests.py
dael-victoria-reyes/data-act-broker-backend
f83c7cad29cac24d95f45a262710dc1564de7dc1
[ "CC0-1.0" ]
null
null
null
from dataactcore.interfaces.db import GlobalDB from dataactcore.models.userModel import User from dataactcore.models.lookups import PUBLISH_STATUS_DICT, FILE_TYPE_DICT, FILE_STATUS_DICT, JOB_TYPE_DICT from dataactvalidator.health_check import create_app from tests.integration.baseTestAPI import BaseTestAPI from tests.integration.integration_test_helper import insert_submission, insert_job class ListSubmissionTests(BaseTestAPI): """ Test list submissions endpoint """ @classmethod def setUpClass(cls): """Set up class-wide resources (test data)""" super(ListSubmissionTests, cls).setUpClass() # TODO: refactor into a pytest fixture with create_app().app_context(): # get an admin and non-admin user sess = GlobalDB.db().session cls.session = sess admin_user = sess.query(User).filter(User.email == cls.test_users['admin_user']).one() cls.admin_user_id = admin_user.user_id other_user = sess.query(User).filter(User.email == cls.test_users['agency_user']).one() cls.other_user_id = other_user.user_id # set up submissions for dabs cls.non_admin_dabs_sub_id = insert_submission(sess, cls.other_user_id, cgac_code="SYS", start_date="10/2015", end_date="12/2015", is_quarter=True, is_fabs=False, publish_status_id=PUBLISH_STATUS_DICT['unpublished'], updated_at='01/01/2010') cls.admin_dabs_sub_id = insert_submission(sess, cls.admin_user_id, cgac_code="000", start_date="10/2015", end_date="12/2015", is_quarter=True, is_fabs=False, publish_status_id=PUBLISH_STATUS_DICT['unpublished'], updated_at='01/01/2012') cls.certified_dabs_sub_id = insert_submission(sess, cls.admin_user_id, cgac_code="SYS", start_date="10/2015", end_date="12/2015", is_quarter=True, is_fabs=False, publish_status_id=PUBLISH_STATUS_DICT['published']) # Add a couple jobs for dabs files insert_job(sess, FILE_TYPE_DICT['appropriations'], FILE_STATUS_DICT['complete'], JOB_TYPE_DICT['file_upload'], cls.non_admin_dabs_sub_id, filename='/path/to/test/file_1.csv', file_size=123, num_rows=3) insert_job(sess, 
FILE_TYPE_DICT['award'], FILE_STATUS_DICT['complete'], JOB_TYPE_DICT['file_upload'], cls.non_admin_dabs_sub_id, filename='/path/to/test/file_2.csv', file_size=123, num_rows=3) insert_job(sess, FILE_TYPE_DICT['award'], FILE_STATUS_DICT['complete'], JOB_TYPE_DICT['file_upload'], cls.certified_dabs_sub_id, filename='/path/to/test/file_part_2.csv', file_size=123, num_rows=3) # set up submissions for fabs cls.non_admin_fabs_sub_id = insert_submission(sess, cls.admin_user_id, cgac_code="SYS", start_date="10/2015", end_date="12/2015", is_fabs=True, publish_status_id=PUBLISH_STATUS_DICT['unpublished']) cls.admin_fabs_sub_id = insert_submission(sess, cls.other_user_id, cgac_code="000", start_date="10/2015", end_date="12/2015", is_fabs=True, publish_status_id=PUBLISH_STATUS_DICT['unpublished']) cls.published_fabs_sub_id = insert_submission(sess, cls.other_user_id, cgac_code="000", start_date="10/2015", end_date="12/2015", is_fabs=True, publish_status_id=PUBLISH_STATUS_DICT['published']) # Add a job for a FABS submission insert_job(sess, FILE_TYPE_DICT['fabs'], FILE_STATUS_DICT['complete'], JOB_TYPE_DICT['file_upload'], cls.admin_fabs_sub_id, filename=str(cls.admin_fabs_sub_id) + '/test_file.csv', file_size=123, num_rows=3) def setUp(self): """ Test set-up. """ super(ListSubmissionTests, self).setUp() self.login_admin_user() def sub_ids(self, response): """ Helper function to parse out the submission ids from an HTTP response. """ self.assertEqual(response.status_code, 200) result = response.json self.assertIn('submissions', result) return {sub['submission_id'] for sub in result['submissions']} def test_list_submissions_dabs_admin(self): """ Test with DABS submissions for an admin user. 
""" response = self.app.post_json("/v1/list_submissions/", {"certified": "mixed"}, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.non_admin_dabs_sub_id, self.admin_dabs_sub_id, self.certified_dabs_sub_id}) response = self.app.post_json("/v1/list_submissions/", {"certified": "false"}, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.non_admin_dabs_sub_id, self.admin_dabs_sub_id}) response = self.app.post_json("/v1/list_submissions/", {"certified": "true"}, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.certified_dabs_sub_id}) def test_list_submissions_dabs_non_admin(self): """ Test with DABS submissions for a non admin user. """ self.login_user() response = self.app.post_json("/v1/list_submissions/", {"certified": "mixed"}, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.non_admin_dabs_sub_id, self.admin_dabs_sub_id}) response = self.app.post_json("/v1/list_submissions/", {"certified": "false"}, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.non_admin_dabs_sub_id, self.admin_dabs_sub_id}) response = self.app.post_json("/v1/list_submissions/", {"certified": "true"}, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), set()) def test_list_submissions_fabs_admin(self): """ Test with FABS submissions for an admin user. 
""" response = self.app.post_json("/v1/list_submissions/", {"certified": "mixed", "fabs": True}, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.non_admin_fabs_sub_id, self.admin_fabs_sub_id, self.published_fabs_sub_id}) response = self.app.post_json("/v1/list_submissions/", {"certified": "false", "fabs": True}, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.non_admin_fabs_sub_id, self.admin_fabs_sub_id}) response = self.app.post_json("/v1/list_submissions/", {"certified": "true", "fabs": True}, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.published_fabs_sub_id}) def test_list_submissions_fabs_non_admin(self): """ Test with FABS submissions for a non admin user. """ self.login_user() response = self.app.post_json("/v1/list_submissions/", {"certified": "mixed", "fabs": True}, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.admin_fabs_sub_id, self.published_fabs_sub_id}) response = self.app.post_json("/v1/list_submissions/", {"certified": "false", "fabs": True}, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.admin_fabs_sub_id}) response = self.app.post_json("/v1/list_submissions/", {"certified": "true", "fabs": True}, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.published_fabs_sub_id}) def test_list_submissions_filter_id(self): """ Test listing submissions with a submission_id filter applied. 
""" # Listing only the relevant submissions, even when an ID is provided that can't be reached post_json = { "certified": "mixed", "filters": { "submission_ids": [self.non_admin_dabs_sub_id, self.admin_fabs_sub_id] } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.non_admin_dabs_sub_id}) self.login_user() # Not returning a result if the user doesn't have access to the submission post_json["filters"] = { "submission_ids": [self.certified_dabs_sub_id] } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), set()) def test_list_submissions_filter_date(self): """ Test listing submissions with a start and end date filter applied. """ # Listing only submissions that have been updated in the time frame post_json = { "certified": "mixed", "filters": { "last_modified_range": { "start_date": '12/31/2009', "end_date": '01/30/2010' } } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.non_admin_dabs_sub_id}) # Time frame with no submission updates post_json["filters"] = { "last_modified_range": { "start_date": '12/31/2010', "end_date": '01/30/2011' } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), set()) # One day date range (shows inclusivity) post_json["filters"] = { "last_modified_range": { "start_date": '01/01/2010', "end_date": '01/01/2010' } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.non_admin_dabs_sub_id}) # Breaks if one of the date filters isn't provided and the other is post_json["filters"] = { "last_modified_range": { "start_date": 
'01/01/2010' } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}, expect_errors=True) self.assertEqual(response.status_code, 400) self.assertEqual(response.json["message"], "Both start_date and end_date must be provided") # Breaks if date isn't valid post_json["filters"] = { "last_modified_range": { "start_date": '30/30/2010', "end_date": '01/01/2010' } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}, expect_errors=True) self.assertEqual(response.status_code, 400) self.assertEqual(response.json["message"], "Start or end date cannot be parsed into a date of format " "MM/DD/YYYY") # Breaks if start date is after end date post_json["filters"] = { "last_modified_range": { "start_date": '01/02/2010', "end_date": '01/01/2010' } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}, expect_errors=True) self.assertEqual(response.status_code, 400) self.assertEqual(response.json["message"], "Last modified start date cannot be greater than the end date") # Breaks if last_modified_range isn't an object post_json["filters"] = { "last_modified_range": [123, 456] } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}, expect_errors=True) self.assertEqual(response.status_code, 400) self.assertEqual(response.json["message"], "last_modified_range filter must be null or an object") def test_list_submissions_filter_agency(self): """ Test listing submissions with an agency_code filter applied. 
""" # Listing only the relevant submissions post_json = { "certified": "mixed", "filters": { "agency_codes": ['000'] } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.admin_dabs_sub_id}) self.login_user() # Not returning a result if the user doesn't have access to the submission post_json = { "certified": "mixed", "fabs": True, "filters": { "agency_codes": ['SYS'] } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), set()) self.login_admin_user() # Invalid agency code, valid length post_json = { "certified": "mixed", "filters": { "agency_codes": ['111'] } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}, expect_errors=True) self.assertEqual(response.status_code, 400) self.assertEqual(response.json["message"], "All codes in the agency_codes filter must be valid agency codes") # Invalid agency code, wrong length post_json["filters"] = { "agency_codes": ['12345'] } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}, expect_errors=True) self.assertEqual(response.status_code, 400) self.assertEqual(response.json["message"], "All codes in the agency_codes filter must be valid agency codes") # Invalid agency code, contains non-string post_json["filters"] = { "agency_codes": [['123', '456', '789'], 'SYS'] } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}, expect_errors=True) self.assertEqual(response.status_code, 400) self.assertEqual(response.json["message"], "All codes in the agency_codes filter must be valid agency codes") # Non-array being passed over post_json["filters"] = { "agency_codes": 'SYS' } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": 
self.session_id}, expect_errors=True) self.assertEqual(response.status_code, 400) self.assertEqual(response.json["message"], "agency_codes filter must be null or an array") def test_list_submissions_filter_filename(self): """ Test listing submissions with an file_names filter applied. """ # List only submissions with job files post_json = { "certified": "mixed", "filters": { "file_names": ['file'] } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.non_admin_dabs_sub_id, self.certified_dabs_sub_id}) # Not returning a result if the string doesn't exist in a file name (even if it exists in a path to it) post_json["filters"] = { "file_names": ['test'] } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), set()) # Returning both submissions if each has even one job that matches one of the given strings (testing multiple) post_json["filters"] = { "file_names": ['part', '_1'] } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.non_admin_dabs_sub_id, self.certified_dabs_sub_id}) # Non-array being passed over (error) post_json["filters"] = { "file_names": 'part' } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}, expect_errors=True) self.assertEqual(response.status_code, 400) self.assertEqual(response.json["message"], "file_names filter must be null or an array") # non-local style submission post_json = { "certified": "mixed", "fabs": True, "filters": { "file_names": ['test'] } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.admin_fabs_sub_id}) # Ignores the ID (despite it being part of the file path, 
but not the name) post_json["filters"] = { "file_names": [str(self.admin_fabs_sub_id)] } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), set()) def test_list_submissions_filter_user_id(self): """ Test listing submissions with a user_id filter applied. """ # Listing only the relevant submissions, even when an ID is provided that can't be reached post_json = { "certified": "mixed", "filters": { "user_ids": [self.other_user_id, -1] } } response = self.app.post_json("/v1/list_submissions/", post_json, headers={"x-session-id": self.session_id}) self.assertEqual(self.sub_ids(response), {self.non_admin_dabs_sub_id})
52.386667
118
0.586358
2,332
19,645
4.684391
0.096484
0.057122
0.070212
0.059136
0.811424
0.774167
0.745789
0.713933
0.687935
0.681618
0
0.021363
0.294681
19,645
374
119
52.526738
0.767032
0.104759
0
0.558719
0
0
0.179785
0.045247
0
0
0
0.002674
0.160142
1
0.042705
false
0
0.021352
0
0.071174
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
d23da04e2ddd1fbd3ec8bd92fa0df474bf58c46a
893
py
Python
pava/implementation/natives/sun/java2d/loops/GraphicsPrimitiveMgr.py
laffra/pava
54d10cf7f8def2f96e254c0356623d08f221536f
[ "MIT" ]
4
2017-03-30T16:51:16.000Z
2020-10-05T12:25:47.000Z
pava/implementation/natives/sun/java2d/loops/GraphicsPrimitiveMgr.py
laffra/pava
54d10cf7f8def2f96e254c0356623d08f221536f
[ "MIT" ]
null
null
null
pava/implementation/natives/sun/java2d/loops/GraphicsPrimitiveMgr.py
laffra/pava
54d10cf7f8def2f96e254c0356623d08f221536f
[ "MIT" ]
null
null
null
def add_native_methods(clazz): def initIDs__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11): raise NotImplementedError() def registerNativeLoops____(a0): raise NotImplementedError() clazz.initIDs__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__ = staticmethod(initIDs__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__java_lang_Class__) clazz.registerNativeLoops____ = staticmethod(registerNativeLoops____)
81.181818
419
0.894737
133
893
4.864662
0.18797
0.408037
0.66306
0.788253
0.695518
0.695518
0.695518
0.695518
0.695518
0.695518
0
0.018051
0.069429
893
10
420
89.3
0.760529
0
0
0.285714
0
0
0
0
0
0
0
0
0
1
0.428571
false
0
0
0
0.428571
0
0
0
0
null
1
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
5
d242ff5b2251944490e665b0435d408a0f3a23e6
48
py
Python
src/tox_travis/__init__.py
Djailla/tox-travis
dc3d05c72822a45d48946b380be680039a9c2f50
[ "MIT" ]
117
2015-05-22T16:10:37.000Z
2017-09-15T17:15:12.000Z
src/tox_travis/__init__.py
Djailla/tox-travis
dc3d05c72822a45d48946b380be680039a9c2f50
[ "MIT" ]
89
2017-09-20T18:17:47.000Z
2021-01-04T21:39:40.000Z
src/tox_travis/__init__.py
Djailla/tox-travis
dc3d05c72822a45d48946b380be680039a9c2f50
[ "MIT" ]
32
2017-10-29T23:32:13.000Z
2022-02-16T11:52:43.000Z
"""Make it easy to work with Tox and Travis."""
24
47
0.666667
9
48
3.555556
1
0
0
0
0
0
0
0
0
0
0
0
0.1875
48
1
48
48
0.820513
0.854167
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
d2785244fb0e3480f011b5c2f22b31bc3ed1dbec
53
py
Python
ex3.py
Cloudlie/pythonlearning
347a2ea3b85450139e0718aec37ddf6998bd5678
[ "MIT" ]
null
null
null
ex3.py
Cloudlie/pythonlearning
347a2ea3b85450139e0718aec37ddf6998bd5678
[ "MIT" ]
null
null
null
ex3.py
Cloudlie/pythonlearning
347a2ea3b85450139e0718aec37ddf6998bd5678
[ "MIT" ]
null
null
null
print 'I will now count:' print 10 / 3 print 10.0/3
13.25
25
0.660377
12
53
2.916667
0.666667
0.4
0
0
0
0
0
0
0
0
0
0.170732
0.226415
53
3
26
17.666667
0.682927
0
0
0
0
0
0.320755
0
0
0
0
0
0
0
null
null
0
0
null
null
1
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
5
9627cc96d03d7d77333110a01623e996804293d3
127
py
Python
pytorch_h5dataset/utils.py
CeadeS/PyTorchH5Dataset
9ee6e49f2a780345abd708abf2e0c47bb5475e0a
[ "BSD-3-Clause" ]
null
null
null
pytorch_h5dataset/utils.py
CeadeS/PyTorchH5Dataset
9ee6e49f2a780345abd708abf2e0c47bb5475e0a
[ "BSD-3-Clause" ]
null
null
null
pytorch_h5dataset/utils.py
CeadeS/PyTorchH5Dataset
9ee6e49f2a780345abd708abf2e0c47bb5475e0a
[ "BSD-3-Clause" ]
null
null
null
from torch.nn import Module class NormImageUint8ToFloat(Module): def forward(self, im) : return 2.*((im/255.)-.5)
25.4
36
0.661417
17
127
4.941176
0.882353
0
0
0
0
0
0
0
0
0
0
0.058824
0.19685
127
5
37
25.4
0.764706
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
9646deeed2d5bda54fb5df2def9304428850767b
381
py
Python
DeepLearning/Python/Chapter 4/Ch04-04-02-diff.py
BlueWay-KU/Study
a86405cdc3011eaed1b980b562b75df1e9ce90a8
[ "MIT" ]
null
null
null
DeepLearning/Python/Chapter 4/Ch04-04-02-diff.py
BlueWay-KU/Study
a86405cdc3011eaed1b980b562b75df1e9ce90a8
[ "MIT" ]
null
null
null
DeepLearning/Python/Chapter 4/Ch04-04-02-diff.py
BlueWay-KU/Study
a86405cdc3011eaed1b980b562b75df1e9ce90a8
[ "MIT" ]
null
null
null
def numerical_diff(f, x): h = 1e-4 return (f(x+h) - f(x-h)) / (2*h) def function_1(x): return 0.01*x**2 + 0.1*x def function_2(x): return x[0]**2 + x[1]**2 def function_tmp1(x0): return x0*x0 + 4.0**2.0 print(numerical_diff(function_tmp1, 3.0)) def function_tmp2(x1): return 3.0**2.0 + x1*x1 print(numerical_diff(function_tmp2, 4.0))
20.052632
42
0.585302
76
381
2.815789
0.263158
0.205607
0.042056
0.242991
0
0
0
0
0
0
0
0.12585
0.228346
381
19
43
20.052632
0.602041
0
0
0
0
0
0
0
0
0
0
0
0
1
0.384615
false
0
0
0.307692
0.769231
0.153846
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
964ad81d4e7347ced2c0354c2533c9005bb9b08f
279
py
Python
napari/layers/image/__init__.py
MaksHess/napari
64a144607342c02177fc62fa83a3442ace0a98e7
[ "BSD-3-Clause" ]
1,345
2019-03-03T21:14:14.000Z
2022-03-31T19:46:39.000Z
napari/layers/image/__init__.py
MaksHess/napari
64a144607342c02177fc62fa83a3442ace0a98e7
[ "BSD-3-Clause" ]
3,904
2019-03-02T01:30:24.000Z
2022-03-31T20:17:27.000Z
napari/layers/image/__init__.py
MaksHess/napari
64a144607342c02177fc62fa83a3442ace0a98e7
[ "BSD-3-Clause" ]
306
2019-03-29T17:09:10.000Z
2022-03-30T09:54:11.000Z
from . import _image_key_bindings from .image import Image # Note that importing _image_key_bindings is needed as the Image layer gets # decorated with keybindings during that process, but it is not directly needed # by our users and so is deleted below del _image_key_bindings
34.875
79
0.817204
46
279
4.76087
0.673913
0.109589
0.219178
0
0
0
0
0
0
0
0
0
0.16129
279
7
80
39.857143
0.935897
0.673835
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9660f7b2a815a98e1362dbf8b9bc9edd3bbbda07
1,081
py
Python
python/test/test_permissions_api.py
openlattice/api-clients
1d5be9861785b295089b732f37464e31bf80c8ca
[ "Apache-2.0" ]
null
null
null
python/test/test_permissions_api.py
openlattice/api-clients
1d5be9861785b295089b732f37464e31bf80c8ca
[ "Apache-2.0" ]
1
2021-01-20T00:20:01.000Z
2021-01-20T00:20:01.000Z
python/test/test_permissions_api.py
openlattice/api-clients
1d5be9861785b295089b732f37464e31bf80c8ca
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ OpenLattice API OpenLattice API # noqa: E501 The version of the OpenAPI document: 0.0.1 Contact: support@openlattice.com Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import unittest import openlattice from openlattice.api.permissions_api import PermissionsApi # noqa: E501 from openlattice.rest import ApiException class TestPermissionsApi(unittest.TestCase): """PermissionsApi unit test stubs""" def setUp(self): self.api = openlattice.api.permissions_api.PermissionsApi() # noqa: E501 def tearDown(self): pass def test_get_acl(self): """Test case for get_acl Get the ACL for the given ACL Key, only if the user is the owner of the ACL Key. # noqa: E501 """ pass def test_update_acl(self): """Test case for update_acl Updates the ACL for a particular ACL Key, only if the user is the owner of the ACL Key. # noqa: E501 """ pass if __name__ == '__main__': unittest.main()
22.061224
109
0.668825
144
1,081
4.875
0.409722
0.05698
0.048433
0.079772
0.19943
0.148148
0.148148
0.148148
0.148148
0.148148
0
0.023457
0.250694
1,081
48
110
22.520833
0.84321
0.445883
0
0.1875
1
0
0.015385
0
0
0
0
0
0
1
0.25
false
0.1875
0.3125
0
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
0
1
1
0
1
0
0
5
9669c2ee2dc216abe33c57e73b6f8bf2d6511f51
66
py
Python
ladyns/__init__.py
Qi-Xin/ladyns
b4eb4ae9ee4184283bd26c6951bc4c614fc02351
[ "MIT" ]
1
2022-03-28T19:57:44.000Z
2022-03-28T19:57:44.000Z
ladyns/__init__.py
Qi-Xin/ladyns
b4eb4ae9ee4184283bd26c6951bc4c614fc02351
[ "MIT" ]
null
null
null
ladyns/__init__.py
Qi-Xin/ladyns
b4eb4ae9ee4184283bd26c6951bc4c614fc02351
[ "MIT" ]
1
2021-06-14T23:15:57.000Z
2021-06-14T23:15:57.000Z
from ladyns.estimate import * import ladyns.inference as inference
33
36
0.848485
9
66
6.222222
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.106061
66
2
36
33
0.949153
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
968b28cdc862b0021a7216beecd36e6a3c374aa5
10,139
py
Python
config/playbooks/vault/settings_production_secrets.py
aehlke/manabi
1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b
[ "MIT" ]
14
2015-10-03T07:34:28.000Z
2021-09-20T07:10:29.000Z
config/playbooks/vault/settings_production_secrets.py
aehlke/manabi
1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b
[ "MIT" ]
23
2019-10-25T08:47:23.000Z
2022-01-30T02:00:45.000Z
config/playbooks/vault/settings_production_secrets.py
aehlke/manabi
1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b
[ "MIT" ]
7
2016-10-04T08:10:36.000Z
2021-09-20T07:10:33.000Z
$ANSIBLE_VAULT;1.1;AES256 38626436333733663864343661643664646437396461343138666565323338643432373732643237 3938633139386566333330643833313633306131383131390a643734623462383766613165383436 37646435396665636365643963323330656438613130353866383939336561333033313863623135 3639643664396131610a393566623532613435383934313837313439316339326463613535303234 63353063646465323865636231623832616635313064323061633632653135663038323436366231 39653161383939366533643831643938313931353366326238653061376335353236393730626663 30356632663961636264613464393838373463643937653838373666623639313665343837623434 63653761363231623864643438376634636562376433336261353332663662366135353936623637 30656138343561613633343138313332356139383662303562346635643330643437323662653134 61353563663066326639306637356662633733303661396363386564653833333930376138303637 66333339373130333263623031393066623233376162393033656239396464613339633031373130 61666265356231636537626239653838376132323532326464373737663439353562323564326536 65643034633633666435633738313234653037353434356265383563623465633835343138366335 39343838636639336531643233653835643566636432363038666432333437626566343639646333 65383632643430376139356161326561323566343937656330613133616539356562666565626539 30613738663763643064666633313561356465653762653339636430336230623232626630636265 39643136343637663139653731323566333833333236343261613138393035623636626462383138 63383438373666316563326461343135363065383164656235643266623066366366333838633634 63396565346638646637386135643632376437386132666435363239393863643361363335373139 61376566643733326263353736663339373136343733393165363961636330613764623739363932 36613130303562636438356562323466353539306166386534643862326562336166323363363035 36336235366661363265623936363666386164396339633165663930343462636333613832626130 63323433396461303765316665346539316264366165353338386533383736376365663265363264 31323838643939306163626362306630386661636466373833656531306139666664623437643437 
39643664633130346235626265363538316536663632346337366565636135323163383962303032 65653463336363376331303062656434643565326633333062393531376661396636626535303837 32386432316433303338343866313330386633336562343866333435343766346637316333613466 66393731383264326431316439346366323161626239393065363634393731373064363835323635 61663233626333343830663330373435303133313062393465316463646237666233313364356632 30633737646665646233663562303765663537363236663463356166386530336466333834373462 62623334373635333366393537613664386362393334343938366335633465396235633933633338 32356538393435356630346563343234313532386132663130366330383639323639343163333736 66396632313931626631383731633431383062663361373033353936623638343838366634306232 37366565306465386165666337313831653062356263363537333038336561633262313563376635 38396165366536666465613634643038316266363135363538646431633835363763336534343365 30313939323236666264396438313466613231363361383539636232363831333535613665383863 30383734333265313632626361333333653562353361623062363135343132306536306361383961 66326636613461323762393733323664626637356662333965613432346366396163653338383137 64626136373762653333313338376530303735313230333163353061396662656664613434376261 37333836346436353064353563373330343634663431633236633637323766616332303964666665 32353734373566356539626435323138333161353262613666376634396262633163646532336162 65633735626139666535663432346262376438313534643435623061636362653432666436316538 65333634303331623465643736326137333965303966346437313534636662366430346632663731 65616630626530393130663236643566396534313838366561323337373135646665656330366666 31333532323266336536306233643832323538643839316433323930616561343432393239353466 35376634303433373436616464353630623838373136376130373664616332383765383830316161 35396337373837333965616431363433613135306530353365356166386361383834616166333039 38376537613532383239636464333031386535636232643864613135663838316138666234373837 
61383538656362396335633438386535363135306434373261306365653662663733663739663931 64313661396565316239393039373336353333396562343066313338366133366339663931653266 38643930666539353266393436626232663534666237366431386435363436356335313536626565 32333364666330343530386237333765653466666332636336656236666332326365616366356366 37303632303562346164643630643563303538313331623832303138653065323863383965303635 39396238633830343437393739356266613139656538386565656434333738663766336639363762 63653037316164633062383563666131613063363231393330346432666165313831323962616433 35366461616161613638643338663263623265373836383937653037313863333930626135303636 35333766633330636335626332653837633363396230333130343131643764306236333232393261 30623163633432326232643139633366323233333438376230653938643037316564653335656430 38373435323537633361643233663064653638663837303263656630316130616535303137663263 39646533333265373865333330366439376665643838336430323433336166376532383933333438 39373433616130646531393430366662333633333266383761336435313236663439666535306536 61633530656162366237623961303436643231326263653634346130336133363837626631333238 31353266353538613430653136613734346632333163633937353266343032373234363865653631 38646630616639363931353031626266313939303031623431656630626465326337343161346465 35353863646638306137636632393730313031366131636261383865373034303437643337646431 36643233386363653439313538643834363361623238313334306532386265333432643263666631 61373931356238646634323739396537643936643061313065336163616630303433626632306438 61383931346365306664323938663435393865343966663732353737313730313736623662323163 30636231363538323861613934363861666561306339363464313730316132646238393161653330 36373966333130643931613439393866386364303439663631343533633762633830323235346433 65383265323537633839363239343039333534323061653637633565656162323538373431393662 65653339313761613333623461363566396438323932633937313633353565393462316133623962 
33663863333462616162346335363864356565343534373938316463363435623764666564323731 30643166626231363833643930336131373837323738393063653964336465623137386535373463 39643963316433366633363735653338376132393535356465386436343763613539306532316235 35313432363632346435616233656163343365303137323330663335613038653633383039656463 65373963393566313637656339393561613134616263343437626236613831363735383232356439 32396238623562346134663765613736616264633930663631353037313134353832643361373863 30353132363135363830306464366238353936383866633536386332366239336261306530383666 66393761653366336336353563383537393361613830333239646262646564636562343562383138 36303562383135623034643266316364633933323033386362393033656232306362623938346634 35323432366564623633303361326166363262616135633866363639623831376137616663326634 33363337623635343761363165623735626462336164623734623439323833313132633036336539 63323037336137343830313931373162316161623236306665383035623132643131376363353339 65316239376665306335633736383635633966613733343838626134653939323231646330323939 32623634303836616562643466393835373738666265343164653735333933626330633738643233 63363561633665613732343264663734663866373137323930373161663663636630386264356561 37653730316261356432306538616630396539313333656634363063306364363062613364633735 65353938656630646239333139623262306330376364666535646461303336613262646232313135 39316366326636346638316336653439613461643766666336353761616166386531303065303761 32613965313134636535383938303639346539303231376435386338323534633066666263303036 36316166313836613365353332373230656237373766363536613037626165636635656665386131 62353835333365666664363539386130643630396333643833666432313664323434656231356232 35633339343330653034613261643961393630343765343762353936393033653065356532616439 66653166316239376436663063386534663065323536353233313834636234326638386364613739 31353961323831313033663161396366633733373736316433303632386132386432363831336562 
36356662643338663263666434306631666562363964643865386264366337323465643630333535 35646432616665646132616266343736313230616664356632343531316664373737643864643730 35356239343564633531666564616435386530653434653038633535633363616165333334323566 35313530393062353266646538636432363231633730373831323132663764663265626638666437 61633466383537346438613537633333363866356131626333653039613534616233353435303633 33643034386162626238336630663933353339316162366263353963363864616138663033323138 30366439356632633537383931313136653061373663316264303536363334333965333138366366 36336433313430613463363063353330623830636462323239323234643730663137346131626236 31646334313531353238306365336432353433646665393531323338373466373936353761363938 61333666646235613533663061376431623232653363356565343566653939626434633166306633 65666233343763656265616337306438323564613565363738623933343533323034373539393038 31666564353439663762336435326263366162336532313436666336616135613437383333396161 38346437303266316337633262316138646631666431343765313234303036363863643036323034 63306539336332373930356132613462653938356537316365643066613639336365313735343332 38623932616563313961373834636266636239313035333264316335303733626266353161386331 62363636623964643532333935626135376661363731386566356532306463343362353232623461 64346162636361613931626537343338626137366464353863386436643832396561366433373565 37326634313762333837363663363432393636303563356366316330613335643630656433393965 35363036326261356131376635373762643363653562376638663132326361636261386633303565 62613135626430373933353839353837613131363933346266393436356662313863376431396437 38623831343336326634623665343931343631333434653834333838373738306164656630656236 65343337353531303765373539653266383232636635653034376630633238303864353935313439 64313765303234313933366436636435346663366533663234313361316537306633626530616233 62363530626334363265323530323536366232623538373963613966663834643165346262663461 
31386336363438386130353561313436626362393635636130306264333637643836303139653138 31656130626339653535616666666636636362386565346532373562646231616233316361613064 37353630383936336134653434646439646133353264373537343862383934313964303335306437 63393131373238386362363533373363383464393362666138626434646238316262666631613036 35643536663566633765336264376430626164393666373734303165356161306465
79.834646
80
0.987178
130
10,139
76.984615
0.992308
0
0
0
0
0
0
0
0
0
0
0.997803
0.012427
10,139
126
81
80.468254
0.001698
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
5
969612e4bc29b4bf5c332687933e52546d948fd2
192
py
Python
tina/pars/base.py
xuhao1/taichi_three
25fdf047da4c93df36a047a0be3cc47225d328c9
[ "MIT" ]
152
2020-06-17T09:08:59.000Z
2022-03-30T13:48:49.000Z
tina/pars/base.py
xuhao1/taichi_three
25fdf047da4c93df36a047a0be3cc47225d328c9
[ "MIT" ]
46
2020-06-20T15:15:57.000Z
2022-03-24T20:03:18.000Z
tina/pars/base.py
xuhao1/taichi_three
25fdf047da4c93df36a047a0be3cc47225d328c9
[ "MIT" ]
27
2020-06-20T14:25:55.000Z
2022-03-12T08:11:31.000Z
from ..common import * @ti.data_oriented class ParsEditBase: def __init__(self, pars): self.pars = pars def __getattr__(self, attr): return getattr(self.pars, attr)
17.454545
39
0.65625
24
192
4.875
0.625
0.205128
0
0
0
0
0
0
0
0
0
0
0.239583
192
10
40
19.2
0.80137
0
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0.142857
0.142857
0.714286
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
96967499a9604b253f13e3ec6374c8c3cbd4096c
372
py
Python
hknweb/candidate/admin/__init__.py
jyxzhang/hknweb
a01ffd8587859bf63c46213be6a0c8b87164a5c2
[ "MIT" ]
null
null
null
hknweb/candidate/admin/__init__.py
jyxzhang/hknweb
a01ffd8587859bf63c46213be6a0c8b87164a5c2
[ "MIT" ]
null
null
null
hknweb/candidate/admin/__init__.py
jyxzhang/hknweb
a01ffd8587859bf63c46213be6a0c8b87164a5c2
[ "MIT" ]
null
null
null
from hknweb.candidate.admin.announcement import AnnouncementAdmin from hknweb.candidate.admin.activities import BitByteActivityAdmin, OffChallengeAdmin from hknweb.candidate.admin.requirements import ( RequirementAdminGeneral, CandidateFormAdmin, MiscRequirementAdmin, RequirementMandatoryAdmin, RequirementMergeAdmin, MiscRequirementEntryAdmin, )
33.818182
85
0.833333
27
372
11.481481
0.62963
0.096774
0.183871
0.232258
0
0
0
0
0
0
0
0
0.11828
372
10
86
37.2
0.945122
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.3
0
0.3
0
1
0
1
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
739e79eb281f2735aab4b04c5ac957fb06434bb5
116
py
Python
examples/client-gen/tictactoe/instructions/__init__.py
kevinheavey/anchorpy
d4cc28365c6adaeaec7f5001fa6b8a3e719b41ad
[ "MIT" ]
87
2021-09-26T18:14:07.000Z
2022-03-28T08:22:24.000Z
examples/client-gen/tictactoe/instructions/__init__.py
kevinheavey/anchorpy
d4cc28365c6adaeaec7f5001fa6b8a3e719b41ad
[ "MIT" ]
15
2021-10-07T16:12:23.000Z
2022-03-20T21:04:40.000Z
examples/client-gen/tictactoe/instructions/__init__.py
kevinheavey/anchorpy
d4cc28365c6adaeaec7f5001fa6b8a3e719b41ad
[ "MIT" ]
16
2021-10-16T04:40:28.000Z
2022-03-18T16:49:40.000Z
from .setup_game import setup_game, SetupGameArgs, SetupGameAccounts from .play import play, PlayArgs, PlayAccounts
38.666667
68
0.844828
14
116
6.857143
0.642857
0.1875
0
0
0
0
0
0
0
0
0
0
0.103448
116
2
69
58
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
73b33dc4e6f144d064cbd8fff82be8d2ad2816b6
23,254
py
Python
src/titiler/core/tests/test_factories.py
mackdelany/titiler
b2a76185d96af9aa8b653fd8134bbaa591d637a5
[ "MIT" ]
null
null
null
src/titiler/core/tests/test_factories.py
mackdelany/titiler
b2a76185d96af9aa8b653fd8134bbaa591d637a5
[ "MIT" ]
null
null
null
src/titiler/core/tests/test_factories.py
mackdelany/titiler
b2a76185d96af9aa8b653fd8134bbaa591d637a5
[ "MIT" ]
null
null
null
"""Test TiTiler Tiler Factories.""" import json import os import pathlib from typing import Dict, Type from unittest.mock import patch from urllib.parse import urlencode import attr import morecantile from rio_tiler.io import BaseReader, COGReader, MultiBandReader, STACReader from titiler.core.dependencies import TMSParams, WebMercatorTMSParams from titiler.core.errors import DEFAULT_STATUS_CODES, add_exception_handlers from titiler.core.factory import ( MultiBandTilerFactory, MultiBaseTilerFactory, TilerFactory, TMSFactory, ) from titiler.core.resources.enums import OptionalHeader from .conftest import DATA_DIR, mock_rasterio_open, parse_img from fastapi import FastAPI from starlette.testclient import TestClient def test_TilerFactory(): """Test TilerFactory class.""" cog = TilerFactory() assert len(cog.router.routes) == 26 assert cog.tms_dependency == TMSParams cog = TilerFactory(router_prefix="something", tms_dependency=WebMercatorTMSParams) app = FastAPI() app.include_router(cog.router, prefix="/something") client = TestClient(app) response = client.get(f"/something/tilejson.json?url={DATA_DIR}/cog.tif") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" assert response.json()["tilejson"] response = client.get(f"/something/NZTM2000/tilejson.json?url={DATA_DIR}/cog.tif") assert response.status_code == 422 cog = TilerFactory(add_preview=False, add_part=False, add_statistics=False) assert len(cog.router.routes) == 17 app = FastAPI() cog = TilerFactory(optional_headers=[OptionalHeader.server_timing]) app.include_router(cog.router) add_exception_handlers(app, DEFAULT_STATUS_CODES) client = TestClient(app) response = client.get(f"/tiles/8/87/48?url={DATA_DIR}/cog.tif&rescale=0,1000") assert response.status_code == 200 assert response.headers["content-type"] == "image/jpeg" timing = response.headers["server-timing"] assert "dataread;dur" in timing assert "postprocess;dur" in timing assert "format;dur" in timing response = 
client.get( f"/tiles/8/87/48.tif?url={DATA_DIR}/cog.tif&expression=b1,b1,b1&return_mask=false" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/tiff; application=geotiff" meta = parse_img(response.content) assert meta["dtype"] == "int32" assert meta["count"] == 3 assert meta["width"] == 256 assert meta["height"] == 256 response = client.get( f"/tiles/8/84/47?url={DATA_DIR}/cog.tif&bidx=1&rescale=0,1000&colormap_name=viridis" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/png" cmap = urlencode( { "colormap": json.dumps( { "1": [58, 102, 24, 255], "2": [100, 177, 41], "3": "#b1b129", "4": "#ddcb9aFF", } ) } ) response = client.get(f"/tiles/8/84/47.png?url={DATA_DIR}/cog.tif&bidx=1&{cmap}") assert response.status_code == 200 assert response.headers["content-type"] == "image/png" # Bad colormap format cmap = urlencode({"colormap": json.dumps({"1": [58, 102]})}) response = client.get(f"/tiles/8/84/47.png?url={DATA_DIR}/cog.tif&bidx=1&{cmap}") assert response.status_code == 400 # no json encoding cmap = urlencode({"colormap": {"1": [58, 102]}}) response = client.get(f"/tiles/8/84/47.png?url={DATA_DIR}/cog.tif&bidx=1&{cmap}") assert response.status_code == 400 response = client.get( f"/preview?url={DATA_DIR}/cog.tif&rescale=0,1000&max_size=256" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/jpeg" timing = response.headers["server-timing"] assert "dataread;dur" in timing assert "postprocess;dur" in timing assert "format;dur" in timing response = client.get( f"/crop/-56.228,72.715,-54.547,73.188.png?url={DATA_DIR}/cog.tif&rescale=0,1000&max_size=256" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/png" timing = response.headers["server-timing"] assert "dataread;dur" in timing assert "postprocess;dur" in timing assert "format;dur" in timing response = client.get(f"/point/-56.228,72.715?url={DATA_DIR}/cog.tif") assert 
response.status_code == 200 assert response.headers["content-type"] == "application/json" timing = response.headers["server-timing"] assert "dataread;dur" in timing response = client.get(f"/tilejson.json?url={DATA_DIR}/cog.tif") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" assert response.json()["tilejson"] response = client.get(f"/WorldCRS84Quad/tilejson.json?url={DATA_DIR}/cog.tif") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" assert response.json()["tilejson"] response_qs = client.get( f"/tilejson.json?url={DATA_DIR}/cog.tif&TileMatrixSetId=WorldCRS84Quad" ) assert response.json()["tiles"] == response_qs.json()["tiles"] response = client.get(f"/tilejson.json?url={DATA_DIR}/cog.tif&tile_format=png") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" assert response.json()["tilejson"] assert "png" in response.json()["tiles"][0] response = client.get(f"/tilejson.json?url={DATA_DIR}/cog.tif&minzoom=5&maxzoom=12") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" assert response.json()["tilejson"] assert response.json()["minzoom"] == 5 assert response.json()["maxzoom"] == 12 response = client.get( f"/WMTSCapabilities.xml?url={DATA_DIR}/cog.tif&minzoom=5&maxzoom=12" ) assert response.status_code == 200 assert response.headers["content-type"] == "application/xml" response = client.get(f"/bounds?url={DATA_DIR}/cog.tif") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" assert response.json()["bounds"] response = client.get(f"/info?url={DATA_DIR}/cog.tif") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" assert response.json()["band_metadata"] assert not response.json().get("minzoom") response = client.get(f"/info.geojson?url={DATA_DIR}/cog.tif") assert response.status_code == 200 
assert response.headers["content-type"] == "application/geo+json" assert response.json()["type"] == "Feature" response = client.get(f"/metadata?url={DATA_DIR}/cog.tif&max_size=256") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" assert response.json()["statistics"] assert response.json()["band_metadata"] response = client.get( f"/metadata?url={DATA_DIR}/cog.tif&bounds=-56.228,72.715,-54.547,73.188&max_size=256" ) assert response.status_code == 200 assert response.headers["content-type"] == "application/json" assert response.json()["statistics"] assert response.json()["band_metadata"] response = client.get( f"/metadata?url={DATA_DIR}/cog.tif&bidx=1&histogram_range=0,100" ) assert response.status_code == 200 assert response.headers["content-type"] == "application/json" assert response.json()["statistics"]["1"]["histogram"][0][1] == 0.0 response = client.get(f"/metadata?url={DATA_DIR}/cog.tif&histogram_bins=4") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" assert len(response.json()["statistics"]["1"]["histogram"][0]) == 4 response = client.get( f"/metadata?url={DATA_DIR}/cog.tif&histogram_bins=1,2,3,4,5,6" ) assert response.status_code == 200 assert response.headers["content-type"] == "application/json" assert len(response.json()["statistics"]["1"]["histogram"][1]) == 6 assert response.json()["statistics"]["1"]["histogram"][1][0] == 1.0 response = client.get( f"/preview.png?url={DATA_DIR}/cog.tif&rescale=0,1000&max_size=256" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/png" meta = parse_img(response.content) assert 256 in (meta["width"], meta["height"]) response = client.get( f"/preview.png?url={DATA_DIR}/cog.tif&rescale=0,1000&max_size=256&height=512&width=512" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/png" meta = parse_img(response.content) assert meta["width"] == 512 assert 
meta["height"] == 512 response = client.get( f"/preview.png?url={DATA_DIR}/cog.tif&rescale=0,1000&max_size=0&nodata=0" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/png" meta = parse_img(response.content) assert meta["width"] == 2658 assert meta["height"] == 2667 response = client.get( f"/preview.png?url={DATA_DIR}/cog.tif&rescale=0,1000&max_size=0&nodata=0" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/png" meta = parse_img(response.content) assert meta["width"] == 2658 assert meta["height"] == 2667 response = client.get( f"/preview.tif?url={DATA_DIR}/cog_scale.tif&unscale=True&return_mask=false" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/tiff; application=geotiff" meta = parse_img(response.content) assert meta["dtype"] == "float32" assert meta["count"] == 1 response = client.get( f"/preview.tif?url={DATA_DIR}/cog_scale.tif&return_mask=false" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/tiff; application=geotiff" meta = parse_img(response.content) assert meta["dtype"] == "int16" assert meta["count"] == 1 feature = { "type": "Feature", "properties": {}, "geometry": { "type": "Polygon", "coordinates": [ [ [-59.23828124999999, 74.16408546675687], [-59.83154296874999, 73.15680773175981], [-58.73291015624999, 72.88087095711504], [-56.62353515625, 73.06104462497655], [-55.17333984375, 73.41588526207096], [-55.2392578125, 74.09799577518739], [-56.88720703125, 74.2895142503942], [-57.23876953124999, 74.30735341486248], [-59.23828124999999, 74.16408546675687], ] ], }, } feature_collection = {"type": "FeatureCollection", "features": [feature]} response = client.post(f"/crop?url={DATA_DIR}/cog.tif", json=feature) assert response.status_code == 200 assert response.headers["content-type"] == "image/png" response = client.post(f"/crop.tif?url={DATA_DIR}/cog.tif", json=feature) assert response.status_code == 200 
assert response.headers["content-type"] == "image/tiff; application=geotiff" meta = parse_img(response.content) assert meta["dtype"] == "uint16" assert meta["count"] == 2 response = client.post(f"/crop/100x100.jpeg?url={DATA_DIR}/cog.tif", json=feature) assert response.status_code == 200 assert response.headers["content-type"] == "image/jpeg" meta = parse_img(response.content) assert meta["width"] == 100 assert meta["height"] == 100 # GET - statistics response = client.get(f"/statistics?url={DATA_DIR}/cog.tif&bidx=1,1,1") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" resp = response.json() assert len(resp) == 3 assert list(resp[0]) == [ "min", "max", "mean", "count", "sum", "std", "median", "majority", "minority", "unique", "percentile_2", "percentile_98", "valid_pixels", "masked_pixels", "valid_percent", ] response = client.get(f"/statistics?url={DATA_DIR}/cog.tif&bidx=1,1,1&p=4") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" resp = response.json() assert len(resp) == 3 assert list(resp[0]) == [ "min", "max", "mean", "count", "sum", "std", "median", "majority", "minority", "unique", "percentile_4", "valid_pixels", "masked_pixels", "valid_percent", ] response = client.get(f"/statistics?url={DATA_DIR}/cog.tif&categorical=true") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" resp = response.json() assert len(resp) == 1 assert list(resp[0]) == [ "categories", "valid_pixels", "masked_pixels", "valid_percent", ] assert len(resp[0]["categories"]) == 15 response = client.get( f"/statistics?url={DATA_DIR}/cog.tif&categorical=true&c=1&c=2&c=3&c=4" ) assert response.status_code == 200 assert response.headers["content-type"] == "application/json" resp = response.json() assert len(resp) == 1 assert list(resp[0]) == [ "categories", "valid_pixels", "masked_pixels", "valid_percent", ] assert len(resp[0]["categories"]) == 4 assert 
resp[0]["categories"]["4"] == 0 # POST - statistics response = client.post( f"/statistics?url={DATA_DIR}/cog.tif&bidx=1,1,1", json=feature ) assert response.status_code == 200 assert response.headers["content-type"] == "application/geo+json" resp = response.json() assert resp["type"] == "Feature" assert len(resp["properties"]["statistics"]) == 3 assert list(resp["properties"]["statistics"][0]) == [ "min", "max", "mean", "count", "sum", "std", "median", "majority", "minority", "unique", "percentile_2", "percentile_98", "valid_pixels", "masked_pixels", "valid_percent", ] response = client.post( f"/statistics?url={DATA_DIR}/cog.tif&bidx=1,1,1", json=feature_collection ) assert response.status_code == 200 assert response.headers["content-type"] == "application/geo+json" resp = response.json() assert resp["type"] == "FeatureCollection" assert len(resp["features"][0]["properties"]["statistics"]) == 3 assert list(resp["features"][0]["properties"]["statistics"][0]) == [ "min", "max", "mean", "count", "sum", "std", "median", "majority", "minority", "unique", "percentile_2", "percentile_98", "valid_pixels", "masked_pixels", "valid_percent", ] response = client.post( f"/statistics?url={DATA_DIR}/cog.tif&categorical=true", json=feature ) assert response.status_code == 200 assert response.headers["content-type"] == "application/geo+json" resp = response.json() assert resp["type"] == "Feature" assert len(resp["properties"]["statistics"]) == 1 assert list(resp["properties"]["statistics"][0]) == [ "categories", "valid_pixels", "masked_pixels", "valid_percent", ] assert len(resp["properties"]["statistics"][0]["categories"]) == 12 response = client.post( f"/statistics?url={DATA_DIR}/cog.tif&categorical=true&c=1&c=2&c=3&c=4", json=feature, ) assert response.status_code == 200 assert response.headers["content-type"] == "application/geo+json" resp = response.json() assert resp["type"] == "Feature" assert len(resp["properties"]["statistics"]) == 1 assert 
list(resp["properties"]["statistics"][0]) == [ "categories", "valid_pixels", "masked_pixels", "valid_percent", ] assert len(resp["properties"]["statistics"][0]["categories"]) == 4 assert resp["properties"]["statistics"][0]["categories"]["4"] == 0 @patch("rio_tiler.io.cogeo.rasterio") def test_MultiBaseTilerFactory(rio): """test MultiBaseTilerFactory.""" rio.open = mock_rasterio_open stac = MultiBaseTilerFactory(reader=STACReader) assert len(stac.router.routes) == 27 app = FastAPI() app.include_router(stac.router) add_exception_handlers(app, DEFAULT_STATUS_CODES) client = TestClient(app) response = client.get(f"/assets?url={DATA_DIR}/item.json") assert response.status_code == 200 assert len(response.json()) == 17 response = client.get(f"/bounds?url={DATA_DIR}/item.json") assert response.status_code == 200 assert len(response.json()["bounds"]) == 4 response = client.get(f"/info?url={DATA_DIR}/item.json&assets=B01") assert response.status_code == 200 assert response.json()["B01"] response = client.get(f"/info.geojson?url={DATA_DIR}/item.json&assets=B01") assert response.status_code == 200 assert response.headers["content-type"] == "application/geo+json" assert response.json()["type"] == "Feature" response = client.get(f"/metadata?url={DATA_DIR}/item.json&assets=B01&bidx=1") assert response.status_code == 200 assert response.json()["B01"]["statistics"]["1"] response = client.get( f"/preview.tif?url={DATA_DIR}/item.json&assets=B01&bidx=1,1,1&return_mask=false" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/tiff; application=geotiff" meta = parse_img(response.content) assert meta["dtype"] == "uint16" assert meta["count"] == 3 response = client.get(f"/preview.tif?url={DATA_DIR}/item.json") assert response.status_code == 400 response = client.get( f"/preview.tif?url={DATA_DIR}/item.json&expression=B01,B01,B01&return_mask=false" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/tiff; 
application=geotiff" meta = parse_img(response.content) assert meta["dtype"] == "int32" assert meta["count"] == 3 # GET - statistics response = client.get(f"/statistics?url={DATA_DIR}/item.json&assets=B01,B09") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" resp = response.json() assert len(resp) == 2 assert list(resp[0]) == [ "min", "max", "mean", "count", "sum", "std", "median", "majority", "minority", "unique", "percentile_2", "percentile_98", "valid_pixels", "masked_pixels", "valid_percent", ] @attr.s class BandFileReader(MultiBandReader): """Test MultiBand""" path: str = attr.ib() reader: Type[BaseReader] = attr.ib(default=COGReader) reader_options: Dict = attr.ib(factory=dict) tms: morecantile.TileMatrixSet = attr.ib( default=morecantile.tms.get("WebMercatorQuad") ) def __attrs_post_init__(self): """Parse Sceneid and get grid bounds.""" self.bands = sorted([p.stem for p in pathlib.Path(self.path).glob("B0*.tif")]) with self.reader(self._get_band_url(self.bands[0])) as cog: self.bounds = cog.bounds self.minzoom = cog.minzoom self.maxzoom = cog.maxzoom def _get_band_url(self, band: str) -> str: """Validate band's name and return band's url.""" return os.path.join(self.path, f"{band}.tif") def test_MultiBandTilerFactory(): """test MultiBandTilerFactory.""" bands = MultiBandTilerFactory(reader=BandFileReader) assert len(bands.router.routes) == 27 app = FastAPI() app.include_router(bands.router) add_exception_handlers(app, DEFAULT_STATUS_CODES) client = TestClient(app) response = client.get(f"/bands?url={DATA_DIR}") assert response.status_code == 200 assert response.json() == ["B01", "B09"] response = client.get(f"/info?url={DATA_DIR}&bands=B01") assert response.status_code == 200 assert response.json()["band_metadata"] == [["B01", {}]] response = client.get(f"/info.geojson?url={DATA_DIR}&bands=B01") assert response.status_code == 200 assert response.headers["content-type"] == "application/geo+json" response = 
client.get(f"/metadata?url={DATA_DIR}&bands=B01&bidx=1") assert response.status_code == 200 assert response.json()["statistics"]["B01"] response = client.get(f"/preview.tif?url={DATA_DIR}&return_mask=false") assert response.status_code == 400 response = client.get( f"/preview.tif?url={DATA_DIR}&bands=B01,B09,B01&return_mask=false" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/tiff; application=geotiff" meta = parse_img(response.content) assert meta["dtype"] == "uint16" assert meta["count"] == 3 response = client.get( f"/preview.tif?url={DATA_DIR}&expression=B01,B09,B01&return_mask=false" ) assert response.status_code == 200 assert response.headers["content-type"] == "image/tiff; application=geotiff" meta = parse_img(response.content) assert meta["dtype"] == "int32" assert meta["count"] == 3 # GET - statistics response = client.get(f"/statistics?url={DATA_DIR}&bands=B01,B09") assert response.status_code == 200 assert response.headers["content-type"] == "application/json" resp = response.json() assert len(resp) == 2 assert list(resp[0]) == [ "min", "max", "mean", "count", "sum", "std", "median", "majority", "minority", "unique", "percentile_2", "percentile_98", "valid_pixels", "masked_pixels", "valid_percent", ] def test_TMSFactory(): """test TMSFactory.""" tms_endpoints = TMSFactory(router_prefix="tms") assert len(tms_endpoints.router.routes) == 2 app = FastAPI() app.include_router(tms_endpoints.router, prefix="/tms") client = TestClient(app) response = client.get("/tms/tileMatrixSets") assert response.status_code == 200 body = response.json() assert len(body["tileMatrixSets"]) == 10 # morecantile has 10 defaults tms = list(filter(lambda m: m["id"] == "WebMercatorQuad", body["tileMatrixSets"]))[ 0 ] assert ( tms["links"][0]["href"] == "http://testserver/tms/tileMatrixSets/WebMercatorQuad" ) response = client.get("/tms/tileMatrixSets/WebMercatorQuad") assert response.status_code == 200 body = response.json() assert 
body["type"] == "TileMatrixSetType" assert body["identifier"] == "WebMercatorQuad"
33.799419
101
0.632665
2,806
23,254
5.149679
0.101924
0.124983
0.083045
0.099654
0.775848
0.75218
0.737855
0.731488
0.700069
0.68263
0
0.049717
0.209426
23,254
687
102
33.848617
0.736292
0.015309
0
0.589565
0
0.031304
0.296749
0.139688
0
0
0
0
0.368696
1
0.010435
false
0
0.027826
0
0.048696
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
73c5fedff09e5abf43620d438c279de15a97bee0
421
py
Python
gateapi-python/gate_api/api/__init__.py
jarenmt/IEOPUMP
220f7f612d299f7305e82fe6c33661e6871f2d86
[ "MIT" ]
null
null
null
gateapi-python/gate_api/api/__init__.py
jarenmt/IEOPUMP
220f7f612d299f7305e82fe6c33661e6871f2d86
[ "MIT" ]
null
null
null
gateapi-python/gate_api/api/__init__.py
jarenmt/IEOPUMP
220f7f612d299f7305e82fe6c33661e6871f2d86
[ "MIT" ]
null
null
null
from __future__ import absolute_import # flake8: noqa # import apis into api package from gate_api.api.delivery_api import DeliveryApi from gate_api.api.futures_api import FuturesApi from gate_api.api.margin_api import MarginApi from gate_api.api.options_api import OptionsApi from gate_api.api.spot_api import SpotApi from gate_api.api.wallet_api import WalletApi from gate_api.api.withdrawal_api import WithdrawalApi
32.384615
53
0.857482
68
421
5.029412
0.367647
0.163743
0.225146
0.28655
0
0
0
0
0
0
0
0.002646
0.102138
421
12
54
35.083333
0.902116
0.097387
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
73e1cdeca05126116c5902bc0ce8984927367d96
85
py
Python
allrank/utils/args_utils.py
almajo/allRank
845c191ed00e112351437c8884cbe5573def9531
[ "Apache-2.0" ]
473
2019-10-10T13:51:24.000Z
2022-03-31T18:19:42.000Z
allrank/utils/args_utils.py
almajo/allRank
845c191ed00e112351437c8884cbe5573def9531
[ "Apache-2.0" ]
26
2020-01-23T09:06:17.000Z
2022-03-04T23:08:13.000Z
allrank/utils/args_utils.py
almajo/allRank
845c191ed00e112351437c8884cbe5573def9531
[ "Apache-2.0" ]
63
2019-10-14T18:12:27.000Z
2022-03-18T20:47:04.000Z
def split_as_strings(splits): return [str(x).strip() for x in splits.split(",")]
28.333333
54
0.670588
14
85
3.928571
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.141176
85
2
55
42.5
0.753425
0
0
0
0
0
0.011765
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
73e3ddc6b1ec643aa4205a89774df62bd3c7ab43
44
py
Python
examples/catalyst_rl/env.py
gr33n-made/catalyst
bd413abc908ef7cbdeab42b0e805277a791e3ddb
[ "Apache-2.0" ]
2,693
2019-01-23T19:16:12.000Z
2022-03-31T02:12:42.000Z
examples/catalyst_rl/env.py
gr33n-made/catalyst
bd413abc908ef7cbdeab42b0e805277a791e3ddb
[ "Apache-2.0" ]
763
2019-01-22T20:12:56.000Z
2022-03-27T18:36:10.000Z
examples/catalyst_rl/env.py
gr33n-made/catalyst
bd413abc908ef7cbdeab42b0e805277a791e3ddb
[ "Apache-2.0" ]
445
2019-01-23T17:07:09.000Z
2022-03-30T05:38:45.000Z
# flake8: noqa class IEnvironment: pass
11
19
0.704545
5
44
6.2
1
0
0
0
0
0
0
0
0
0
0
0.029412
0.227273
44
3
20
14.666667
0.882353
0.272727
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
73ed8005c51b0195b11524410b7afecbc0c6a9cf
129
py
Python
promisedio/loop/_internal.py
promisedio/uv
b2da55e28da4a3185d810055468389822ec94f2b
[ "MIT" ]
null
null
null
promisedio/loop/_internal.py
promisedio/uv
b2da55e28da4a3185d810055468389822ec94f2b
[ "MIT" ]
null
null
null
promisedio/loop/_internal.py
promisedio/uv
b2da55e28da4a3185d810055468389822ec94f2b
[ "MIT" ]
null
null
null
# Copyright (c) 2021-2022 Andrey Churin <aachurin@gmail.com> Promisedio def __sigtrap(): # do nothing, just a trap pass
21.5
71
0.697674
18
129
4.888889
1
0
0
0
0
0
0
0
0
0
0
0.07767
0.20155
129
5
72
25.8
0.776699
0.72093
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0.5
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
5
fb5176bc4957990527bef7478ef8ef4ee143519d
3,933
py
Python
bvpl/coexist/find_corners.py
mirestrepo/voxels-at-lems
df47d031653d2ad877a97b3c1ea574b924b7d4c2
[ "BSD-2-Clause" ]
2
2015-09-18T00:17:16.000Z
2019-02-06T04:41:29.000Z
bvpl/coexist/find_corners.py
mirestrepo/voxels-at-lems
df47d031653d2ad877a97b3c1ea574b924b7d4c2
[ "BSD-2-Clause" ]
null
null
null
bvpl/coexist/find_corners.py
mirestrepo/voxels-at-lems
df47d031653d2ad877a97b3c1ea574b924b7d4c2
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/python # Script to run find 2d corners on appearance grid # Author : Isabel Restrepo #8-31-2009 import bvpl_batch import time import sys import os #time.sleep(30); bvpl_batch.register_processes(); bvpl_batch.register_datatypes(); class dbvalue: def __init__(self, index, type): self.id = index # unsigned integer self.type = type # string save_hue = 0; #data_dir = "/Users/isa/Experiments/CapitolSFM/few_windows" #output_dir = "/Users/isa/Experiments/CapitolSFM/few_windows/ocp+app/all_directions" data_dir = sys.argv[1]; output_dir = sys.argv[2]; if not os.path.isdir( output_dir + "/"): os.mkdir( output_dir + "/"); print("Load Voxel Grid"); bvpl_batch.init_process("bvxmLoadGridProcess"); bvpl_batch.set_input_string(0, data_dir +"/KL_gaussf1.vox"); bvpl_batch.set_input_string(1,"bsta_gauss_f1"); bvpl_batch.run_process(); (world_id,world_type)= bvpl_batch.commit_output(0); app_grid = dbvalue(world_id,world_type); print("Load Voxel Grid"); bvpl_batch.init_process("bvxmLoadGridProcess"); bvpl_batch.set_input_string(0, data_dir +"/ocp.vox"); bvpl_batch.set_input_string(1,"float"); bvpl_batch.run_process(); (world_id,world_type)= bvpl_batch.commit_output(0); ocp_grid = dbvalue(world_id,world_type); print("Creating corner 2d kernel"); bvpl_batch.init_process("bvplCreateCorner2dKernelVectorProcess"); bvpl_batch.set_input_unsigned(0, 3); #half length bvpl_batch.set_input_unsigned(1, 3); #half width bvpl_batch.set_input_unsigned(2, 1); #half thickness bvpl_batch.run_process(); (kernel_id,kernel_type)= bvpl_batch.commit_output(0); kernel_vector = dbvalue(kernel_id,kernel_type); print("Running Kernels"); bvpl_batch.init_process("bvplOperateOcpAndAppProcess"); bvpl_batch.set_input_from_db(0,ocp_grid ); bvpl_batch.set_input_from_db(1,app_grid ); bvpl_batch.set_input_from_db(2,kernel_vector); bvpl_batch.set_input_string(3,"find_surface"); bvpl_batch.set_input_string(4,"gauss_convolution"); bvpl_batch.set_input_string(5, output_dir + "/resp.vox"); 
bvpl_batch.set_input_string(6, output_dir + "/id.vox"); bvpl_batch.run_process(); (all_resp_grid_id,all_resp_grid_type)= bvpl_batch.commit_output(0); all_resp_grid = dbvalue(all_resp_grid_id,all_resp_grid_type); (all_id_grid_id,all_id_grid_type)= bvpl_batch.commit_output(1); all_id_grid = dbvalue(all_id_grid_id, all_id_grid_type); print("Getting top response"); bvpl_batch.init_process("bvplExtractTopResponseProcess"); bvpl_batch.set_input_from_db(0,all_resp_grid ); bvpl_batch.set_input_from_db(1,all_id_grid); bvpl_batch.set_input_unsigned(2,0); bvpl_batch.set_input_string(3, output_dir + "/top_resp.vox"); bvpl_batch.set_input_string(4, output_dir + "/top_id.vox"); bvpl_batch.run_process(); (response_grid_id,response_grid_type)= bvpl_batch.commit_output(0); response_grid = dbvalue(response_grid_id,response_grid_type); (id_grid_id,id_grid_type)= bvpl_batch.commit_output(1); id_grid = dbvalue(id_grid_id,id_grid_type); if save_hue : print("Converting ID to Hue "); bvpl_batch.init_process("bvplConvertIdToHueProcess"); bvpl_batch.set_input_from_db(0,id_grid ); bvpl_batch.set_input_from_db(1,response_grid ); bvpl_batch.set_input_from_db(2,kernel_vector); bvpl_batch.set_input_string(3, output_dir + "/hue.vox"); bvpl_batch.set_input_string(4, output_dir + "/hue.svg"); bvpl_batch.run_process(); (hue_grid_id,hue_grid_type)= bvpl_batch.commit_output(0); hue_grid = dbvalue(hue_grid_id,hue_grid_type); print("Writing Orientation Grid"); bvpl_batch.init_process("bvxmGridToImageStackProcess"); bvpl_batch.set_input_from_db(0,hue_grid); bvpl_batch.set_input_string(1,"vnl_float_4"); bvpl_batch.set_input_string(2,output_dir + "/hue_world/"); bvpl_batch.run_process(); print("Writing Response Grid"); bvpl_batch.init_process("bvxmSaveGridRawProcess"); bvpl_batch.set_input_from_db(0,response_grid); bvpl_batch.set_input_string(1,output_dir + "/resp.raw"); bvpl_batch.run_process();
3,933
3,933
0.792271
634
3,933
4.493691
0.179811
0.176904
0.122148
0.173043
0.593542
0.542998
0.443313
0.271674
0.17199
0.146718
0
0.016231
0.075769
3,933
1
3,933
3,933
0.767538
0.996186
0
0.188235
0
0
0.144244
0.046325
0
0
0
0
0
1
0.011765
false
0
0.047059
0
0.070588
0.094118
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
fb8c36aa376789586cde5492b07f43a84fe44151
173
py
Python
src/data/preprocessing/NoPreprocessing.py
ab3llini/ASLRecognizer
9a98887b13b73bb81bd4d6d8ebbfb13c4ef7e856
[ "MIT" ]
null
null
null
src/data/preprocessing/NoPreprocessing.py
ab3llini/ASLRecognizer
9a98887b13b73bb81bd4d6d8ebbfb13c4ef7e856
[ "MIT" ]
null
null
null
src/data/preprocessing/NoPreprocessing.py
ab3llini/ASLRecognizer
9a98887b13b73bb81bd4d6d8ebbfb13c4ef7e856
[ "MIT" ]
1
2019-04-16T17:20:28.000Z
2019-04-16T17:20:28.000Z
from src.data.preprocessing.AbstractPreprocessing import AbstractPreprocessing class NoPreprocessing(AbstractPreprocessing): def preprocess(self, x): return x
24.714286
78
0.797688
16
173
8.625
0.8125
0
0
0
0
0
0
0
0
0
0
0
0.144509
173
7
79
24.714286
0.932432
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
fbab358d5b41a13b732ee800c535ed59a1434533
4,991
py
Python
ofm_client/v2/__init__.py
factset/ofm-python-sdk
3345fea40fa287e899dbab01b1a839a0eeab468c
[ "Apache-2.0" ]
1
2020-05-22T11:03:35.000Z
2020-05-22T11:03:35.000Z
ofm_client/v2/__init__.py
factset/ofm-python-sdk
3345fea40fa287e899dbab01b1a839a0eeab468c
[ "Apache-2.0" ]
1
2020-05-15T07:54:51.000Z
2020-05-15T07:54:51.000Z
ofm_client/v2/__init__.py
factset/ofm-python-sdk
3345fea40fa287e899dbab01b1a839a0eeab468c
[ "Apache-2.0" ]
1
2020-05-12T11:14:35.000Z
2020-05-12T11:14:35.000Z
# coding: utf-8 # flake8: noqa """ Open:FactSet Marketplace API Headless CMS API used by the Open:FactSet Marketplace. # noqa: E501 OpenAPI spec version: v2.1.5 Contact: openfactset-frontend-engineering@factset.com """ from __future__ import absolute_import # import apis into sdk package from ofm_client.v2.api.attributes_api import AttributesApi from ofm_client.v2.api.attributes_groups_api import AttributesGroupsApi from ofm_client.v2.api.media_api import MediaApi from ofm_client.v2.api.partners_api import PartnersApi from ofm_client.v2.api.products_api import ProductsApi from ofm_client.v2.api.resources_api import ResourcesApi from ofm_client.v2.api.resources_sections_api import ResourcesSectionsApi # import ApiClient from ofm_client.v2.api_client import ApiClient from ofm_client.v2.configuration import Configuration # import models into sdk package from ofm_client.v2.models.document import Document from ofm_client.v2.models.document_section import DocumentSection from ofm_client.v2.models.get_attribute_dto import GetAttributeDto from ofm_client.v2.models.get_attributes_group_dto import GetAttributesGroupDto from ofm_client.v2.models.get_attributes_group_dto_attributes import GetAttributesGroupDtoAttributes from ofm_client.v2.models.get_partner_dto import GetPartnerDto from ofm_client.v2.models.get_partner_dto_forum_tags import GetPartnerDtoForumTags from ofm_client.v2.models.get_partner_dto_seo_meta import GetPartnerDtoSeoMeta from ofm_client.v2.models.get_partner_dto_social_media import GetPartnerDtoSocialMedia from ofm_client.v2.models.get_partner_dto_version_schema import GetPartnerDtoVersionSchema from ofm_client.v2.models.get_product_dto import GetProductDto from ofm_client.v2.models.get_product_dto_attributes import GetProductDtoAttributes from ofm_client.v2.models.get_product_dto_attributes_groups import GetProductDtoAttributesGroups from ofm_client.v2.models.get_product_dto_columns import GetProductDtoColumns from 
ofm_client.v2.models.get_product_dto_coverage_table import GetProductDtoCoverageTable from ofm_client.v2.models.get_product_dto_highlight import GetProductDtoHighlight from ofm_client.v2.models.get_product_dto_partner import GetProductDtoPartner from ofm_client.v2.models.get_product_dto_partner_social_media import GetProductDtoPartnerSocialMedia from ofm_client.v2.models.get_product_dto_preview_link import GetProductDtoPreviewLink from ofm_client.v2.models.get_product_dto_related_products import GetProductDtoRelatedProducts from ofm_client.v2.models.get_product_dto_seo_meta import GetProductDtoSeoMeta from ofm_client.v2.models.get_product_dto_third_party_urls import GetProductDtoThirdPartyUrls from ofm_client.v2.models.get_product_dto_version_schema import GetProductDtoVersionSchema from ofm_client.v2.models.get_resource_dto import GetResourceDto from ofm_client.v2.models.get_resources_section_dto import GetResourcesSectionDto from ofm_client.v2.models.get_resources_section_dto_meta import GetResourcesSectionDtoMeta from ofm_client.v2.models.inline_response200 import InlineResponse200 from ofm_client.v2.models.marking import Marking from ofm_client.v2.models.post_attribute_search_dto import PostAttributeSearchDto from ofm_client.v2.models.post_attributes_group_search_dto import PostAttributesGroupSearchDto from ofm_client.v2.models.post_partner_search_dto import PostPartnerSearchDto from ofm_client.v2.models.post_product_search_dto import PostProductSearchDto from ofm_client.v2.models.post_resource_search_dto import PostResourceSearchDto from ofm_client.v2.models.post_resources_section_search_dto import PostResourcesSectionSearchDto # import query builder into sdk package from ofm_client.v2.query_builder.search.interfaces.i_search_fields import ISearchFields from ofm_client.v2.query_builder.search.interfaces.i_search_field import ISearchField from ofm_client.v2.query_builder.search.interfaces.i_search import ISearch from 
ofm_client.v2.query_builder.search.search_term import SearchTerm from ofm_client.v2.query_builder.search.search import Search from ofm_client.v2.query_builder.filters.interfaces.i_filter_field import IFilterField from ofm_client.v2.query_builder.filters.interfaces.i_filter import IFilter from ofm_client.v2.query_builder.filters.filter_operators import FilterOperator from ofm_client.v2.query_builder.filters.filter_term import FilterTerm from ofm_client.v2.query_builder.filters.filter import Filter from ofm_client.v2.query_builder.sort.interfaces.i_sort import ISort from ofm_client.v2.query_builder.sort.interfaces.i_sort_field import ISortField from ofm_client.v2.query_builder.sort.sort_term import SortTerm from ofm_client.v2.query_builder.sort.sort_operators import SortOperator from ofm_client.v2.query_builder.sort.sort import Sort from ofm_client.v2.query_builder.query.interfaces.i_query import IQuery from ofm_client.v2.query_builder.query.query import Query from ofm_client.v2.query_builder.query_builder import QueryBuilder
54.25
101
0.885794
718
4,991
5.85376
0.194986
0.101594
0.188675
0.217702
0.505829
0.482275
0.388056
0.348085
0.162741
0.081846
0
0.016112
0.067321
4,991
91
102
54.846154
0.886788
0.065117
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
fbace7e66486adafd034a4f2096323d7832df664
125
py
Python
graphene_django_optimizer/utils.py
NyanKiyoshi/graphene-django-optimizer
f9ecbf8952312c46c0b1820253ee824d594ae4a6
[ "MIT" ]
7
2021-09-19T23:09:24.000Z
2022-03-03T23:35:37.000Z
graphene_django_optimizer/utils.py
NyanKiyoshi/graphene-django-optimizer
f9ecbf8952312c46c0b1820253ee824d594ae4a6
[ "MIT" ]
13
2020-03-24T17:53:51.000Z
2022-02-10T20:01:14.000Z
graphene_django_optimizer/utils.py
NyanKiyoshi/graphene-django-optimizer
f9ecbf8952312c46c0b1820253ee824d594ae4a6
[ "MIT" ]
1
2020-06-11T19:15:51.000Z
2020-06-11T19:15:51.000Z
noop = lambda *args, **kwargs: None def is_iterable(obj): return hasattr(obj, '__iter__') and not isinstance(obj, str)
20.833333
64
0.696
18
125
4.555556
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.168
125
5
65
25
0.788462
0
0
0
0
0
0.064
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
fbd1a58d13f55892897a899cdf4258c3098ff1b1
66
py
Python
src/zn_operation_table/__init__.py
JuanpiCasti/zn_operation_table_generator
4103d0cba5bcb089bd04217fe5c206019790c3da
[ "MIT" ]
null
null
null
src/zn_operation_table/__init__.py
JuanpiCasti/zn_operation_table_generator
4103d0cba5bcb089bd04217fe5c206019790c3da
[ "MIT" ]
null
null
null
src/zn_operation_table/__init__.py
JuanpiCasti/zn_operation_table_generator
4103d0cba5bcb089bd04217fe5c206019790c3da
[ "MIT" ]
null
null
null
from .zn_operation_table import build_table build_table(7, 'sum')
22
43
0.818182
11
66
4.545455
0.727273
0.4
0
0
0
0
0
0
0
0
0
0.016667
0.090909
66
3
44
22
0.816667
0
0
0
0
0
0.044776
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
fbdcbb7bdb409f9764e59166a70ba1c01ab05636
180
py
Python
exe_05_04_dicionario.py
blimundo/exercicios_python
2ef1bc15b28e599adf3c063b6d971878cc3ba168
[ "MIT" ]
null
null
null
exe_05_04_dicionario.py
blimundo/exercicios_python
2ef1bc15b28e599adf3c063b6d971878cc3ba168
[ "MIT" ]
null
null
null
exe_05_04_dicionario.py
blimundo/exercicios_python
2ef1bc15b28e599adf3c063b6d971878cc3ba168
[ "MIT" ]
null
null
null
"""Dicionário Escreva uma função que simula a função dict() do Python. """ def myDict(**keywords: dict) -> dict: return keywords print(myDict(a=10, b=20, c=30, d=40, e=50))
18
56
0.661111
30
180
3.966667
0.8
0
0
0
0
0
0
0
0
0
0
0.067114
0.172222
180
9
57
20
0.731544
0.377778
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0
0.333333
0.666667
0.333333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
5
837519e9936c76ee5074c5f8caba8cec072207ec
212
py
Python
rsmtpd/response/smtp_503.py
alfmel/rsmtpd
10900876b1f83d6c141070a413f81edf3c98ac51
[ "Apache-2.0" ]
1
2017-06-12T04:10:07.000Z
2017-06-12T04:10:07.000Z
rsmtpd/response/smtp_503.py
alfmel/rsmtpd
10900876b1f83d6c141070a413f81edf3c98ac51
[ "Apache-2.0" ]
null
null
null
rsmtpd/response/smtp_503.py
alfmel/rsmtpd
10900876b1f83d6c141070a413f81edf3c98ac51
[ "Apache-2.0" ]
null
null
null
from rsmtpd.response.action import OK from rsmtpd.response.base_response import BaseResponse class SmtpResponse503(BaseResponse): _smtp_code = 503 _message = "Bad sequence of commands" _action = OK
23.555556
54
0.773585
26
212
6.115385
0.692308
0.125786
0.226415
0
0
0
0
0
0
0
0
0.034091
0.169811
212
8
55
26.5
0.869318
0
0
0
0
0
0.113208
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
8387e524885296c2dbc7d1d7d30091c7628da8a9
44
py
Python
examples/str.splitlines/ex1.py
mcorne/python-by-example
15339c0909c84b51075587a6a66391100971c033
[ "MIT" ]
null
null
null
examples/str.splitlines/ex1.py
mcorne/python-by-example
15339c0909c84b51075587a6a66391100971c033
[ "MIT" ]
null
null
null
examples/str.splitlines/ex1.py
mcorne/python-by-example
15339c0909c84b51075587a6a66391100971c033
[ "MIT" ]
null
null
null
print('ab c\n\nde fg\rkl\r\n'.splitlines())
22
43
0.659091
10
44
2.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.068182
44
1
44
44
0.707317
0
0
0
0
0
0.477273
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
83b28608883fa70735f99ee3742348412301e368
2,584
py
Python
tests/app/articles/test_routing.py
cds-snc/notifier-admin
69aab94cb8d1711488e471cac52223c7f9f6f90e
[ "MIT" ]
null
null
null
tests/app/articles/test_routing.py
cds-snc/notifier-admin
69aab94cb8d1711488e471cac52223c7f9f6f90e
[ "MIT" ]
1
2019-07-05T15:09:55.000Z
2019-07-05T18:13:06.000Z
tests/app/articles/test_routing.py
cds-snc/notifier-admin
69aab94cb8d1711488e471cac52223c7f9f6f90e
[ "MIT" ]
null
null
null
import pytest from app.articles.routing import GC_ARTICLES_ROUTES, gca_url_for @pytest.mark.parametrize("route, lang, expectedURL", [("home", "en", "/home"), ("home", "fr", "/accueil")]) def test_gca_url_for_works_with_valid_routes(mocker, route, lang, expectedURL): mocker.patch("app.articles.routing.get_current_locale", return_value=lang) route = gca_url_for(route) assert route == expectedURL @pytest.mark.parametrize("route, lang", [("homez", "en"), ("homez", "fr")]) def test_gca_url_for_fails_with_invalid_routes(mocker, route, lang): mocker.patch("app.articles.routing.get_current_locale", return_value=lang) with pytest.raises(Exception): gca_url_for(route) @pytest.mark.parametrize( "route, lang, expectedURL", [("home", "en", "http://localhost/home"), ("home", "fr", "http://localhost/accueil")] ) def test_gca_url_for_creates_asbolute_url(app_, mocker, route, lang, expectedURL): mocker.patch("app.articles.routing.get_current_locale", return_value=lang) mocker.patch("app.articles.routing.url_for", return_value="http://localhost") route = gca_url_for(route, _external=True) assert route == expectedURL @pytest.mark.skip(reason="these tests reach out to GCA and are flaky; may re-enable inside of an integration-type suite") @pytest.mark.integration @pytest.mark.parametrize("route", list(GC_ARTICLES_ROUTES)) def test_ensure_all_french_gca_routes_in_GC_ARTICLES_ROUTES_exist(client_request, mocker, route): mocker.patch("app.articles.routing.get_current_locale", return_value="fr") mocker.patch("app.main.views.index.get_current_locale", return_value="fr") render_article = mocker.patch("app.main.views.index._render_articles_page", return_value="") url = gca_url_for(route) client_request.get_url(url, _expected_status=200, _test_page_title=False) assert render_article.called @pytest.mark.skip(reason="these tests reach out to GCA and are flaky; may re-enable inside of an integration-type suite") @pytest.mark.integration @pytest.mark.parametrize("route", list(GC_ARTICLES_ROUTES)) 
def test_ensure_all_english_gca_routes_in_GC_ARTICLES_ROUTES_exist(client_request, mocker, route): mocker.patch("app.articles.routing.get_current_locale", return_value="en") mocker.patch("app.main.views.index.get_current_locale", return_value="en") render_article = mocker.patch("app.main.views.index._render_articles_page", return_value="") url = gca_url_for(route) client_request.get_url(url, _expected_status=200, _test_page_title=False) assert render_article.called
43.066667
121
0.763545
370
2,584
5.032432
0.227027
0.032223
0.075188
0.082707
0.842642
0.773899
0.727175
0.727175
0.676692
0.676692
0
0.00259
0.103328
2,584
59
122
43.79661
0.801036
0
0
0.475
0
0.05
0.294118
0.148994
0
0
0
0
0.1
1
0.125
false
0
0.05
0
0.175
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
83c1ed2f845ac9af71c687712b733c8b05ec39f8
4,050
py
Python
vunet/evaluation/test_ids.py
gabolsgabs/vunet
d34726e55195c759ec63c003f2eaacda0a825865
[ "AFL-3.0" ]
1
2021-05-03T03:16:22.000Z
2021-05-03T03:16:22.000Z
vunet/evaluation/test_ids.py
gabolsgabs/vunet
d34726e55195c759ec63c003f2eaacda0a825865
[ "AFL-3.0" ]
null
null
null
vunet/evaluation/test_ids.py
gabolsgabs/vunet
d34726e55195c759ec63c003f2eaacda0a825865
[ "AFL-3.0" ]
null
null
null
ids = [ "0054d4d2b1f340a088b9d911bdd31f28", "028cb9312382479e84e71cc9d0d85d1d", "02b296bbee3c4d5e84be5d2ab5658450", "04a7e4c7381040da900a531918b129c5", "0627b246b0094eb188582b5f85c89a9d", "07045583ddf5450c93550a40cbc92a63", "075a5e7e6a60403481b2795405ab28fb", "098e29e7e6b140129bbdfd0d4d6a7763", "09aae3f31367406bac6753701c291210", "09d9713573c94f7d953c4707161ffc50", "0aeadc81354d47f68df8f4a0cdf0c1a1", "0b2934ecce8243e4899176fd66c46e60", "138df7da0ff64766904e271ef1f0d759", "1994fef246cb41fd829236393ad12835", "1a55d67522e34965997da8c8fee10061", "1e6a3a469f0e470498e164aafef7f93f", "223a01dd434d4510ae2c4d2df1ff23a7", "275f6a8de8b94f52a4147e1c2cad691d", "28a731d3a485404595772283498258da", "2d23bfc645174d719d63496fc4270b18", "349ce242d0cc4093946be40a6feec9c7", "34f2089385954d67a151a52cb9777cac", "38e960304d594f4fbd60f276f8aefae5", "3a0475b9b872458abb9ffb67cd6b3b0a", "3a4fb09e5ed4464fb9b174f4d5b6fa97", "3bad8486391d41cebcbd02d4e0802531", "3f3fd8cce19549a0a0f636f2207e66ec", "3f731bbf73324e239c48ef60f0ea596b", "4490084e194a498bb9b1162ae9e3b89d", "47df0f19739944ecb26867ee88c65074", "4b85c941676c4b658c8ae88fdff769be", "4dcd58bb6abc4bec9aad7e99047f138d", "54e1ecf68ff34b2c91ab16d37afb37f6", "55a7131d82364d00acad36a65499cefa", "58b861a63f154fb6b9369e1ec95fa99a", "5a1e44647dfd422183fef2c084b85ba6", "5b6e6fb2dd9e405cb5031c793e0589e7", "5bd5e05cc268483cb2f15d05a7f13dd5", "5c6ffb8263bc45ce9b4d3e0b309d530a", "63c2d32a96b7433896c2a70c9bdf280e", "656e4837f36a49c6a8e3a00f610793bf", "6a6a9ad6e1a246fdaf86015327ceb680", "6c59d4e7f8274ee28c3e8dbaff26d77b", "7101337d219a47be976e0363f4e25d4d", "79e23003861449b99b575d54b86bff23", "7b561198d0594a9397fd22356dfb8c25", "7f78b39f6b2b4b26a4fdf7aa63055e51", "80339d51a0a44f99934dc1b6241ee47d", "83b4fbe9abf8420798d00fb77ac8c8fb", "85efa060214a405c9e58795a6c968792", "8802a1b650df456dbf20fd8c7c80780a", "8bd18197a2474508a4e1f11b6292317f", "8bfea0d225be4cf98ae9bee74ac63b57", "8ed3731457ae4d0ebc427dbeb3b984c3", "8f157cb75225415a953e8aa1ec31fe65", 
"9057e9bfa65544c7bb3cae2f7d089cda", "90affe98aafb4eb5b60b6e92d0740bfa", "977300d8aa4c4feaad3fa4734dfc7cf4", "a1d44044903d4661916db09d3ba3af86", "a2307f4fb6e844c18c85be373f2afaa7", "a3263734562b41e9be2429ba929cf2ce", "a689f3a4790c4dd2b6640e757908d33d", "a7bb0d130c40454896dbc9eff69e9883", "aa0438ef1c3b42c5b78ad0dd1d96935c", "aa37a2c59db740be98787e21232d823a", "aa68bd15ffff43ea9b81d9423ffd3066", "ab2f49b30d074bcda07ea9da7a20675f", "ac932de5abc946469a11d84101304e73", "adec67e8fbaf426aa5cf2d0f72ac3ab8", "ae480b71c18a409eae560944fc45c596", "aec6d16f0a9946c79401d1fd30f00d19", "b3cf740cc89f4546a770f9a40b48d30c", "b6e887f61bd642b580bdbcd2df61b974", "b8e906edbf1d4c4789609dac816dab79", "b9b6b683d5db43369bbe10ff9fbbf2cf", "bb9411207b1d4f64aad3f7f61188c7df", "bddb1d0dfc644be9b8a3c47174e189b3", "c18efc51dfc24c9a8f879f270eebaf30", "c2ba6a5c606f4c69865fc02d1abe42c7", "c372c4eeb29e42f38a138ff653e86337", "c4de84ec16c64ce3877a420d5046a26a", "c4e3f4420d7449c6acddd7b86155b17d", "c6fe0a4e98454ce987ede487825da760", "c9aff9dc5f3e444ba31c28d2df95509f", "c9cbd0064c4f4026a0568406a1405de0", "cc9d502762e84305a4c872f62169e895", "d19f2702ced74a77b8d4479452763188", "d538b4747e9640e490ce8cb7a84a0a0e", "e05f1471e4474245bdbb520a124771a6", "e167448352574f1d8e90009b1b34acd1", "e7376c9c173d4effbb1cfcac758aad48", "e76a7846ac9d4a4dae8997cb4e58ded2", "ea34afda101148f6a8da883646f1ebb0", "ebae2a2ec78c4c978f400e26fd416f27", "efc72bafe7b54befafaae134049c47cd", "f0db1df05d7940e69af522dff0f0b949", "f47fe77b965c44798f1cad8aa95782c5", "f5856979ed124e86ab54eed0943b9dd7", "fb11ab318a83472aac0b1d90403007fe", "fb38c36793654dbaa6107b91b7f88e6e", "ffbfd5b3ebe542858c9e2cf45dd26e10", ]
38.942308
39
0.798765
102
4,050
31.715686
1
0
0
0
0
0
0
0
0
0
0
0.565377
0.125679
4,050
103
40
39.320388
0.348207
0
0
0
0
0
0.798025
0.798025
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
83d66b32b58e6160c55d0a74af59a8a305bdd4c8
88
py
Python
leadrouter/__init__.py
RealGeeks/lead_router.py
61da9028421131f74e9538f1d698c7f8644f8574
[ "MIT" ]
null
null
null
leadrouter/__init__.py
RealGeeks/lead_router.py
61da9028421131f74e9538f1d698c7f8644f8574
[ "MIT" ]
1
2017-03-22T02:18:31.000Z
2017-03-23T00:47:45.000Z
leadrouter/__init__.py
RealGeeks/lead_router.py
61da9028421131f74e9538f1d698c7f8644f8574
[ "MIT" ]
null
null
null
from .client import Client, HTTPError from .publisher import Publisher, DebugPublisher
22
48
0.829545
10
88
7.3
0.6
0
0
0
0
0
0
0
0
0
0
0
0.125
88
3
49
29.333333
0.948052
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
83e3c43795916fd27fc5ea4b96f07114ec9e05c3
181
py
Python
src/compas_hpc/core/__init__.py
philianeles/compas
129a5a7e9d8832495d2bbee6ce7c6463ab50f2d1
[ "MIT" ]
null
null
null
src/compas_hpc/core/__init__.py
philianeles/compas
129a5a7e9d8832495d2bbee6ce7c6463ab50f2d1
[ "MIT" ]
null
null
null
src/compas_hpc/core/__init__.py
philianeles/compas
129a5a7e9d8832495d2bbee6ce7c6463ab50f2d1
[ "MIT" ]
null
null
null
from .euler import * from .cuda import * from .opencl import * from .euler import __all__ as a from .cuda import __all__ as b from .opencl import __all__ as c __all__ = a + b + c
18.1
32
0.718232
31
181
3.677419
0.322581
0.263158
0.289474
0
0
0
0
0
0
0
0
0
0.21547
181
9
33
20.111111
0.802817
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.857143
0
0.857143
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
83e623919770c4cf7c462855c24f1a5bf96e8ef6
53
py
Python
colorchron/colorwheel/__init__.py
harmsm/pantone
23f7875680666f5546757f988e872f86ad76b888
[ "MIT" ]
null
null
null
colorchron/colorwheel/__init__.py
harmsm/pantone
23f7875680666f5546757f988e872f86ad76b888
[ "MIT" ]
null
null
null
colorchron/colorwheel/__init__.py
harmsm/pantone
23f7875680666f5546757f988e872f86ad76b888
[ "MIT" ]
null
null
null
from .wheels import RGB, CMY, HSV, RYB, Chromachron
17.666667
51
0.735849
8
53
4.875
1
0
0
0
0
0
0
0
0
0
0
0
0.169811
53
2
52
26.5
0.886364
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f7d30eb2aded559bff99826054d2369e7cdb4f24
22,703
py
Python
src/deutschland/travelwarning/api/default_api.py
andreasbossard/deutschland
6f561256c707e21f81b54b139b9acb745b901298
[ "Apache-2.0" ]
445
2021-07-26T22:00:26.000Z
2022-03-31T08:31:08.000Z
src/deutschland/travelwarning/api/default_api.py
andreasbossard/deutschland
6f561256c707e21f81b54b139b9acb745b901298
[ "Apache-2.0" ]
30
2021-07-27T15:42:23.000Z
2022-03-26T16:14:11.000Z
src/deutschland/travelwarning/api/default_api.py
andreasbossard/deutschland
6f561256c707e21f81b54b139b9acb745b901298
[ "Apache-2.0" ]
28
2021-07-27T10:48:43.000Z
2022-03-26T14:31:30.000Z
""" Auswärtiges Amt OpenData Schnittstelle Dies ist die Beschreibung für die Schnittstelle zum Zugriff auf die Daten des [Auswärtigen Amtes](https://www.auswaertiges-amt.de/de/) im Rahmen der [OpenData](https://www.auswaertiges-amt.de/de/open-data-schnittstelle/736118) Initiative. ## Deaktivierung Die Schnittstelle kann deaktiviert werden, in dem Fall wird ein leeres JSON-Objekt zurückgegeben. ## Fehlerfall Im Fehlerfall wird ein leeres JSON-Objekt zurückgegeben. ## Nutzungsbedingungen Die Nutzungsbedingungen sind auf der [OpenData-Schnittstelle](https://www.auswaertiges-amt.de/de/open-data-schnittstelle/736118) des Auswärtigen Amtes zu finden. ## Änderungen ### version 1.0.1 (September 2021) * `content` (-> Details des Reise- und Sicherheitshinweis) wurde von [`/travelwarning`](#operations-default-getTravelwarning) entfernt -> bitte ab jetzt [`/travelwarning/{contentId}`](#operations-default-getSingleTravelwarning) nutzen um `content` abzufragen # noqa: E501 The version of the OpenAPI document: 1.0.1 Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from deutschland.travelwarning.api_client import ApiClient from deutschland.travelwarning.api_client import Endpoint as _Endpoint from deutschland.travelwarning.model.response_address import ResponseAddress from deutschland.travelwarning.model.response_download import ResponseDownload from deutschland.travelwarning.model.response_warning import ResponseWarning from deutschland.travelwarning.model.response_warnings import ResponseWarnings from deutschland.travelwarning.model_utils import ( # noqa: F401 check_allowed_values, check_validations, date, datetime, file_type, none_type, validate_and_convert_types, ) class DefaultApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. 
""" def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client self.get_healthcare_endpoint = _Endpoint( settings={ "response_type": (ResponseDownload,), "auth": [], "endpoint_path": "/healthcare", "operation_id": "get_healthcare", "http_method": "GET", "servers": None, }, params_map={ "all": [], "required": [], "nullable": [], "enum": [], "validation": [], }, root_map={ "validations": {}, "allowed_values": {}, "openapi_types": {}, "attribute_map": {}, "location_map": {}, "collection_format_map": {}, }, headers_map={ "accept": ["application/json"], "content_type": [], }, api_client=api_client, ) self.get_representatives_country_endpoint = _Endpoint( settings={ "response_type": (ResponseAddress,), "auth": [], "endpoint_path": "/representativesInCountry", "operation_id": "get_representatives_country", "http_method": "GET", "servers": None, }, params_map={ "all": [], "required": [], "nullable": [], "enum": [], "validation": [], }, root_map={ "validations": {}, "allowed_values": {}, "openapi_types": {}, "attribute_map": {}, "location_map": {}, "collection_format_map": {}, }, headers_map={ "accept": ["text/json;charset=UTF-8"], "content_type": [], }, api_client=api_client, ) self.get_representatives_germany_endpoint = _Endpoint( settings={ "response_type": (ResponseAddress,), "auth": [], "endpoint_path": "/representativesInGermany", "operation_id": "get_representatives_germany", "http_method": "GET", "servers": None, }, params_map={ "all": [], "required": [], "nullable": [], "enum": [], "validation": [], }, root_map={ "validations": {}, "allowed_values": {}, "openapi_types": {}, "attribute_map": {}, "location_map": {}, "collection_format_map": {}, }, headers_map={ "accept": ["text/json;charset=UTF-8"], "content_type": [], }, api_client=api_client, ) self.get_single_travelwarning_endpoint = _Endpoint( settings={ "response_type": (ResponseWarning,), "auth": [], "endpoint_path": "/travelwarning/{contentId}", 
"operation_id": "get_single_travelwarning", "http_method": "GET", "servers": None, }, params_map={ "all": [ "content_id", ], "required": [ "content_id", ], "nullable": [], "enum": [], "validation": [ "content_id", ], }, root_map={ "validations": { ("content_id",): { "inclusive_minimum": 1, }, }, "allowed_values": {}, "openapi_types": { "content_id": (int,), }, "attribute_map": { "content_id": "contentId", }, "location_map": { "content_id": "path", }, "collection_format_map": {}, }, headers_map={ "accept": ["text/json;charset=UTF-8"], "content_type": [], }, api_client=api_client, ) self.get_state_names_endpoint = _Endpoint( settings={ "response_type": (ResponseDownload,), "auth": [], "endpoint_path": "/stateNames", "operation_id": "get_state_names", "http_method": "GET", "servers": None, }, params_map={ "all": [], "required": [], "nullable": [], "enum": [], "validation": [], }, root_map={ "validations": {}, "allowed_values": {}, "openapi_types": {}, "attribute_map": {}, "location_map": {}, "collection_format_map": {}, }, headers_map={ "accept": ["application/json"], "content_type": [], }, api_client=api_client, ) self.get_travelwarning_endpoint = _Endpoint( settings={ "response_type": (ResponseWarnings,), "auth": [], "endpoint_path": "/travelwarning", "operation_id": "get_travelwarning", "http_method": "GET", "servers": None, }, params_map={ "all": [], "required": [], "nullable": [], "enum": [], "validation": [], }, root_map={ "validations": {}, "allowed_values": {}, "openapi_types": {}, "attribute_map": {}, "location_map": {}, "collection_format_map": {}, }, headers_map={ "accept": ["text/json;charset=UTF-8"], "content_type": [], }, api_client=api_client, ) def get_healthcare(self, **kwargs): """Gibt die Merkblätter des Gesundheitsdienstes zurück # noqa: E501 Merkblätter des Gesundheitsdienstes als Link auf ein PDF # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_healthcare(async_req=True) >>> result = thread.get() Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: ResponseDownload If the method is called asynchronously, returns the request thread. """ kwargs["async_req"] = kwargs.get("async_req", False) kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) kwargs["_preload_content"] = kwargs.get("_preload_content", True) kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) kwargs["_host_index"] = kwargs.get("_host_index") return self.get_healthcare_endpoint.call_with_http_info(**kwargs) def get_representatives_country(self, **kwargs): """Gibt eine Liste der deutschen Vertretungen im Ausland zurück # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_representatives_country(async_req=True) >>> result = thread.get() Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: ResponseAddress If the method is called asynchronously, returns the request thread. """ kwargs["async_req"] = kwargs.get("async_req", False) kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) kwargs["_preload_content"] = kwargs.get("_preload_content", True) kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) kwargs["_host_index"] = kwargs.get("_host_index") return self.get_representatives_country_endpoint.call_with_http_info(**kwargs) def get_representatives_germany(self, **kwargs): """Gibt eine Liste der ausländischen Vertretungen in Deutschland zurück # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_representatives_germany(async_req=True) >>> result = thread.get() Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: ResponseAddress If the method is called asynchronously, returns the request thread. """ kwargs["async_req"] = kwargs.get("async_req", False) kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) kwargs["_preload_content"] = kwargs.get("_preload_content", True) kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) kwargs["_host_index"] = kwargs.get("_host_index") return self.get_representatives_germany_endpoint.call_with_http_info(**kwargs) def get_single_travelwarning(self, content_id, **kwargs): """Gibt einen Reise- und Sicherheitshinweis zurück # noqa: E501 Gibt den vollständigen Datensatz eines Reise- und Sicherheitshinweises zurück. Benötigt die jeweilige ID siehe /travelwarning # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_single_travelwarning(content_id, async_req=True) >>> result = thread.get() Args: content_id (int): Die ID des Reise- und Sicherheitshinweises, IDs siehe /travelwarning Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: ResponseWarning If the method is called asynchronously, returns the request thread. """ kwargs["async_req"] = kwargs.get("async_req", False) kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) kwargs["_preload_content"] = kwargs.get("_preload_content", True) kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) kwargs["_host_index"] = kwargs.get("_host_index") kwargs["content_id"] = content_id return self.get_single_travelwarning_endpoint.call_with_http_info(**kwargs) def get_state_names(self, **kwargs): """Gibt das Verzeichnis der Staatennamen zurück # noqa: E501 Verzeichnis der Staatennamen als Link auf eine XML- oder CSV-Datei. 
Eine PDF-Datei mit Nutzungshinweisen wird ebenfalls zurückgegeben. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_state_names(async_req=True) >>> result = thread.get() Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: ResponseDownload If the method is called asynchronously, returns the request thread. """ kwargs["async_req"] = kwargs.get("async_req", False) kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) kwargs["_preload_content"] = kwargs.get("_preload_content", True) kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) kwargs["_host_index"] = kwargs.get("_host_index") return self.get_state_names_endpoint.call_with_http_info(**kwargs) def get_travelwarning(self, **kwargs): """Gibt alle Reise- und Sicherheitshinweise zurück # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_travelwarning(async_req=True) >>> result = thread.get() Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: ResponseWarnings If the method is called asynchronously, returns the request thread. """ kwargs["async_req"] = kwargs.get("async_req", False) kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) kwargs["_preload_content"] = kwargs.get("_preload_content", True) kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) kwargs["_host_index"] = kwargs.get("_host_index") return self.get_travelwarning_endpoint.call_with_http_info(**kwargs)
44.169261
939
0.573316
2,324
22,703
5.369191
0.120482
0.030293
0.025004
0.025966
0.798125
0.779612
0.748598
0.74411
0.735454
0.698269
0
0.004767
0.334713
22,703
513
940
44.255361
0.821372
0.440823
0
0.626374
0
0
0.265224
0.056872
0
0
0
0
0
1
0.025641
false
0
0.032967
0
0.084249
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f7de482bf794d9b7b199f206ea0ed8fd099b3454
117
py
Python
Musofirlar.Website/src/work/admin.py
SanjarbekSaminjonov/Musofirlar.uz
aab647e06c2f06979408d8f2d2a78758a8b3c65c
[ "Apache-2.0" ]
3
2021-12-22T09:01:50.000Z
2021-12-23T18:29:12.000Z
Musofirlar.Website/src/work/admin.py
SanjarbekSaminjonov/Musofirlar.uz
aab647e06c2f06979408d8f2d2a78758a8b3c65c
[ "Apache-2.0" ]
null
null
null
Musofirlar.Website/src/work/admin.py
SanjarbekSaminjonov/Musofirlar.uz
aab647e06c2f06979408d8f2d2a78758a8b3c65c
[ "Apache-2.0" ]
1
2021-12-28T06:15:33.000Z
2021-12-28T06:15:33.000Z
from django.contrib import admin from .models import Work # Register your models here. admin.site.register(Work)
13
32
0.777778
17
117
5.352941
0.647059
0
0
0
0
0
0
0
0
0
0
0
0.153846
117
8
33
14.625
0.919192
0.222222
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
790655962fe5f0dedfca81c1496d202aa334601e
47
py
Python
hello_world.py
jzfarmer/learning_python
279fc19d4405625b49f853575252bf1dee3cbb99
[ "MIT" ]
null
null
null
hello_world.py
jzfarmer/learning_python
279fc19d4405625b49f853575252bf1dee3cbb99
[ "MIT" ]
null
null
null
hello_world.py
jzfarmer/learning_python
279fc19d4405625b49f853575252bf1dee3cbb99
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 print( 'hello world' )
11.75
22
0.659574
7
47
4.428571
1
0
0
0
0
0
0
0
0
0
0
0.025
0.148936
47
3
23
15.666667
0.75
0.446809
0
0
0
0
0.44
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
790c4c9fb5edb13bf70de321a0f4fc05978d80e0
13,229
py
Python
test/test_process.py
rockyplum/vampy-host
a410d680be2c15d76e31488db789ed30e6f34910
[ "BSD-4-Clause-UC" ]
16
2016-11-19T07:24:54.000Z
2021-07-09T23:30:48.000Z
test/test_process.py
rockyplum/vampy-host
a410d680be2c15d76e31488db789ed30e6f34910
[ "BSD-4-Clause-UC" ]
6
2017-04-05T12:00:38.000Z
2022-01-13T17:51:34.000Z
test/test_process.py
rockyplum/vampy-host
a410d680be2c15d76e31488db789ed30e6f34910
[ "BSD-4-Clause-UC" ]
1
2017-04-03T16:33:51.000Z
2017-04-03T16:33:51.000Z
import vamp import numpy as np import vamp.frames as fr plugin_key = "vamp-test-plugin:vamp-test-plugin" plugin_key_freq = "vamp-test-plugin:vamp-test-plugin-freq" rate = 44100 # Throughout this file we have the assumption that the plugin gets run with a # blocksize of 1024, and with a step of 1024 for the time-domain version or 512 # for the frequency-domain one. That is certainly expected to be the norm for a # plugin like this that declares no preference, and the Python Vamp module is # expected to follow the norm. blocksize = 1024 def input_data(n): # start at 1, not 0 so that all elts are non-zero return np.arange(n) + 1 def test_process_n(): buf = input_data(blocksize) results = list(vamp.process_audio(buf, rate, plugin_key, "input-summary")) assert len(results) == 1 def test_process_freq_n(): buf = input_data(blocksize) results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-summary", {})) assert len(results) == 2 # one complete block starting at zero, one half-full def test_process_default_output(): # If no output is specified, we should get the first one (instants) buf = input_data(blocksize) results = list(vamp.process_audio(buf, rate, plugin_key, "", {})) assert len(results) == 10 for i in range(len(results)): expectedTime = vamp.vampyhost.RealTime('seconds', i * 1.5) actualTime = results[i]["timestamp"] assert expectedTime == actualTime def test_process_summary_param(): buf = input_data(blocksize * 10) results = list(vamp.process_audio(buf, rate, plugin_key, "input-summary", { "produce_output": 0 })) assert len(results) == 0 def test_process_multi_summary_param(): buf = input_data(blocksize * 10) results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-summary" ], { "produce_output": 0 })) assert len(results) == 0 def test_process_summary_param_bool(): buf = input_data(blocksize * 10) results = list(vamp.process_audio(buf, rate, plugin_key, "input-summary", { "produce_output": False })) assert len(results) == 0 def 
test_process_multi_summary_param_bool(): buf = input_data(blocksize * 10) results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-summary" ], { "produce_output": False })) assert len(results) == 0 def test_process_summary(): buf = input_data(blocksize * 10) results = list(vamp.process_audio(buf, rate, plugin_key, "input-summary", {})) assert len(results) == 10 for i in range(len(results)): # # each feature has a single value, equal to the number of non-zero elts # in the input block (which is all of them, i.e. the blocksize) plus # the first elt (which is i * blockSize + 1) # expected = blocksize + i * blocksize + 1 actual = results[i]["values"][0] assert actual == expected def test_process_frames_summary(): buf = input_data(blocksize * 10) ff = fr.frames_from_array(buf, blocksize, blocksize) results = list(vamp.process_frames(ff, rate, blocksize, plugin_key, "input-summary", {})) assert len(results) == 10 for i in range(len(results)): # # each feature has a single value, equal to the number of non-zero elts # in the input block (which is all of them, i.e. the blocksize) plus # the first elt (which is i * blockSize + 1) # expected = blocksize + i * blocksize + 1 actual = results[i]["values"][0] assert actual == expected def test_process_multi_summary(): buf = input_data(blocksize * 10) results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-summary" ], {})) assert len(results) == 10 for i in range(len(results)): # # each feature has a single value, equal to the number of non-zero elts # in the input block (which is all of them, i.e. 
the blocksize) plus # the first elt (which is i * blockSize + 1) # expected = blocksize + i * blocksize + 1 actual = results[i]["input-summary"]["values"][0] assert actual == expected def test_process_frames_multi_summary(): buf = input_data(blocksize * 10) ff = fr.frames_from_array(buf, blocksize, blocksize) results = list(vamp.process_frames_multiple_outputs(ff, rate, blocksize, plugin_key, [ "input-summary" ], {})) assert len(results) == 10 for i in range(len(results)): # # each feature has a single value, equal to the number of non-zero elts # in the input block (which is all of them, i.e. the blocksize) plus # the first elt (which is i * blockSize + 1) # expected = blocksize + i * blocksize + 1 actual = results[i]["input-summary"]["values"][0] assert actual == expected def test_process_freq_summary(): buf = input_data(blocksize * 10) results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-summary", {})) assert len(results) == 20 for i in range(len(results)): # # sort of as above, but much much subtler: # # * the input block is converted to frequency domain but then converted # back within the plugin, so the values being reported are time-domain # ones but with windowing and FFT shift # # * the effect of FFT shift is that the first element in the # re-converted frame is actually the one that was at the start of the # second half of the original frame # # * and the last block is only half-full, so the "first" elt in that # one, which actually comes from just after the middle of the block, # will be zero # # * windowing does not affect the value of the first elt, because # (before fft shift) it came from the peak of the window shape where # the window value is 1 # # * but windowing does affect the number of non-zero elts, because the # asymmetric window used has one value very close to zero in it # # * the step size (the increment in input value from one block to the # next) is only half the block size # expected = i * (blocksize/2) + blocksize/2 + 1 
# "first" elt if (i == len(results)-1): expected = 0 expected = expected + blocksize - 1 # non-zero elts actual = results[i]["values"][0] eps = 1e-6 assert abs(actual - expected) < eps def test_process_freq_summary_shift(): buf = input_data(blocksize * 10) results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-summary", {}, process_timestamp_method = vamp.vampyhost.SHIFT_DATA)) assert len(results) == 20 for i in range(len(results)): # as test_process_freq_summary, except that the input is effectively # padded by the adapter with an additional half-blocksize of zeros # before conversion if i == 0: # this block doesn't interact at all well with our test, we get # spurious low values in the block converted back within the plugin # because of the big discontinuity & window ripple after fftshift pass else: expected = (i-1) * (blocksize/2) + blocksize/2 + 1 # for "first" elt expected = expected + blocksize - 1 # non-zero elts actual = results[i]["values"][0] eps = 1e-6 assert abs(actual - expected) < eps def test_process_multi_freq_summary(): buf = input_data(blocksize * 10) results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key_freq, [ "input-summary" ], {})) assert len(results) == 20 for i in range(len(results)): expected = i * (blocksize/2) + blocksize/2 + 1 # "first" elt if (i == len(results)-1): expected = 0 expected = expected + blocksize - 1 # non-zero elts actual = results[i]["input-summary"]["values"][0] eps = 1e-6 assert abs(actual - expected) < eps def test_process_timestamps(): buf = input_data(blocksize * 10) results = list(vamp.process_audio(buf, rate, plugin_key, "input-timestamp", {})) assert len(results) == 10 for i in range(len(results)): # The timestamp should be the frame number of the first frame in the # input buffer expected = i * blocksize actual = results[i]["values"][0] assert actual == expected def test_process_multi_timestamps(): buf = input_data(blocksize * 10) results = 
list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-timestamp" ])) assert len(results) == 10 for i in range(len(results)): # The timestamp should be the frame number of the first frame in the # input buffer expected = i * blocksize actual = results[i]["input-timestamp"]["values"][0] assert actual == expected def test_process_freq_timestamps(): buf = input_data(blocksize * 10) results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-timestamp", {})) assert len(results) == 20 for i in range(len(results)): # The timestamp should be the frame number of the frame just beyond # half-way through the input buffer expected = i * (blocksize/2) + blocksize/2 actual = results[i]["values"][0] if actual == 2047 and expected == 2048: print("This test fails because of a bug in the Vamp plugin SDK. Please update to SDK version 2.6.") assert actual == expected def test_process_freq_shift_timestamps(): buf = input_data(blocksize * 10) results = list(vamp.process_audio(buf, rate, plugin_key_freq, "input-timestamp", process_timestamp_method = vamp.vampyhost.SHIFT_DATA)) assert len(results) == 20 for i in range(len(results)): # The timestamp should be the frame number of the frame at the start of # the input buffer expected = i * (blocksize/2) actual = results[i]["values"][0] if actual == 2047 and expected == 2048: print("This test fails because of a bug in the Vamp plugin SDK. 
Please update to SDK version 2.6.") assert actual == expected def test_process_multi_freq_timestamps(): buf = input_data(blocksize * 10) results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key_freq, [ "input-timestamp" ], {})) assert len(results) == 20 for i in range(len(results)): # The timestamp should be the frame number of the frame just beyond # half-way through the input buffer expected = i * (blocksize/2) + blocksize/2 actual = results[i]["input-timestamp"]["values"][0] if actual == 2047 and expected == 2048: print("This test fails because of a bug in the Vamp plugin SDK. Please update to SDK version 2.6.") assert actual == expected def test_process_blocksize_timestamps(): buf = input_data(blocksize * 10) results = list(vamp.process_audio(buf, rate, plugin_key, "input-timestamp", {}, block_size = blocksize * 2)) # step size defaults to block size assert len(results) == 5 for i in range(len(results)): # The timestamp should be the frame number of the first frame in the # input buffer expected = i * blocksize * 2 actual = results[i]["values"][0] assert actual == expected def test_process_stepsize_timestamps(): buf = input_data(blocksize * 10) results = list(vamp.process_audio(buf, rate, plugin_key, "input-timestamp", {}, step_size = int(blocksize / 2))) assert len(results) == 20 for i in range(len(results)): # The timestamp should be the frame number of the first frame in the # input buffer expected = (i * blocksize) / 2 actual = results[i]["values"][0] assert actual == expected def test_process_stepsize_blocksize_timestamps(): buf = input_data(blocksize * 10) results = list(vamp.process_audio(buf, rate, plugin_key, "input-timestamp", {}, block_size = blocksize * 2, step_size = int(blocksize / 2))) assert len(results) == 20 for i in range(len(results)): # The timestamp should be the frame number of the first frame in the # input buffer expected = (i * blocksize) / 2 actual = results[i]["values"][0] assert actual == expected def 
test_process_multiple_outputs(): buf = input_data(blocksize * 10) results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ "input-summary", "input-timestamp" ], {})) assert len(results) == 20 si = 0 ti = 0 for r in results: assert "input-summary" in r or "input-timestamp" in r if "input-summary" in r: expected = blocksize + si * blocksize + 1 actual = r["input-summary"]["values"][0] assert actual == expected si = si + 1 if "input-timestamp" in r: expected = ti * blocksize actual = r["input-timestamp"]["values"][0] assert actual == expected ti = ti + 1
43.516447
147
0.642452
1,826
13,229
4.54874
0.119934
0.049362
0.038767
0.058151
0.776547
0.760414
0.747171
0.731038
0.731038
0.724657
0
0.020584
0.254517
13,229
303
148
43.660066
0.821639
0.247789
0
0.615
0
0.015
0.099828
0.007196
0
0
0
0
0.21
1
0.12
false
0.005
0.015
0.005
0.14
0.015
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
792cca4ef724bdafadab89c2e5b7d60a9a206772
62
py
Python
lib/tamper_scripts/uppercase_encode.py
ikstream/Zeus-Scanner
272839c44a950b9c3c9e6d83a2c76fdd51cd010b
[ "RSA-MD" ]
841
2017-09-08T10:25:35.000Z
2022-03-20T14:27:09.000Z
lib/tamper_scripts/uppercase_encode.py
An0nYm0u5101/Zeus-Scanner
21b87563062326cd480669f2922f650173a2a18e
[ "RSA-MD" ]
1,121
2017-09-21T14:42:50.000Z
2022-03-01T16:49:19.000Z
lib/tamper_scripts/uppercase_encode.py
An0nYm0u5101/Zeus-Scanner
21b87563062326cd480669f2922f650173a2a18e
[ "RSA-MD" ]
243
2017-09-09T21:35:50.000Z
2022-01-06T22:38:54.000Z
def tamper(payload, **kwargs): return str(payload).upper()
31
31
0.693548
8
62
5.375
0.875
0
0
0
0
0
0
0
0
0
0
0
0.129032
62
2
31
31
0.796296
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
7937f70a576a0bc4ed7922f73d7bd243fbf0639d
83
py
Python
app/database/base.py
WendyHzo/appBackend
cb7319a5e8635038afc54e2ea8b2a05bc943077a
[ "Apache-2.0" ]
null
null
null
app/database/base.py
WendyHzo/appBackend
cb7319a5e8635038afc54e2ea8b2a05bc943077a
[ "Apache-2.0" ]
null
null
null
app/database/base.py
WendyHzo/appBackend
cb7319a5e8635038afc54e2ea8b2a05bc943077a
[ "Apache-2.0" ]
null
null
null
from app.database.base_class import Base from app.models.liquors import licorera
27.666667
41
0.831325
13
83
5.230769
0.692308
0.205882
0
0
0
0
0
0
0
0
0
0
0.120482
83
2
42
41.5
0.931507
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
f70e4517d1db8c8e699000ac090e9cb2e6804237
68
py
Python
leo/test/unittest/at-nosent-test.py
ATikhonov2/leo-editor
225aac990a9b2804aaa9dea29574d6e072e30474
[ "MIT" ]
2
2020-01-19T18:11:05.000Z
2020-01-19T18:12:07.000Z
leo/test/unittest/at-nosent-test.py
ATikhonov2/leo-editor
225aac990a9b2804aaa9dea29574d6e072e30474
[ "MIT" ]
1
2020-06-19T02:28:25.000Z
2020-06-19T02:28:25.000Z
leo/test/unittest/at-nosent-test.py
ATikhonov2/leo-editor
225aac990a9b2804aaa9dea29574d6e072e30474
[ "MIT" ]
null
null
null
def spam(): pass # Unicode test: Ã after. def eggs(): pass
11.333333
33
0.573529
10
68
3.9
0.8
0
0
0
0
0
0
0
0
0
0
0
0.294118
68
5
34
13.6
0.8125
0.323529
0
0.5
0
0
0
0
0
0
0
0
0
1
0.5
true
0.5
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
5
f736a3517e3c5f420c40d3bfa794b57a3d8b2046
17,362
py
Python
balance.py
Charterhouse/random_forest
b842f08fee1054dbff78b6fb3afd4006a7f14a6d
[ "MIT" ]
2
2019-10-24T07:22:46.000Z
2019-11-18T12:32:26.000Z
balance.py
Charterhouse/random_forest
b842f08fee1054dbff78b6fb3afd4006a7f14a6d
[ "MIT" ]
null
null
null
balance.py
Charterhouse/random_forest
b842f08fee1054dbff78b6fb3afd4006a7f14a6d
[ "MIT" ]
2
2020-03-03T18:30:14.000Z
2021-09-06T13:55:06.000Z
from mpyc.runtime import mpc from src.dataset import ObliviousDataset, Sample from src.output import output from src.secint import secint as s from src.forest import train_forest def sample(ins, out): return Sample([s(i) for i in ins], s(out)) samples = ObliviousDataset.create( sample([1, 1, 1, 2], 1), sample([1, 1, 1, 3], 1), sample([1, 1, 1, 4], 1), sample([1, 1, 1, 5], 1), sample([1, 1, 2, 1], 1), sample([1, 1, 2, 2], 1), sample([1, 1, 2, 3], 1), sample([1, 1, 2, 4], 1), sample([1, 1, 2, 5], 1), sample([1, 1, 3, 1], 1), sample([1, 1, 3, 2], 1), sample([1, 1, 3, 3], 1), sample([1, 1, 3, 4], 1), sample([1, 1, 3, 5], 1), sample([1, 1, 4, 1], 1), sample([1, 1, 4, 2], 1), sample([1, 1, 4, 3], 1), sample([1, 1, 4, 4], 1), sample([1, 1, 4, 5], 1), sample([1, 1, 5, 1], 1), sample([1, 1, 5, 2], 1), sample([1, 1, 5, 3], 1), sample([1, 1, 5, 4], 1), sample([1, 1, 5, 5], 1), sample([1, 2, 1, 1], 0), sample([1, 2, 1, 3], 1), sample([1, 2, 1, 4], 1), sample([1, 2, 1, 5], 1), sample([1, 2, 2, 2], 1), sample([1, 2, 2, 3], 1), sample([1, 2, 2, 4], 1), sample([1, 2, 2, 5], 1), sample([1, 2, 3, 1], 1), sample([1, 2, 3, 2], 1), sample([1, 2, 3, 3], 1), sample([1, 2, 3, 4], 1), sample([1, 2, 3, 5], 1), sample([1, 2, 4, 1], 1), sample([1, 2, 4, 2], 1), sample([1, 2, 4, 3], 1), sample([1, 2, 4, 4], 1), sample([1, 2, 4, 5], 1), sample([1, 2, 5, 1], 1), sample([1, 2, 5, 2], 1), sample([1, 2, 5, 3], 1), sample([1, 2, 5, 4], 1), sample([1, 2, 5, 5], 1), sample([1, 3, 1, 1], 0), sample([1, 3, 1, 2], 0), sample([1, 3, 1, 4], 1), sample([1, 3, 1, 5], 1), sample([1, 3, 2, 1], 0), sample([1, 3, 2, 2], 1), sample([1, 3, 2, 3], 1), sample([1, 3, 2, 4], 1), sample([1, 3, 2, 5], 1), sample([1, 3, 3, 2], 1), sample([1, 3, 3, 3], 1), sample([1, 3, 3, 4], 1), sample([1, 3, 3, 5], 1), sample([1, 3, 4, 1], 1), sample([1, 3, 4, 2], 1), sample([1, 3, 4, 3], 1), sample([1, 3, 4, 4], 1), sample([1, 3, 4, 5], 1), sample([1, 3, 5, 1], 1), sample([1, 3, 5, 2], 1), sample([1, 3, 5, 3], 1), sample([1, 3, 5, 4], 
1), sample([1, 3, 5, 5], 1), sample([1, 4, 1, 1], 0), sample([1, 4, 1, 2], 0), sample([1, 4, 1, 3], 0), sample([1, 4, 1, 5], 1), sample([1, 4, 2, 1], 0), sample([1, 4, 2, 3], 1), sample([1, 4, 2, 4], 1), sample([1, 4, 2, 5], 1), sample([1, 4, 3, 1], 0), sample([1, 4, 3, 2], 1), sample([1, 4, 3, 3], 1), sample([1, 4, 3, 4], 1), sample([1, 4, 3, 5], 1), sample([1, 4, 4, 2], 1), sample([1, 4, 4, 3], 1), sample([1, 4, 4, 4], 1), sample([1, 4, 4, 5], 1), sample([1, 4, 5, 1], 1), sample([1, 4, 5, 2], 1), sample([1, 4, 5, 3], 1), sample([1, 4, 5, 4], 1), sample([1, 4, 5, 5], 1), sample([1, 5, 1, 1], 0), sample([1, 5, 1, 2], 0), sample([1, 5, 1, 3], 0), sample([1, 5, 1, 4], 0), sample([1, 5, 2, 1], 0), sample([1, 5, 2, 2], 0), sample([1, 5, 2, 3], 1), sample([1, 5, 2, 4], 1), sample([1, 5, 2, 5], 1), sample([1, 5, 3, 1], 0), sample([1, 5, 3, 2], 1), sample([1, 5, 3, 3], 1), sample([1, 5, 3, 4], 1), sample([1, 5, 3, 5], 1), sample([1, 5, 4, 1], 0), sample([1, 5, 4, 2], 1), sample([1, 5, 4, 3], 1), sample([1, 5, 4, 4], 1), sample([1, 5, 4, 5], 1), sample([1, 5, 5, 2], 1), sample([1, 5, 5, 3], 1), sample([1, 5, 5, 4], 1), sample([1, 5, 5, 5], 1), sample([2, 1, 1, 1], 0), sample([2, 1, 1, 3], 1), sample([2, 1, 1, 4], 1), sample([2, 1, 1, 5], 1), sample([2, 1, 2, 2], 1), sample([2, 1, 2, 3], 1), sample([2, 1, 2, 4], 1), sample([2, 1, 2, 5], 1), sample([2, 1, 3, 1], 1), sample([2, 1, 3, 2], 1), sample([2, 1, 3, 3], 1), sample([2, 1, 3, 4], 1), sample([2, 1, 3, 5], 1), sample([2, 1, 4, 1], 1), sample([2, 1, 4, 2], 1), sample([2, 1, 4, 3], 1), sample([2, 1, 4, 4], 1), sample([2, 1, 4, 5], 1), sample([2, 1, 5, 1], 1), sample([2, 1, 5, 2], 1), sample([2, 1, 5, 3], 1), sample([2, 1, 5, 4], 1), sample([2, 1, 5, 5], 1), sample([2, 2, 1, 1], 0), sample([2, 2, 1, 2], 0), sample([2, 2, 1, 3], 0), sample([2, 2, 1, 5], 1), sample([2, 2, 2, 1], 0), sample([2, 2, 2, 3], 1), sample([2, 2, 2, 4], 1), sample([2, 2, 2, 5], 1), sample([2, 2, 3, 1], 0), sample([2, 2, 3, 2], 1), sample([2, 2, 3, 3], 
1), sample([2, 2, 3, 4], 1), sample([2, 2, 3, 5], 1), sample([2, 2, 4, 2], 1), sample([2, 2, 4, 3], 1), sample([2, 2, 4, 4], 1), sample([2, 2, 4, 5], 1), sample([2, 2, 5, 1], 1), sample([2, 2, 5, 2], 1), sample([2, 2, 5, 3], 1), sample([2, 2, 5, 4], 1), sample([2, 2, 5, 5], 1), sample([2, 3, 1, 1], 0), sample([2, 3, 1, 2], 0), sample([2, 3, 1, 3], 0), sample([2, 3, 1, 4], 0), sample([2, 3, 1, 5], 0), sample([2, 3, 2, 1], 0), sample([2, 3, 2, 2], 0), sample([2, 3, 2, 4], 1), sample([2, 3, 2, 5], 1), sample([2, 3, 3, 1], 0), sample([2, 3, 3, 3], 1), sample([2, 3, 3, 4], 1), sample([2, 3, 3, 5], 1), sample([2, 3, 4, 1], 0), sample([2, 3, 4, 2], 1), sample([2, 3, 4, 3], 1), sample([2, 3, 4, 4], 1), sample([2, 3, 4, 5], 1), sample([2, 3, 5, 1], 0), sample([2, 3, 5, 2], 1), sample([2, 3, 5, 3], 1), sample([2, 3, 5, 4], 1), sample([2, 3, 5, 5], 1), sample([2, 4, 1, 1], 0), sample([2, 4, 1, 2], 0), sample([2, 4, 1, 3], 0), sample([2, 4, 1, 4], 0), sample([2, 4, 1, 5], 0), sample([2, 4, 2, 1], 0), sample([2, 4, 2, 2], 0), sample([2, 4, 2, 3], 0), sample([2, 4, 2, 5], 1), sample([2, 4, 3, 1], 0), sample([2, 4, 3, 2], 0), sample([2, 4, 3, 3], 1), sample([2, 4, 3, 4], 1), sample([2, 4, 3, 5], 1), sample([2, 4, 4, 1], 0), sample([2, 4, 4, 3], 1), sample([2, 4, 4, 4], 1), sample([2, 4, 4, 5], 1), sample([2, 4, 5, 1], 0), sample([2, 4, 5, 2], 1), sample([2, 4, 5, 3], 1), sample([2, 4, 5, 4], 1), sample([2, 4, 5, 5], 1), sample([2, 5, 1, 1], 0), sample([2, 5, 1, 2], 0), sample([2, 5, 1, 3], 0), sample([2, 5, 1, 4], 0), sample([2, 5, 1, 5], 0), sample([2, 5, 2, 1], 0), sample([2, 5, 2, 2], 0), sample([2, 5, 2, 3], 0), sample([2, 5, 2, 4], 0), sample([2, 5, 3, 1], 0), sample([2, 5, 3, 2], 0), sample([2, 5, 3, 3], 0), sample([2, 5, 3, 4], 1), sample([2, 5, 3, 5], 1), sample([2, 5, 4, 1], 0), sample([2, 5, 4, 2], 0), sample([2, 5, 4, 3], 1), sample([2, 5, 4, 4], 1), sample([2, 5, 4, 5], 1), sample([2, 5, 5, 1], 0), sample([2, 5, 5, 3], 1), sample([2, 5, 5, 4], 1), sample([2, 5, 5, 5], 
1), sample([3, 1, 1, 1], 0), sample([3, 1, 1, 2], 0), sample([3, 1, 1, 4], 1), sample([3, 1, 1, 5], 1), sample([3, 1, 2, 1], 0), sample([3, 1, 2, 2], 1), sample([3, 1, 2, 3], 1), sample([3, 1, 2, 4], 1), sample([3, 1, 2, 5], 1), sample([3, 1, 3, 2], 1), sample([3, 1, 3, 3], 1), sample([3, 1, 3, 4], 1), sample([3, 1, 3, 5], 1), sample([3, 1, 4, 1], 1), sample([3, 1, 4, 2], 1), sample([3, 1, 4, 3], 1), sample([3, 1, 4, 4], 1), sample([3, 1, 4, 5], 1), sample([3, 1, 5, 1], 1), sample([3, 1, 5, 2], 1), sample([3, 1, 5, 3], 1), sample([3, 1, 5, 4], 1), sample([3, 1, 5, 5], 1), sample([3, 2, 1, 1], 0), sample([3, 2, 1, 2], 0), sample([3, 2, 1, 3], 0), sample([3, 2, 1, 4], 0), sample([3, 2, 1, 5], 0), sample([3, 2, 2, 1], 0), sample([3, 2, 2, 2], 0), sample([3, 2, 2, 4], 1), sample([3, 2, 2, 5], 1), sample([3, 2, 3, 1], 0), sample([3, 2, 3, 3], 1), sample([3, 2, 3, 4], 1), sample([3, 2, 3, 5], 1), sample([3, 2, 4, 1], 0), sample([3, 2, 4, 2], 1), sample([3, 2, 4, 3], 1), sample([3, 2, 4, 4], 1), sample([3, 2, 4, 5], 1), sample([3, 2, 5, 1], 0), sample([3, 2, 5, 2], 1), sample([3, 2, 5, 3], 1), sample([3, 2, 5, 4], 1), sample([3, 2, 5, 5], 1), sample([3, 3, 1, 1], 0), sample([3, 3, 1, 2], 0), sample([3, 3, 1, 3], 0), sample([3, 3, 1, 4], 0), sample([3, 3, 1, 5], 0), sample([3, 3, 2, 1], 0), sample([3, 3, 2, 2], 0), sample([3, 3, 2, 3], 0), sample([3, 3, 2, 4], 0), sample([3, 3, 2, 5], 1), sample([3, 3, 3, 1], 0), sample([3, 3, 3, 2], 0), sample([3, 3, 3, 4], 1), sample([3, 3, 3, 5], 1), sample([3, 3, 4, 1], 0), sample([3, 3, 4, 2], 0), sample([3, 3, 4, 3], 1), sample([3, 3, 4, 4], 1), sample([3, 3, 4, 5], 1), sample([3, 3, 5, 1], 0), sample([3, 3, 5, 2], 1), sample([3, 3, 5, 3], 1), sample([3, 3, 5, 4], 1), sample([3, 3, 5, 5], 1), sample([3, 4, 1, 1], 0), sample([3, 4, 1, 2], 0), sample([3, 4, 1, 3], 0), sample([3, 4, 1, 4], 0), sample([3, 4, 1, 5], 0), sample([3, 4, 2, 1], 0), sample([3, 4, 2, 2], 0), sample([3, 4, 2, 3], 0), sample([3, 4, 2, 4], 0), sample([3, 4, 2, 5], 
0), sample([3, 4, 3, 1], 0), sample([3, 4, 3, 2], 0), sample([3, 4, 3, 3], 0), sample([3, 4, 3, 5], 1), sample([3, 4, 4, 1], 0), sample([3, 4, 4, 2], 0), sample([3, 4, 4, 4], 1), sample([3, 4, 4, 5], 1), sample([3, 4, 5, 1], 0), sample([3, 4, 5, 2], 0), sample([3, 4, 5, 3], 1), sample([3, 4, 5, 4], 1), sample([3, 4, 5, 5], 1), sample([3, 5, 1, 1], 0), sample([3, 5, 1, 2], 0), sample([3, 5, 1, 3], 0), sample([3, 5, 1, 4], 0), sample([3, 5, 1, 5], 0), sample([3, 5, 2, 1], 0), sample([3, 5, 2, 2], 0), sample([3, 5, 2, 3], 0), sample([3, 5, 2, 4], 0), sample([3, 5, 2, 5], 0), sample([3, 5, 3, 1], 0), sample([3, 5, 3, 2], 0), sample([3, 5, 3, 3], 0), sample([3, 5, 3, 4], 0), sample([3, 5, 4, 1], 0), sample([3, 5, 4, 2], 0), sample([3, 5, 4, 3], 0), sample([3, 5, 4, 4], 1), sample([3, 5, 4, 5], 1), sample([3, 5, 5, 1], 0), sample([3, 5, 5, 2], 0), sample([3, 5, 5, 4], 1), sample([3, 5, 5, 5], 1), sample([4, 1, 1, 1], 0), sample([4, 1, 1, 2], 0), sample([4, 1, 1, 3], 0), sample([4, 1, 1, 5], 1), sample([4, 1, 2, 1], 0), sample([4, 1, 2, 3], 1), sample([4, 1, 2, 4], 1), sample([4, 1, 2, 5], 1), sample([4, 1, 3, 1], 0), sample([4, 1, 3, 2], 1), sample([4, 1, 3, 3], 1), sample([4, 1, 3, 4], 1), sample([4, 1, 3, 5], 1), sample([4, 1, 4, 2], 1), sample([4, 1, 4, 3], 1), sample([4, 1, 4, 4], 1), sample([4, 1, 4, 5], 1), sample([4, 1, 5, 1], 1), sample([4, 1, 5, 2], 1), sample([4, 1, 5, 3], 1), sample([4, 1, 5, 4], 1), sample([4, 1, 5, 5], 1), sample([4, 2, 1, 1], 0), sample([4, 2, 1, 2], 0), sample([4, 2, 1, 3], 0), sample([4, 2, 1, 4], 0), sample([4, 2, 1, 5], 0), sample([4, 2, 2, 1], 0), sample([4, 2, 2, 2], 0), sample([4, 2, 2, 3], 0), sample([4, 2, 2, 5], 1), sample([4, 2, 3, 1], 0), sample([4, 2, 3, 2], 0), sample([4, 2, 3, 3], 1), sample([4, 2, 3, 4], 1), sample([4, 2, 3, 5], 1), sample([4, 2, 4, 1], 0), sample([4, 2, 4, 3], 1), sample([4, 2, 4, 4], 1), sample([4, 2, 4, 5], 1), sample([4, 2, 5, 1], 0), sample([4, 2, 5, 2], 1), sample([4, 2, 5, 3], 1), sample([4, 2, 5, 4], 
1), sample([4, 2, 5, 5], 1), sample([4, 3, 1, 1], 0), sample([4, 3, 1, 2], 0), sample([4, 3, 1, 3], 0), sample([4, 3, 1, 4], 0), sample([4, 3, 1, 5], 0), sample([4, 3, 2, 1], 0), sample([4, 3, 2, 2], 0), sample([4, 3, 2, 3], 0), sample([4, 3, 2, 4], 0), sample([4, 3, 2, 5], 0), sample([4, 3, 3, 1], 0), sample([4, 3, 3, 2], 0), sample([4, 3, 3, 3], 0), sample([4, 3, 3, 5], 1), sample([4, 3, 4, 1], 0), sample([4, 3, 4, 2], 0), sample([4, 3, 4, 4], 1), sample([4, 3, 4, 5], 1), sample([4, 3, 5, 1], 0), sample([4, 3, 5, 2], 0), sample([4, 3, 5, 3], 1), sample([4, 3, 5, 4], 1), sample([4, 3, 5, 5], 1), sample([4, 4, 1, 1], 0), sample([4, 4, 1, 2], 0), sample([4, 4, 1, 3], 0), sample([4, 4, 1, 4], 0), sample([4, 4, 1, 5], 0), sample([4, 4, 2, 1], 0), sample([4, 4, 2, 2], 0), sample([4, 4, 2, 3], 0), sample([4, 4, 2, 4], 0), sample([4, 4, 2, 5], 0), sample([4, 4, 3, 1], 0), sample([4, 4, 3, 2], 0), sample([4, 4, 3, 3], 0), sample([4, 4, 3, 4], 0), sample([4, 4, 3, 5], 0), sample([4, 4, 4, 1], 0), sample([4, 4, 4, 2], 0), sample([4, 4, 4, 3], 0), sample([4, 4, 4, 5], 1), sample([4, 4, 5, 1], 0), sample([4, 4, 5, 2], 0), sample([4, 4, 5, 3], 0), sample([4, 4, 5, 4], 1), sample([4, 4, 5, 5], 1), sample([4, 5, 1, 1], 0), sample([4, 5, 1, 2], 0), sample([4, 5, 1, 3], 0), sample([4, 5, 1, 4], 0), sample([4, 5, 1, 5], 0), sample([4, 5, 2, 1], 0), sample([4, 5, 2, 2], 0), sample([4, 5, 2, 3], 0), sample([4, 5, 2, 4], 0), sample([4, 5, 2, 5], 0), sample([4, 5, 3, 1], 0), sample([4, 5, 3, 2], 0), sample([4, 5, 3, 3], 0), sample([4, 5, 3, 4], 0), sample([4, 5, 3, 5], 0), sample([4, 5, 4, 1], 0), sample([4, 5, 4, 2], 0), sample([4, 5, 4, 3], 0), sample([4, 5, 4, 4], 0), sample([4, 5, 5, 1], 0), sample([4, 5, 5, 2], 0), sample([4, 5, 5, 3], 0), sample([4, 5, 5, 5], 1), sample([5, 1, 1, 1], 0), sample([5, 1, 1, 2], 0), sample([5, 1, 1, 3], 0), sample([5, 1, 1, 4], 0), sample([5, 1, 2, 1], 0), sample([5, 1, 2, 2], 0), sample([5, 1, 2, 3], 1), sample([5, 1, 2, 4], 1), sample([5, 1, 2, 5], 
1), sample([5, 1, 3, 1], 0), sample([5, 1, 3, 2], 1), sample([5, 1, 3, 3], 1), sample([5, 1, 3, 4], 1), sample([5, 1, 3, 5], 1), sample([5, 1, 4, 1], 0), sample([5, 1, 4, 2], 1), sample([5, 1, 4, 3], 1), sample([5, 1, 4, 4], 1), sample([5, 1, 4, 5], 1), sample([5, 1, 5, 2], 1), sample([5, 1, 5, 3], 1), sample([5, 1, 5, 4], 1), sample([5, 1, 5, 5], 1), sample([5, 2, 1, 1], 0), sample([5, 2, 1, 2], 0), sample([5, 2, 1, 3], 0), sample([5, 2, 1, 4], 0), sample([5, 2, 1, 5], 0), sample([5, 2, 2, 1], 0), sample([5, 2, 2, 2], 0), sample([5, 2, 2, 3], 0), sample([5, 2, 2, 4], 0), sample([5, 2, 3, 1], 0), sample([5, 2, 3, 2], 0), sample([5, 2, 3, 3], 0), sample([5, 2, 3, 4], 1), sample([5, 2, 3, 5], 1), sample([5, 2, 4, 1], 0), sample([5, 2, 4, 2], 0), sample([5, 2, 4, 3], 1), sample([5, 2, 4, 4], 1), sample([5, 2, 4, 5], 1), sample([5, 2, 5, 1], 0), sample([5, 2, 5, 3], 1), sample([5, 2, 5, 4], 1), sample([5, 2, 5, 5], 1), sample([5, 3, 1, 1], 0), sample([5, 3, 1, 2], 0), sample([5, 3, 1, 3], 0), sample([5, 3, 1, 4], 0), sample([5, 3, 1, 5], 0), sample([5, 3, 2, 1], 0), sample([5, 3, 2, 2], 0), sample([5, 3, 2, 3], 0), sample([5, 3, 2, 4], 0), sample([5, 3, 2, 5], 0), sample([5, 3, 3, 1], 0), sample([5, 3, 3, 2], 0), sample([5, 3, 3, 3], 0), sample([5, 3, 3, 4], 0), sample([5, 3, 4, 1], 0), sample([5, 3, 4, 2], 0), sample([5, 3, 4, 3], 0), sample([5, 3, 4, 4], 1), sample([5, 3, 4, 5], 1), sample([5, 3, 5, 1], 0), sample([5, 3, 5, 2], 0), sample([5, 3, 5, 4], 1), sample([5, 3, 5, 5], 1), sample([5, 4, 1, 1], 0), sample([5, 4, 1, 2], 0), sample([5, 4, 1, 3], 0), sample([5, 4, 1, 4], 0), sample([5, 4, 1, 5], 0), sample([5, 4, 2, 1], 0), sample([5, 4, 2, 2], 0), sample([5, 4, 2, 3], 0), sample([5, 4, 2, 4], 0), sample([5, 4, 2, 5], 0), sample([5, 4, 3, 1], 0), sample([5, 4, 3, 2], 0), sample([5, 4, 3, 3], 0), sample([5, 4, 3, 4], 0), sample([5, 4, 3, 5], 0), sample([5, 4, 4, 1], 0), sample([5, 4, 4, 2], 0), sample([5, 4, 4, 3], 0), sample([5, 4, 4, 4], 0), sample([5, 4, 5, 1], 
0), sample([5, 4, 5, 2], 0), sample([5, 4, 5, 3], 0), sample([5, 4, 5, 5], 1), sample([5, 5, 1, 1], 0), sample([5, 5, 1, 2], 0), sample([5, 5, 1, 3], 0), sample([5, 5, 1, 4], 0), sample([5, 5, 1, 5], 0), sample([5, 5, 2, 1], 0), sample([5, 5, 2, 2], 0), sample([5, 5, 2, 3], 0), sample([5, 5, 2, 4], 0), sample([5, 5, 2, 5], 0), sample([5, 5, 3, 1], 0), sample([5, 5, 3, 2], 0), sample([5, 5, 3, 3], 0), sample([5, 5, 3, 4], 0), sample([5, 5, 3, 5], 0), sample([5, 5, 4, 1], 0), sample([5, 5, 4, 2], 0), sample([5, 5, 4, 3], 0), sample([5, 5, 4, 4], 0), sample([5, 5, 4, 5], 0), sample([5, 5, 5, 1], 0), sample([5, 5, 5, 2], 0), sample([5, 5, 5, 3], 0), sample([5, 5, 5, 4], 0), continuous=[True, True, True, True] ) async def main(): async with mpc: forest = await output(await train_forest(samples, amount=2, depth=4)) for index, tree in enumerate(forest): print(f"Tree #{index}") tree.pretty_print() if __name__ == '__main__': mpc.run(main())
26.669739
77
0.388722
3,543
17,362
1.901778
0.013266
0.299199
0.116355
0.032057
0.938557
0
0
0
0
0
0
0.240187
0.308893
17,362
650
78
26.710769
0.32136
0
0
0
0
0
0.00121
0
0
0
0
0
0
1
0.001684
false
0
0.008418
0.001684
0.011785
0.003367
0
0
1
null
1
0
0
1
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f753fbf3ff60b90fee53bf99ea41acdd1b952379
3,173
py
Python
networkx_mod/algorithms/__init__.py
movingpictures83/MATria
d3dbd0d15e00dbc26db39ace0663868180fdc471
[ "BSD-3-Clause", "MIT" ]
null
null
null
networkx_mod/algorithms/__init__.py
movingpictures83/MATria
d3dbd0d15e00dbc26db39ace0663868180fdc471
[ "BSD-3-Clause", "MIT" ]
null
null
null
networkx_mod/algorithms/__init__.py
movingpictures83/MATria
d3dbd0d15e00dbc26db39ace0663868180fdc471
[ "BSD-3-Clause", "MIT" ]
null
null
null
from networkx_mod.algorithms.assortativity import * from networkx_mod.algorithms.block import * from networkx_mod.algorithms.boundary import * from networkx_mod.algorithms.centrality import * from networkx_mod.algorithms.cluster import * from networkx_mod.algorithms.clique import * from networkx_mod.algorithms.community import * from networkx_mod.algorithms.components import * from networkx_mod.algorithms.coloring import * from networkx_mod.algorithms.core import * from networkx_mod.algorithms.cycles import * from networkx_mod.algorithms.dag import * from networkx_mod.algorithms.distance_measures import * from networkx_mod.algorithms.dominance import * from networkx_mod.algorithms.dominating import * from networkx_mod.algorithms.hierarchy import * from networkx_mod.algorithms.matching import * from networkx_mod.algorithms.mis import * from networkx_mod.algorithms.mst import * from networkx_mod.algorithms.link_analysis import * from networkx_mod.algorithms.link_prediction import * from networkx_mod.algorithms.operators import * from networkx_mod.algorithms.shortest_paths import * from networkx_mod.algorithms.smetric import * from networkx_mod.algorithms.traversal import * from networkx_mod.algorithms.isolate import * from networkx_mod.algorithms.euler import * from networkx_mod.algorithms.vitality import * from networkx_mod.algorithms.chordal import * from networkx_mod.algorithms.richclub import * from networkx_mod.algorithms.distance_regular import * from networkx_mod.algorithms.swap import * from networkx_mod.algorithms.graphical import * from networkx_mod.algorithms.simple_paths import * import networkx_mod.algorithms.assortativity import networkx_mod.algorithms.bipartite import networkx_mod.algorithms.centrality import networkx_mod.algorithms.cluster import networkx_mod.algorithms.clique import networkx_mod.algorithms.components import networkx_mod.algorithms.connectivity import networkx_mod.algorithms.coloring import networkx_mod.algorithms.flow import 
networkx_mod.algorithms.isomorphism import networkx_mod.algorithms.link_analysis import networkx_mod.algorithms.shortest_paths import networkx_mod.algorithms.traversal import networkx_mod.algorithms.chordal import networkx_mod.algorithms.operators import networkx_mod.algorithms.tree # bipartite from networkx_mod.algorithms.bipartite import (projected_graph, project, is_bipartite, complete_bipartite_graph) # connectivity from networkx_mod.algorithms.connectivity import (minimum_edge_cut, minimum_node_cut, average_node_connectivity, edge_connectivity, node_connectivity, stoer_wagner, all_pairs_node_connectivity) # isomorphism from networkx_mod.algorithms.isomorphism import (is_isomorphic, could_be_isomorphic, fast_could_be_isomorphic, faster_could_be_isomorphic) # flow from networkx_mod.algorithms.flow import (maximum_flow, maximum_flow_value, minimum_cut, minimum_cut_value, capacity_scaling, network_simplex, min_cost_flow_cost, max_flow_min_cost, min_cost_flow, cost_of_flow) from .tree.recognition import * from .tree.branchings import ( maximum_branching, minimum_branching, maximum_spanning_arborescence, minimum_spanning_arborescence )
43.465753
86
0.86133
406
3,173
6.458128
0.199507
0.226545
0.432494
0.362319
0.708238
0.104882
0
0
0
0
0
0
0.080366
3,173
72
87
44.069444
0.898561
0.012291
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.861538
0
0.861538
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
f7705e3cc1b211d4663b54c4da3c65f968c2a26c
135
py
Python
zoo/__init__.py
uliana291/the-zoo
a15a4162c39553abe91224f4feff5d3b66f9413e
[ "MIT" ]
90
2018-11-20T10:58:24.000Z
2022-02-19T16:12:46.000Z
zoo/__init__.py
uliana291/the-zoo
a15a4162c39553abe91224f4feff5d3b66f9413e
[ "MIT" ]
348
2018-11-21T09:22:31.000Z
2021-11-03T13:45:08.000Z
zoo/__init__.py
aexvir/the-zoo
7816afb9a0a26c6058b030b4a987c73e952d92bd
[ "MIT" ]
11
2018-12-08T18:42:07.000Z
2021-02-21T06:27:58.000Z
from ddtrace import patch_all from .base.celery import app as celery_app from .base.wsgi import application patch_all(requests=True)
19.285714
42
0.822222
22
135
4.909091
0.590909
0.148148
0
0
0
0
0
0
0
0
0
0
0.125926
135
6
43
22.5
0.915254
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f7a10b6976072b5836edf56561287db490f53e79
73
py
Python
scripts/__init__.py
arte-dev/opensdg-ibestat
32cfd5856c7028df969e050435926b6d5bb29e2c
[ "MIT" ]
null
null
null
scripts/__init__.py
arte-dev/opensdg-ibestat
32cfd5856c7028df969e050435926b6d5bb29e2c
[ "MIT" ]
null
null
null
scripts/__init__.py
arte-dev/opensdg-ibestat
32cfd5856c7028df969e050435926b6d5bb29e2c
[ "MIT" ]
null
null
null
from . import overrides from . import build_data from . import check_data
24.333333
24
0.808219
11
73
5.181818
0.545455
0.526316
0
0
0
0
0
0
0
0
0
0
0.150685
73
3
25
24.333333
0.919355
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
e39fb3f56a514105f23d8bd18223641f39ed5f91
324
py
Python
scrapy/contrib/spidermiddleware/httperror.py
jesuslosada/scrapy
8be28fe4ca8b1cd011d5f7e03661da8a6bb3217b
[ "BSD-3-Clause" ]
22
2018-03-13T13:51:41.000Z
2022-02-19T07:27:48.000Z
scrapy/contrib/spidermiddleware/httperror.py
EnjoyLifeFund/Debian_py36_packages
1985d4c73fabd5f08f54b922e73a9306e09c77a5
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
10
2020-02-11T23:34:28.000Z
2022-03-11T23:16:12.000Z
scrapy/contrib/spidermiddleware/httperror.py
EnjoyLifeFund/Debian_py36_packages
1985d4c73fabd5f08f54b922e73a9306e09c77a5
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
6
2017-12-28T03:59:54.000Z
2020-02-26T16:01:45.000Z
import warnings from scrapy.exceptions import ScrapyDeprecationWarning warnings.warn("Module `scrapy.contrib.spidermiddleware.httperror` is deprecated, " "use `scrapy.spidermiddlewares.httperror` instead", ScrapyDeprecationWarning, stacklevel=2) from scrapy.spidermiddlewares.httperror import *
40.5
82
0.774691
29
324
8.655172
0.62069
0.079681
0.25498
0
0
0
0
0
0
0
0
0.003636
0.151235
324
7
83
46.285714
0.909091
0
0
0
0
0
0.351852
0.243827
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
e3d24a4198b47fdceb1099d7e5b98d5d1e277f46
366
py
Python
igamelister/webscraper/interpolation.py
chris-vg/igamelister
807f4d504911341edbc7ffc187c3a19b29a72ace
[ "MIT" ]
null
null
null
igamelister/webscraper/interpolation.py
chris-vg/igamelister
807f4d504911341edbc7ffc187c3a19b29a72ace
[ "MIT" ]
null
null
null
igamelister/webscraper/interpolation.py
chris-vg/igamelister
807f4d504911341edbc7ffc187c3a19b29a72ace
[ "MIT" ]
null
null
null
from configparser import BasicInterpolation class TemplateInterpolation(BasicInterpolation): def __init__(self, **kwargs): self._interpolate_dict = kwargs def set(self, key: str, value): self._interpolate_dict[key] = value def before_get(self, parser, section, option, value, defaults): return value % self._interpolate_dict
26.142857
67
0.718579
40
366
6.3
0.575
0.178571
0.22619
0.190476
0
0
0
0
0
0
0
0
0.196721
366
13
68
28.153846
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0.375
false
0
0.125
0.125
0.75
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
e3e42d8bf5c1361f22824339a6783086aa4f4fd6
2,784
py
Python
test_mnist_with_keras.py
nothingbutpassion/dldiy
53c6365fb5689b47ec62cf3bb4c3d5bde621e8f4
[ "Apache-2.0" ]
1
2018-12-24T12:15:25.000Z
2018-12-24T12:15:25.000Z
test_mnist_with_keras.py
nothingbutpassion/dldiy
53c6365fb5689b47ec62cf3bb4c3d5bde621e8f4
[ "Apache-2.0" ]
null
null
null
test_mnist_with_keras.py
nothingbutpassion/dldiy
53c6365fb5689b47ec62cf3bb4c3d5bde621e8f4
[ "Apache-2.0" ]
null
null
null
import numpy as np import matplotlib.pyplot as plt import datasets.mnist as mnist from tensorflow.keras import models from tensorflow.keras import layers from tensorflow.keras import optimizers from tensorflow.keras import losses def test_mnist(): (train_x, train_y), (test_x, test_y) = mnist.load_data() val_x = train_x[50000:] val_y = train_y[50000:] train_x = train_x[:50000] train_y = train_y[:50000] model = models.Sequential() model.add(layers.Dense(28, activation="relu", input_shape=(train_x.shape[1],))) model.add(layers.Dense(10, activation="relu")) model.add(layers.Dense(10, activation="softmax")) model.compile(optimizer=optimizers.SGD(lr=0.001), loss=losses.categorical_crossentropy, metrics=['accuracy']) model.summary() history = model.fit(train_x, train_y, batch_size=200, epochs=32, validation_data=(val_x, val_y)).history epochs = range(1, len(history["loss"])+1) plt.plot(epochs, history["loss"], 'ro', label="Traning loss") plt.plot(epochs, history["val_loss"], 'go',label="Validating loss") plt.plot(epochs, history["accuracy"], 'r', label="Traning accuracy") plt.plot(epochs, history["val_accuracy"], 'g', label="Validating accuracy") plt.title('Training/Validating loss/accuracy') plt.xlabel('Epochs') plt.ylabel('Loss/Accuracy') plt.legend() plt.show(block=True) def test_mnist_with_cov2d(): (train_x, train_y), (test_x, test_y) = mnist.load_data(flatten=False) val_x = train_x[50000:] val_y = train_y[50000:] train_x = train_x[:50000] train_y = train_y[:50000] model = models.Sequential() model.add(layers.Conv2D(4, (3, 3), strides=(1,1), padding='same', data_format="channels_first", activation="relu", input_shape=(1, 28, 28))) model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2), data_format="channels_first")) model.add(layers.Flatten()) model.add(layers.Dense(10, activation="softmax")) model.compile(optimizer=optimizers.SGD(lr=0.001), loss=losses.categorical_crossentropy, metrics=['accuracy']) model.summary() history = model.fit(train_x, train_y, 
batch_size=200, epochs=20, validation_data=(val_x, val_y)).history epochs = range(1, len(history["loss"])+1) plt.plot(epochs, history["loss"], 'ro', label="Traning loss") plt.plot(epochs, history["val_loss"], 'go',label="Validating loss") plt.plot(epochs, history["acc"], 'r', label="Traning accuracy") plt.plot(epochs, history["val_acc"], 'g', label="Validating accuracy") plt.title('Training/Validating loss/accuracy') plt.xlabel('Epochs') plt.ylabel('Loss/Accuracy') plt.legend() plt.show(block=True) if __name__ == "__main__": test_mnist()
45.639344
145
0.682112
391
2,784
4.690537
0.245524
0.035987
0.056707
0.087241
0.725736
0.725736
0.708833
0.708833
0.708833
0.660851
0
0.037431
0.155532
2,784
60
146
46.4
0.742663
0
0
0.571429
0
0
0.140969
0
0
0
0
0
0
1
0.035714
false
0
0.125
0
0.160714
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e3e8d0c67a85eb26f40805821cf5852f52a05d55
210
py
Python
simple_salesforce/aio/__init__.py
MulliganFunding/simple-salesforce
6d43d252683e688eb50faab46c6030afc0aa9838
[ "Apache-2.0" ]
null
null
null
simple_salesforce/aio/__init__.py
MulliganFunding/simple-salesforce
6d43d252683e688eb50faab46c6030afc0aa9838
[ "Apache-2.0" ]
null
null
null
simple_salesforce/aio/__init__.py
MulliganFunding/simple-salesforce
6d43d252683e688eb50faab46c6030afc0aa9838
[ "Apache-2.0" ]
null
null
null
"""Simple-Salesforce Asyncio Package""" # flake8: noqa from .api import build_async_salesforce_client, AsyncSalesforce, AsyncSFType from .bulk import AsyncSFBulkHandler from .login import AsyncSalesforceLogin
30
76
0.833333
23
210
7.478261
0.782609
0
0
0
0
0
0
0
0
0
0
0.005291
0.1
210
6
77
35
0.904762
0.22381
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e3eba1d73095e338b7673269be9d1dcb3f1a33e6
1,126
py
Python
gloop/repositories/user_repository.py
pitzer42/gloop_old
9ad5ad9e10387e17dfd5a40e65d2910e10f59d77
[ "MIT" ]
null
null
null
gloop/repositories/user_repository.py
pitzer42/gloop_old
9ad5ad9e10387e17dfd5a40e65d2910e10f59d77
[ "MIT" ]
null
null
null
gloop/repositories/user_repository.py
pitzer42/gloop_old
9ad5ad9e10387e17dfd5a40e65d2910e10f59d77
[ "MIT" ]
null
null
null
from typing import ( NoReturn, List ) from abc import ( ABC, abstractmethod ) from gloop.models.user import User from gloop.repositories.data_ports import pass_by class UserRepository(ABC): @abstractmethod async def to_list(self, length=100, data_port=pass_by) -> List: raise NotImplemented() @abstractmethod async def count(self) -> int: raise NotImplemented() @abstractmethod async def insert(self, user: User, data_port=pass_by) -> NoReturn: raise NotImplemented() @abstractmethod async def get_by_name(self, name: str, data_port=pass_by) -> User: raise NotImplemented() @abstractmethod async def get_by_token(self, token: str, data_port=pass_by) -> User: raise NotImplemented() @abstractmethod async def delete_by_name(self, name: str) -> NoReturn: raise NotImplemented() @abstractmethod async def delete_token(self, token: str) -> NoReturn: raise NotImplemented() @abstractmethod async def set_token(self, name: str, token: str) -> NoReturn: raise NotImplemented()
23.458333
72
0.676732
132
1,126
5.628788
0.265152
0.204576
0.236878
0.358008
0.601615
0.402423
0.375505
0.166891
0.166891
0.166891
0
0.003476
0.23357
1,126
47
73
23.957447
0.857474
0
0
0.457143
0
0
0
0
0
0
0
0
0
1
0
true
0.142857
0.114286
0
0.142857
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
540a8d103f0307647762947e43d82991001ba0d2
192
py
Python
aadnes_journey/models/character.py
mradrianhh/Aadnes-journey
13d6065eb0997b5ee9ea64e38de104d035178ef4
[ "MIT" ]
null
null
null
aadnes_journey/models/character.py
mradrianhh/Aadnes-journey
13d6065eb0997b5ee9ea64e38de104d035178ef4
[ "MIT" ]
null
null
null
aadnes_journey/models/character.py
mradrianhh/Aadnes-journey
13d6065eb0997b5ee9ea64e38de104d035178ef4
[ "MIT" ]
null
null
null
class Character(): __name: str # Getters and setters. def get_name(self) -> str: return self.__name def set_name(self, name: str) -> None: self.__name = name
19.2
42
0.59375
25
192
4.24
0.52
0.226415
0
0
0
0
0
0
0
0
0
0
0.296875
192
10
43
19.2
0.785185
0.104167
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.166667
0.833333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
58138e64152781a2db0bb9b073443ec70bd142cd
583
py
Python
src/locale/_cid.py
sffjunkie/locale
ad9a7605487e3d3db3bc0156a136fef27cb392e1
[ "Apache-2.0" ]
null
null
null
src/locale/_cid.py
sffjunkie/locale
ad9a7605487e3d3db3bc0156a136fef27cb392e1
[ "Apache-2.0" ]
null
null
null
src/locale/_cid.py
sffjunkie/locale
ad9a7605487e3d3db3bc0156a136fef27cb392e1
[ "Apache-2.0" ]
null
null
null
class CaseInsensitiveDict(dict):
    """dict whose keys are normalized (upper-cased) on write and lookup."""

    __transform__ = str.upper

    def __init__(self, *args, **kwargs):
        super().__init__()
        for k, v in dict(*args, **kwargs).items():
            # Route through __setitem__ so the key is normalized once.
            self[k] = v

    def __setitem__(self, key, value):
        # Bug fix: the original did not override __setitem__, so a direct
        # assignment (d['x'] = 1) stored the raw key while __getitem__ and
        # __contains__ normalize their argument — the entry then appeared
        # missing and a duplicate raw-key entry could coexist.
        dict.__setitem__(self, CaseInsensitiveDict.__transform__(key), value)

    def __contains__(self, key):
        return dict.__contains__(self, CaseInsensitiveDict.__transform__(key))

    def __getitem__(self, key):
        val = dict.__getitem__(self, CaseInsensitiveDict.__transform__(key))
        return val

    def __repr__(self):
        dictrepr = dict.__repr__(self)
        return '%s(%s)' % (type(self).__name__, dictrepr)
32.388889
78
0.650086
62
583
5.33871
0.419355
0.208459
0.29003
0.21148
0
0
0
0
0
0
0
0
0.2247
583
17
79
34.294118
0.732301
0
0
0
0
0
0.010292
0
0
0
0
0
0
1
0.307692
false
0
0
0.076923
0.692308
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
581d3b5619094793c2af904b989b57d0e070c317
684
py
Python
sdk/python/pulumi_gcp/pubsub/__init__.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_gcp/pubsub/__init__.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_gcp/pubsub/__init__.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** from .. import _utilities import typing # Export this package's modules as members: from .get_topic import * from .lite_subscription import * from .lite_topic import * from .schema import * from .subscription import * from .subscription_iam_binding import * from .subscription_iam_member import * from .subscription_iam_policy import * from .topic import * from .topic_iam_binding import * from .topic_iam_member import * from .topic_iam_policy import * from ._inputs import * from . import outputs
31.090909
87
0.767544
99
684
5.131313
0.484848
0.255906
0.173228
0.147638
0
0
0
0
0
0
0
0.001724
0.152047
684
21
88
32.571429
0.874138
0.320175
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
583e5d3813f00f82a5a7bf5f196051e00f488e94
86
py
Python
admin/forms/__init__.py
Simple2B/cortex-backend
9cf6802b0eff9254875bcbe553517500ccfc9082
[ "MIT" ]
1
2021-10-17T13:28:51.000Z
2021-10-17T13:28:51.000Z
admin/forms/__init__.py
Simple2B/cortex-backend
9cf6802b0eff9254875bcbe553517500ccfc9082
[ "MIT" ]
null
null
null
admin/forms/__init__.py
Simple2B/cortex-backend
9cf6802b0eff9254875bcbe553517500ccfc9082
[ "MIT" ]
null
null
null
# flake8: noqa F401 from .auth import LoginForm from .new_doctor import NewDoctorForm
21.5
37
0.813953
12
86
5.75
0.833333
0
0
0
0
0
0
0
0
0
0
0.054054
0.139535
86
3
38
28.666667
0.878378
0.197674
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5844e0c49edb7766cf0cffa86f40874fe3b523eb
64
py
Python
Python Exercise/Tensorboard.py
mewadashreya/MLOne-Basic
f5048379a3b276cd371ce89cca6a0949d2815ce9
[ "Apache-2.0" ]
1
2020-09-20T19:00:08.000Z
2020-09-20T19:00:08.000Z
Python Exercise/Tensorboard.py
SarangDeshmukh7/MLOne-Basic
490161740e536e3c5689ad248de1be931160e7c7
[ "Apache-2.0" ]
null
null
null
Python Exercise/Tensorboard.py
SarangDeshmukh7/MLOne-Basic
490161740e536e3c5689ad248de1be931160e7c7
[ "Apache-2.0" ]
null
null
null
# NOTE(review): both imports are unused in this snippet — presumably
# scaffolding for a Tensorboard exercise; confirm before removing.
import tensorflow as tf
import datetime

# Smoke-test output proving the script ran.
print("Hello World")
16
24
0.765625
9
64
5.444444
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.171875
64
3
25
21.333333
0.924528
0
0
0
0
0
0.180328
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5849e48b30b1af8fb151c42e016aafdd544153e2
198
py
Python
projects/PointRend/point_rend/__init__.py
Shun14/detectron2-ResNeSt
cda53a237199da3bbe7526d41c41b9d8df4c4814
[ "Apache-2.0" ]
344
2020-04-18T18:33:33.000Z
2020-12-04T08:34:30.000Z
projects/PointRend/point_rend/__init__.py
ZhanqiZhang66/detectron2
be0d7283297f6314c8e683e0d1ff80b668aa9f4a
[ "Apache-2.0" ]
82
2020-01-29T23:48:32.000Z
2021-09-08T02:09:30.000Z
projects/PointRend/point_rend/__init__.py
ZhanqiZhang66/detectron2
be0d7283297f6314c8e683e0d1ff80b668aa9f4a
[ "Apache-2.0" ]
66
2020-04-20T08:30:49.000Z
2020-12-06T12:55:12.000Z
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved from .config import add_pointrend_config from .coarse_mask_head import CoarseMaskHead from .roi_heads import PointRendROIHeads
39.6
70
0.838384
27
198
5.962963
0.814815
0
0
0
0
0
0
0
0
0
0
0
0.116162
198
4
71
49.5
0.92
0.343434
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
585e40e2fc4d4f110d13fea49ba3922c3de83ab7
180
py
Python
src/data/__init__.py
marka17/digit-recognition
129fa77dc41eca4f4ffbc6a37045a194cd4beb12
[ "MIT" ]
null
null
null
src/data/__init__.py
marka17/digit-recognition
129fa77dc41eca4f4ffbc6a37045a194cd4beb12
[ "MIT" ]
null
null
null
src/data/__init__.py
marka17/digit-recognition
129fa77dc41eca4f4ffbc6a37045a194cd4beb12
[ "MIT" ]
null
null
null
from .dataset import SpeechDataset from .batch import Batch from .dataloader import PaddingCollator from .batch_processor import BatchProcessor from .dictionary import ctc_decode
25.714286
43
0.855556
22
180
6.909091
0.545455
0.118421
0
0
0
0
0
0
0
0
0
0
0.116667
180
6
44
30
0.955975
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
588f32a19e9977dc3b3d1e3c1942bee7de7437c5
160
py
Python
quarkchain/reward.py
tim-yoshi/pyquarkchain
1847542c166a180b5ffc3c6e917751be85fa15a6
[ "MIT" ]
237
2018-09-18T00:47:14.000Z
2022-03-21T21:43:07.000Z
quarkchain/reward.py
tim-yoshi/pyquarkchain
1847542c166a180b5ffc3c6e917751be85fa15a6
[ "MIT" ]
409
2018-09-18T01:02:29.000Z
2022-01-24T20:51:58.000Z
quarkchain/reward.py
tim-yoshi/pyquarkchain
1847542c166a180b5ffc3c6e917751be85fa15a6
[ "MIT" ]
125
2018-09-18T00:47:28.000Z
2022-03-24T20:00:46.000Z
class ConstMinorBlockRewardCalcultor:
    """Reward calculator paying a fixed amount per minor block.

    NOTE(review): "Calcultor" is a typo for "Calculator", but the class
    name is public interface and is kept for compatibility.
    """

    def __init__(self, env):
        # Environment/configuration handle; stored but unused here.
        self.env = env

    def get_block_reward(self):
        """Return the constant block reward (10**20, i.e. the original
        literal 100000000000000000000 — presumably the chain's smallest
        unit; confirm against token decimals)."""
        return 10 ** 20
22.857143
37
0.70625
16
160
6.6875
0.6875
0.130841
0
0
0
0
0
0
0
0
0
0.170732
0.23125
160
6
38
26.666667
0.699187
0
0
0
0
0
0
0
0
0
0
0
0
1
0.4
false
0
0
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
5466f12b262098db3523d985c1b3e6a096dfcd95
125
py
Python
openpyxlzip/cell/__init__.py
ankitJoshi03/openpyxlzip
f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647
[ "MIT" ]
null
null
null
openpyxlzip/cell/__init__.py
ankitJoshi03/openpyxlzip
f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647
[ "MIT" ]
null
null
null
openpyxlzip/cell/__init__.py
ankitJoshi03/openpyxlzip
f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647
[ "MIT" ]
null
null
null
# Copyright (c) 2010-2020 openpyxlzip from .cell import Cell, WriteOnlyCell, MergedCell from .read_only import ReadOnlyCell
25
49
0.808
16
125
6.25
0.8125
0
0
0
0
0
0
0
0
0
0
0.073395
0.128
125
4
50
31.25
0.844037
0.28
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
546dea090d962fbe2507aba3eaab28e9ba86c00b
28,108
py
Python
MKDecrypt.py
AmNe5iA/MKDecrypt
4bc8668e0fa53ef02dda2ff358912046d555ad09
[ "MIT" ]
7
2017-02-07T22:44:24.000Z
2021-09-29T13:28:16.000Z
MKDecrypt.py
AmNe5iA/MKDecrypt
4bc8668e0fa53ef02dda2ff358912046d555ad09
[ "MIT" ]
2
2018-03-12T22:24:52.000Z
2021-10-03T08:14:41.000Z
MKDecrypt.py
AmNe5iA/MKDecrypt
4bc8668e0fa53ef02dda2ff358912046d555ad09
[ "MIT" ]
7
2018-12-12T17:52:28.000Z
2022-03-07T13:23:30.000Z
#!/usr/bin/python3 # MKDecrypt.py (Master Key Decryptor) is a python script to assist with # decrypting encrypted volumes using the recovered masterkey for various # truecrypt type encrypted volumes. # Created by Matt Smith # email: amnesia_1337 (at) hotmail (dot) co (dot) uk # Limitations: May produce false negatives if the filesystem used is not one of # the standard truecrypt/veracrypt formats. The HFS+ implementation is # _sketchy_ but appears to work, for now. import os import stat import argparse import subprocess import binascii def main(): ## print empty line away from command line so easier for user to read print(' ') ## Setup arguments/options using imported argparse module parser = argparse.ArgumentParser(description='''%(prog)s (Master Key Decryptor) is a python script to assist with decrypting encrypted volumes using the recovered masterkey for various encrypted containers. Script should be run as root, sudo recommended.''', epilog='''Example: [sudo] ./%(prog)s -m /mnt truecrypt.tc 123...def''') parser.add_argument('-v', '--verbose', action='store_true', help=''' verbose output''') parser.add_argument('-X', '--volatility', action='store_true', help='specifies MASTERKEY is a volatility file instead of hex chars') rwgroup = parser.add_mutually_exclusive_group() rwgroup.add_argument('-r', '--read-only', action='store_true', help='opens FILE in read only mode (default)') rwgroup.add_argument('-w', '--read-write', action='store_true', help='opens FILE in read/write mode') parser.add_argument('-m', '--mountpoint', help='''mount encrypted volume at MOUNTPOINT''', default='N0P3') parser.add_argument('FILE', help='the encrypted container, FILE') parser.add_argument('MASTERKEY', help='''the MASTERKEY as a hexadecimal string''') args = parser.parse_args() ## check to see if you are using a raw volatility dump if(args.volatility): isMKFile = os.access(args.MASTERKEY, os.F_OK) if isMKFile: masterkeyfile = open(args.MASTERKEY,"rb").read() args.MASTERKEY = 
binascii.hexlify(masterkeyfile).decode('utf-8') else: print (args.MASTERKEY + ' is not a file.' ) exit(1) ## check to see if this script is being run as root/superuser. exit if not if not os.geteuid() == 0: print("This script needs to be run as root/superuser.") exit(1) ## setup ro(read-only) flag for passing to other programs ro = '' if not args.read_write: args.read_only=True ro = '-r' ## check master key length is correct and is only hex charachters if not len(args.MASTERKEY) == 128 and not len(args.MASTERKEY) == 256 and not len(args.MASTERKEY) == 384: print('MASTERKEY is not of the correct length. It should be 128, 256 or 384 hexadecimal characters in length.') exit(1) hexis = set('0123456789abcdefABCDEF') for c in args.MASTERKEY: if not c in hexis: print ( c + ' is not a hexadecimal character') exit(1) ## check file specified by user actually exists isFILE = os.access(args.FILE, os.F_OK) if isFILE and args.verbose: print (args.FILE + ' exists') elif not isFILE: print ('No such file: ' + args.FILE) exit(1) ## check mount option and mount point if args.mountpoint == 'N0P3': mp=False else: isDIR = os.path.isdir(args.mountpoint) if isDIR and args.verbose: print(args.mountpoint + ' exists') elif not isDIR: print('No such mountpoint: ' + args.mountpoint) exit(1) mp=True ## find a free MKDecrypt device mapper slot for i in range(8): dmname = "MKDecrypt" + str(i+1) dmslot = "/dev/mapper/" + dmname takenslot = os.access(dmslot, os.F_OK) if not takenslot: break elif i == 7: print('All 8 MKDecrypt slots are taken! 
Free some up.') exit(1) ## check to see if container is already a blockdev ## if so, skip mounting it as loop device mode=os.lstat(args.FILE).st_mode isBLKDEV = stat.S_ISBLK(mode) if isBLKDEV: loopdev = args.FILE ## otherwise mount container as loop device else: losetupcmd = 'losetup ' + ro + ' -f --show ' + args.FILE losetupoutput = subprocess.check_output(losetupcmd, shell=True, universal_newlines=True) loopdev = losetupoutput[:-1] if args.verbose: print (loopdev + ' has been setup as loop device of ' + args.FILE) ## get size in sectors of FILE and remove 512 sectors from the size. ## 512 sectors is from Truecrypt header (256 sectors at start of file) ## + backup header (256 sectors at end of file) evsize = int(subprocess.check_output(['blockdev', '--getsz', loopdev])) - 512 extevrange = evsize - 3 ## Define binary values for OEMs (VBR) for later test binMSDOS = str.encode('MSDOS') binMSWIN = str.encode('MSWIN') binEXFAT = str.encode('EXFAT') binNTFS = str.encode('NTFS ') binMKDOS = str.encode('mkdos') binIBM = str.encode('IBM ') binFREEDOS = str.encode('FreeD') binMKFS = str.encode('mkfs.') ## define binary values for Ext and HFS+ tests and setup flags for use after test ## set j=0 for all filesystems except HFS+ (changed later for HFS+) binExtSig = binascii.a2b_hex('53ef') bin000000 = binascii.a2b_hex('000000') binHFSJ = str.encode('H+') binHFSX = str.encode('HX') isExt = False isHFSP = False j = 0 ## if not cascaded encryption if len(args.MASTERKEY) == 128: crypts = [' aes-xts-plain64 ', ' serpent-xts-plain64 ', ' twofish-xts-plain64 ', ' camellia-xts-plain64 ', ' kuznyechik-xts-plain64 '] ## first check if normal/outer volume tryhiddenvol = False for crypt in crypts: ## create table entry for dmsetup command table = '"0 ' + str(evsize) + ' crypt' + crypt + args.MASTERKEY + ' 256 ' + loopdev + ' 256"' ## create dmsetup command ready to pass to shell, then pass it dmsetupcmd = 'dmsetup create ' + dmname + ' ' + ro + ' --table ' + table 
subprocess.call(dmsetupcmd, shell=True) ## test that the volume has decrypted correctly by reading (part of) the OEM from VBR test = open(dmslot, 'rb') test.seek(3) OEM = test.read(5) test.seek(1024) HFSPSig = test.read(2) test.seek(1080) ExtSig = test.read(2) test.seek(1097) ExtOS = test.read(3) test.seek(1120) FIARes = test.read(3) test.close() if ExtSig == binExtSig and ExtOS == bin000000: isExt = True elif HFSPSig == binHFSJ and FIARes == bin000000: isHFSP = True elif HFSPSig == binHFSX and FIARes == bin000000: isHFSP = True if OEM == binMSDOS or OEM == binMSWIN or OEM == binEXFAT or OEM == binNTFS or OEM == binMKDOS or OEM == binIBM or OEM == binFREEDOS or OEM == binMKFS or isExt or isHFSP: print('Normal/outer volume found in ' + args.FILE + ' using' + crypt) break ## if it hasn't worked remove device mapping else: rmdecfile = 'dmsetup remove ' + dmname subprocess.call(rmdecfile, shell=True) if crypt == ' kuznyechik-xts-plain64 ': ## if all encryption types have been tried then try hidden volumes tryhiddenvol = True if tryhiddenvol: print ('Masterkey does not decrypt a normal/outer volume. 
Trying for a hidden volume...') for crypt in crypts: ## create table entry for dmsetup command table = '"0 ' + str(evsize) + ' crypt' + crypt + args.MASTERKEY + ' 256 ' + loopdev + ' 256"' ## create dmsetup command ready to pass to shell, then pass it dmsetupcmd = 'dmsetup create ' + dmname + ' ' + ro + ' --table ' + table subprocess.call(dmsetupcmd, shell=True) ## search for OEM which indicates where hidden volume VBR is located within the container search = open(dmslot, 'rb') for i in range(evsize): ## provide the user with an update every 100,000 sectors if (i % 100000) == 0 : print('Scanning byte ' + str(i*512) + ' of ' + str(evsize*512) + ' using' + crypt, end=' \r') search.seek((i*512)+3) srchOEM = search.read(5) if i <= (extevrange): search.seek((i*512)+1024) srchHFSPSig = search.read(2) search.seek((i*512)+1080) srchExtSig = search.read(2) search.seek((i*512)+1097) srchExtOS = search.read(3) search.seek((i*512)+1120) srchFIARes = search.read(3) if srchExtSig == binExtSig and srchExtOS == bin000000: isExt = True elif srchHFSPSig == binHFSJ and srchFIARes == bin000000: isHFSP = True elif srchHFSPSig == binHFSX and srchFIARes == bin000000: isHFSP = True ## Linux HSF+ driver fails if backup header is not where expected ## so find backup header before attempting to mount... if isHFSP: print('HFS+ filesystem found. Searching for backup volume header... ') for j in range (evsize-i): search.seek((evsize - j)*512 - 1024) bckHFSPSig = search.read(2) search.seek((evsize - j)*512 - 928) bckFIARes = search.read(3) if bckHFSPSig == binHFSJ and bckFIARes == bin000000: break elif bckHFSPSig == binHFSX and bckFIARes == bin000000: break elif j == (evsize-i)-5: search.close() rmdmcmd = 'dmsetup remove ' + dmname subprocess.call(rmdmcmd, shell=True) if not isBLKDEV: subprocess.call(['losetup', '-d', loopdev]) print('Unable to find backup volume header. 
Is volume corrupted?') exit(1) if srchOEM == binMSDOS or srchOEM == binMSWIN or srchOEM == binEXFAT or srchOEM == binNTFS or srchOEM == binMKDOS or srchOEM == binIBM or srchOEM == binFREEDOS or srchOEM == binMKFS or isExt or isHFSP: search.close() print('Hidden volume found ' + str((i+256)*512) + ' bytes into ' + args.FILE + ' using' + crypt) rmdmcmd = 'dmsetup remove ' + dmname subprocess.call(rmdmcmd, shell=True) table = '"0 ' + str(evsize-(i+j)) + ' crypt' + crypt + args.MASTERKEY + ' ' + str(i+256) + ' ' + loopdev + ' ' + str(i+256) + '"' dmsetupcmd = 'dmsetup create ' + dmname + ' ' + ro + ' --table ' + table subprocess.call(dmsetupcmd, shell=True) break elif i == evsize-1: search.close() rmdmcmd = 'dmsetup remove ' + dmname subprocess.call(rmdmcmd, shell=True) if crypt == ' kuznyechik-xts-plain64 ': if not isBLKDEV: subprocess.call(['losetup', '-d', loopdev]) print('No volume decrypted in ' + args.FILE + '. Is masterkey correct?') exit(1) if srchOEM == binMSDOS or srchOEM == binMSWIN or srchOEM == binEXFAT or srchOEM == binNTFS or srchOEM == binMKDOS or srchOEM == binIBM or srchOEM == binFREEDOS or srchOEM == binMKFS or isExt or isHFSP: break ## if a 2 cascaded enryption type if len(args.MASTERKEY) == 256: ## split masterkey into 2 MK1 = args.MASTERKEY[128:] MK2 = args.MASTERKEY[:128] crypts = ['aes-twofish', 'camellia-kuznyechik', 'camellia-serpent', 'kuznyechik-aes', 'kuznyechik-twofish', 'serpent-aes', 'twofish-serpent'] tryhiddenvol = False ## first check for normal/outer volume for crypt in crypts: if crypt == 'aes-twofish': EN1 = ' aes-xts-plain64 ' EN2 = ' twofish-xts-plain64 ' elif crypt == 'camellia-kuznyechik': EN1 = ' camellia-xts-plain64 ' EN2 = ' kuznyechik-xts-plain64 ' elif crypt == 'camellia-serpent': EN1 = ' camellia-xts-plain64 ' EN2 = ' serpent-xts-plain64 ' elif crypt == 'kuznyechik-aes': EN1 = ' kuznyechik-xts-plain64 ' EN2 = ' aes-xts-plain64 ' elif crypt == 'kuznyechik-twofish': EN1 = ' kuznyechik-xts-plain64 ' EN2 = ' 
twofish-xts-plain64 ' elif crypt == 'serpent-aes': EN1 = ' serpent-xts-plain64 ' EN2 = ' aes-xts-plain64 ' elif crypt == 'twofish-serpent': EN1 = ' twofish-xts-plain64 ' EN2 = ' serpent-xts-plain64 ' table1 = '"0 ' + str(evsize) + ' crypt' + EN1 + MK1 + ' 256 ' + loopdev + ' 256"' table2 = '"0 ' + str(evsize) + ' crypt' + EN2 + MK2 + ' 256 ' + dmslot + '_1 0"' dmsetupcmd1 = 'dmsetup create ' + dmname + '_1 ' + ro + ' --table ' + table1 dmsetupcmd2 = 'dmsetup create ' + dmname + ' ' + ro + ' --table ' + table2 subprocess.call(dmsetupcmd1, shell=True) subprocess.call(dmsetupcmd2, shell=True) ## test that the volume has decrypted correctly by reading (part of) the OEM from VBR test = open(dmslot, 'rb') test.seek(3) OEM = test.read(5) test.seek(1024) HFSPSig = test.read(2) test.seek(1080) ExtSig = test.read(2) test.seek(1097) ExtOS = test.read(3) test.seek(1120) FIARes = test.read(3) test.close() if ExtSig == binExtSig and ExtOS == bin000000: isExt = True elif HFSPSig == binHFSJ and FIARes == bin000000: isHFSP = True elif HFSPSig == binHFSX and FIARes == bin000000: isHFSP = True if OEM == binMSDOS or OEM == binMSWIN or OEM == binEXFAT or OEM == binNTFS or OEM == binMKDOS or OEM == binIBM or OEM == binFREEDOS or OEM == binMKFS or isExt or isHFSP: print('Normal/outer volume found in '+ args.FILE + ' using' + EN1 + 'then' + EN2) break ## if it hasn't worked remove device mapping else: rmdecfile1 = 'dmsetup remove ' + dmname rmdecfile2 = 'dmsetup remove ' + dmname + '_1' subprocess.call(rmdecfile1, shell=True) subprocess.call(rmdecfile2, shell=True) ## if not normal volume check entire container for a hidden volume if crypt == 'twofish-serpent': tryhiddenvol = True if tryhiddenvol: print ('Masterkey does not decrypt a normal/outer volume. 
Trying for a hidden volume...') for crypt in crypts: if crypt == 'aes-twofish': EN1 = ' aes-xts-plain64 ' EN2 = ' twofish-xts-plain64 ' elif crypt == 'camellia-kuznyechik': EN1 = ' camellia-xts-plain64 ' EN2 = ' kuznyechik-xts-plain64 ' elif crypt == 'camellia-serpent': EN1 = ' camellia-xts-plain64 ' EN2 = ' serpent-xts-plain64 ' elif crypt == 'kuznyechik-aes': EN1 = ' kuznyechik-xts-plain64 ' EN2 = ' aes-xts-plain64 ' elif crypt == 'kuznyechik-twofish': EN1 = ' kuznyechik-xts-plain64 ' EN2 = ' twofish-xts-plain64 ' elif crypt == 'serpent-aes': EN1 = ' serpent-xts-plain64 ' EN2 = ' aes-xts-plain64 ' elif crypt == 'twofish-serpent': EN1 = ' twofish-xts-plain64 ' EN2 = ' serpent-xts-plain64 ' table1 = '"0 ' + str(evsize) + ' crypt' + EN1 + MK1 + ' 256 ' + loopdev + ' 256"' table2 = '"0 ' + str(evsize) + ' crypt' + EN2 + MK2 + ' 256 ' + dmslot + '_1 0"' dmsetupcmd1 = 'dmsetup create ' + dmname + '_1 ' + ro + ' --table ' + table1 dmsetupcmd2 = 'dmsetup create ' + dmname + ' ' + ro + ' --table ' + table2 subprocess.call(dmsetupcmd1, shell=True) subprocess.call(dmsetupcmd2, shell=True) ## search for OEM which indicates where hidden volume VBR is located within the container search = open(dmslot, 'rb') for i in range(evsize): ## provide the user with an update every 100,000 sectors if (i % 100000) == 0 : print('Scanning byte ' + str(i*512) + ' of ' + str(evsize*512) + ' using' + EN1 + 'then' + EN2, end=' \r') search.seek((i*512)+3) srchOEM = search.read(5) if i <= extevrange: search.seek((i*512)+1024) srchHFSPSig = search.read(2) search.seek((i*512)+1080) srchExtSig = search.read(2) search.seek((i*512)+1097) srchExtOS = search.read(3) search.seek((i*512)+1120) srchFIARes = search.read(3) if srchExtSig == binExtSig and srchExtOS == bin000000: isExt = True elif srchHFSPSig == binHFSJ and srchFIARes == bin000000: isHFSP = True elif srchHFSPSig == binHFSX and srchFIARes == bin000000: isHFSP = True ## Linux HSF+ driver fails if backup header is not where expected ## so find 
backup header before attempting to mount... if isHFSP: print('HFS+ filesystem found. Searching for backup volume header... ') for j in range (evsize-i): search.seek((evsize - j)*512 - 1024) bckHFSPSig = search.read(2) search.seek((evsize - j)*512 - 928) bckFIARes = search.read(3) if bckHFSPSig == binHFSJ and bckFIARes == bin000000: break elif bckHFSPSig == binHFSX and bckFIARes == bin000000: break elif j == (evsize-i)-5: search.close() rmdmcmd1 = 'dmsetup remove ' + dmname rmdmcmd2 = 'dmsetup remove ' + dmname + '_1' subprocess.call(rmdmcmd1, shell=True) subprocess.call(rmdmcmd2, shell=True) if not isBLKDEV: subprocess.call(['losetup', '-d', loopdev]) print('Unable to find backup volume header. Is volume corrupted?') exit(1) if srchOEM == binMSDOS or srchOEM == binMSWIN or srchOEM == binEXFAT or srchOEM == binNTFS or srchOEM == binMKDOS or srchOEM == binIBM or srchOEM == binFREEDOS or srchOEM == binMKFS or isExt or isHFSP: search.close() print('Hidden volume found ' + str((i+256)*512) + ' bytes into ' + args.FILE + ' using' + EN1 + 'then' + EN2 ) rmdmcmd1 = 'dmsetup remove ' + dmname rmdmcmd2 = 'dmsetup remove ' + dmname + '_1' subprocess.call(rmdmcmd1, shell=True) subprocess.call(rmdmcmd2, shell=True) table1 = '"0 ' + str(evsize-(i+j)) + ' crypt' + EN1 + MK1 + ' ' + str(i+256) + ' ' + loopdev + ' ' + str(i+256) + '"' table2 = '"0 ' + str(evsize-(i+j)) + ' crypt' + EN2 + MK2 + ' ' + str(i+256) + ' ' + dmslot + '_1 0"' dmsetupcmd1 = 'dmsetup create ' + dmname + '_1 ' + ro + ' --table ' + table1 dmsetupcmd2 = 'dmsetup create ' + dmname + ' ' + ro + ' --table ' + table2 subprocess.call(dmsetupcmd1, shell=True) subprocess.call(dmsetupcmd2, shell=True) break elif i == evsize-1: search.close() rmdmcmd1 = 'dmsetup remove ' + dmname rmdmcmd2 = 'dmsetup remove ' + dmname + '_1' subprocess.call(rmdmcmd1, shell=True) subprocess.call(rmdmcmd2, shell=True) if crypt == 'twofish-serpent': if not isBLKDEV: subprocess.call(['losetup', '-d', loopdev]) print('No volume decrypted in ' 
+ args.FILE + '. Is masterkey correct?') exit(1) if srchOEM == binMSDOS or srchOEM == binMSWIN or srchOEM == binEXFAT or srchOEM == binNTFS or srchOEM == binMKDOS or srchOEM == binIBM or srchOEM == binFREEDOS or srchOEM == binMKFS or isExt or isHFSP: break ## if a 3 cascaded enryption type if len(args.MASTERKEY) == 384: ## split masterkeys into 3 MK1 = args.MASTERKEY[256:] MK2 = args.MASTERKEY[128:256] MK3 = args.MASTERKEY[:128] crypts = ['aes-twofish-serpent', 'kuznyechik-serpent-camellia', 'serpent-twofish-aes'] tryhiddenvol = False ## first check for normal/outer volume for crypt in crypts: if crypt == 'aes-twofish-serpent': EN1 = ' aes-xts-plain64 ' EN2 = ' twofish-xts-plain64 ' EN3 = ' serpent-xts-plain64 ' elif crypt == 'kuznyechik-serpent-camellia': EN1 = ' kuznyechik-xts-plain64 ' EN2 = ' serpent-xts-plain64 ' EN3 = ' camellia-xts-plain64 ' elif crypt == 'serpent-twofish-aes': EN1 = ' serpent-xts-plain64 ' EN2 = ' twofish-xts-plain64 ' EN3 = ' aes-xts-plain64 ' table1 = '"0 ' + str(evsize) + ' crypt' + EN1 + MK1 + ' 256 ' + loopdev + ' 256"' table2 = '"0 ' + str(evsize) + ' crypt' + EN2 + MK2 + ' 256 ' + dmslot + '_2 0"' table3 = '"0 ' + str(evsize) + ' crypt' + EN3 + MK3 + ' 256 ' + dmslot + '_1 0"' dmsetupcmd1 = 'dmsetup create ' + dmname + '_2 ' + ro + ' --table ' + table1 dmsetupcmd2 = 'dmsetup create ' + dmname + '_1 ' + ro + ' --table ' + table2 dmsetupcmd3 = 'dmsetup create ' + dmname + ' ' + ro + ' --table ' + table3 subprocess.call(dmsetupcmd1, shell=True) subprocess.call(dmsetupcmd2, shell=True) subprocess.call(dmsetupcmd3, shell=True) ## test that the volume has decrypted correctly by reading (part of) the OEM from VBR test = open(dmslot, 'rb') test.seek(3) OEM = test.read(5) test.seek(1024) HFSPSig = test.read(2) test.seek(1080) ExtSig = test.read(2) test.seek(1097) ExtOS = test.read(3) test.seek(1120) FIARes = test.read(3) test.close() if ExtSig == binExtSig and ExtOS == bin000000: isExt = True elif HFSPSig == binHFSJ and FIARes == bin000000: 
isHFSP = True elif HFSPSig == binHFSX and FIARes == bin000000: isHFSP = True if OEM == binMSDOS or OEM == binMSWIN or OEM == binEXFAT or OEM == binNTFS or OEM == binMKDOS or OEM == binIBM or OEM == binFREEDOS or OEM == binMKFS or isExt or isHFSP: print('Normal/outer volume found in ' + args.FILE + ' using' + EN1 + 'then' + EN2 + 'then' + EN3) break ## if it hasn't worked remove device mapping else: rmdecfile1 = 'dmsetup remove ' + dmname rmdecfile2 = 'dmsetup remove ' + dmname + '_1' rmdecfile3 = 'dmsetup remove ' + dmname + '_2' subprocess.call(rmdecfile1, shell=True) subprocess.call(rmdecfile2, shell=True) subprocess.call(rmdecfile3, shell=True) ## if not normal volume check entire container for a hidden volume if crypt == 'serpent-twofish-aes': tryhiddenvol = True if tryhiddenvol: print ('Masterkey does not decrypt a normal/outer volume. Trying for a hidden volume...') for crypt in crypts: if crypt == 'aes-twofish-serpent': EN1 = ' aes-xts-plain64 ' EN2 = ' twofish-xts-plain64 ' EN3 = ' serpent-xts-plain64 ' elif crypt == 'kuznyechik-serpent-camellia': EN1 = ' kuznyechik-xts-plain64 ' EN2 = ' serpent-xts-plain64 ' EN3 = ' camellia-xts-plain64 ' elif crypt == 'serpent-twofish-aes': EN1 = ' serpent-xts-plain64 ' EN2 = ' twofish-xts-plain64 ' EN3 = ' aes-xts-plain64 ' table1 = '"0 ' + str(evsize) + ' crypt' + EN1 + MK1 + ' 256 ' + loopdev + ' 256"' table2 = '"0 ' + str(evsize) + ' crypt' + EN2 + MK2 + ' 256 ' + dmslot + '_2 0"' table3 = '"0 ' + str(evsize) + ' crypt' + EN3 + MK3 + ' 256 ' + dmslot + '_1 0"' dmsetupcmd1 = 'dmsetup create ' + dmname + '_2 ' + ro + ' --table ' + table1 dmsetupcmd2 = 'dmsetup create ' + dmname + '_1 ' + ro + ' --table ' + table2 dmsetupcmd3 = 'dmsetup create ' + dmname + ' ' + ro + ' --table ' + table3 subprocess.call(dmsetupcmd1, shell=True) subprocess.call(dmsetupcmd2, shell=True) subprocess.call(dmsetupcmd3, shell=True) ## search truecrypthidden for OEM which indicates where hidden volume VBR is search = open(dmslot, 'rb') for i in 
range(evsize): ## provide the user with an update every 100,000 sectors if (i % 100000) == 0 : print('Scanning byte ' + str(i*512) + ' of ' + str(evsize*512) + ' using' + EN1 + 'then' + EN2 + 'then' + EN3, end=' \r') search.seek((i*512)+3) srchOEM = search.read(5) if i <= (extevrange): search.seek((i*512)+1024) srchHFSPSig = search.read(2) search.seek((i*512)+1080) srchExtSig = search.read(2) search.seek((i*512)+1097) srchExtOS = search.read(3) search.seek((i*512)+1120) srchFIARes = search.read(3) if srchExtSig == binExtSig and srchExtOS == bin000000: isExt = True elif srchHFSPSig == binHFSJ and srchFIARes == bin000000: isHFSP = True elif srchHFSPSig == binHFSX and srchFIARes == bin000000: isHFSP = True ## Linux HSF+ driver fails if backup header is not where expected ## so find backup header before attempting to mount... if isHFSP: print('HFS+ filesystem found. Searching for backup volume header... ') for j in range (evsize-i): search.seek((evsize - j)*512 - 1024) bckHFSPSig = search.read(2) search.seek((evsize - j)*512 - 928) bckFIARes = search.read(3) if bckHFSPSig == binHFSJ and bckFIARes == bin000000: break elif bckHFSPSig == binHFSX and bckFIARes == bin000000: break elif j == (evsize-i)-5: search.close() rmdmcmd1 = 'dmsetup remove ' + dmname rmdmcmd2 = 'dmsetup remove ' + dmname + '_1' rmdmcmd3 = 'dmsetup remove ' + dmname + '_2' subprocess.call(rmdmcmd1, shell=True) subprocess.call(rmdmcmd2, shell=True) subprocess.call(rmdmcmd3, shell=True) if not isBLKDEV: subprocess.call(['losetup', '-d', loopdev]) print('Unable to find backup volume header. 
Is volume corrupted?') exit(1) if srchOEM == binMSDOS or srchOEM == binMSWIN or srchOEM == binEXFAT or srchOEM == binNTFS or srchOEM == binMKDOS or srchOEM == binIBM or srchOEM == binFREEDOS or srchOEM == binMKFS or isExt or isHFSP: search.close() print('Hidden volume found ' + str((i+256)*512) + ' bytes into ' + args.FILE + ' using' + EN1 + 'then' + EN2 + 'then' + EN3) rmdmcmd1 = 'dmsetup remove ' + dmname rmdmcmd2 = 'dmsetup remove ' + dmname + '_1' rmdmcmd3 = 'dmsetup remove ' + dmname + '_2' subprocess.call(rmdmcmd1, shell=True) subprocess.call(rmdmcmd2, shell=True) subprocess.call(rmdmcmd3, shell=True) table1 = '"0 ' + str(evsize-(i+j)) + ' crypt' + EN1 + MK1 + ' ' + str(i+256) + ' ' + loopdev + ' ' + str(i+256) + '"' table2 = '"0 ' + str(evsize-(i+j)) + ' crypt' + EN2 + MK2 + ' ' + str(i+256) + ' ' + dmslot + '_2 0"' table3 = '"0 ' + str(evsize-(i+j)) + ' crypt' + EN3 + MK3 + ' ' + str(i+256) + ' ' + dmslot + '_1 0"' dmsetupcmd1 = 'dmsetup create ' + dmname + '_2 ' + ro + ' --table ' + table1 dmsetupcmd2 = 'dmsetup create ' + dmname + '_1 ' + ro + ' --table ' + table2 dmsetupcmd3 = 'dmsetup create ' + dmname + ' ' + ro + ' --table ' + table3 subprocess.call(dmsetupcmd1, shell=True) subprocess.call(dmsetupcmd2, shell=True) subprocess.call(dmsetupcmd3, shell=True) break elif i == evsize-1: search.close() rmdmcmd1 = 'dmsetup remove ' + dmname rmdmcmd2 = 'dmsetup remove ' + dmname + '_1' rmdmcmd3 = 'dmsetup remove ' + dmname + '_2' subprocess.call(rmdmcmd1, shell=True) subprocess.call(rmdmcmd2, shell=True) subprocess.call(rmdmcmd3, shell=True) if crypt == 'serpent-twofish-aes': if not isBLKDEV: subprocess.call(['losetup', '-d', loopdev]) print('No volume decrypted in ' + args.FILE + '. 
Is masterkey correct?') exit(1) if srchOEM == binMSDOS or srchOEM == binMSWIN or srchOEM == binEXFAT or srchOEM == binNTFS or srchOEM == binMKDOS or srchOEM == binIBM or srchOEM == binFREEDOS or srchOEM == binMKFS or isExt or isHFSP: break ## if requested, mount the decrypted volume if mp: mountcmd = 'mount ' + ro + ' ' + dmslot + ' ' + args.mountpoint subprocess.call(mountcmd, shell=True) print(args.FILE + ' has been decrypted at ' + dmslot + ' and mounted at ' + args.mountpoint) else: print(args.FILE + ' is decrypted at ' + dmslot) ## pause until user presses enter while also checking that ## mount and device mapping are no longer being used mount=True while mount: while mount: input('Once done, press Enter to dismount ' + args.FILE + '...') if mp: umountcmd = 'umount ' + dmslot check = subprocess.call(umountcmd, shell=True, stderr=subprocess.DEVNULL) if not check == 0: print(args.mountpoint + " is still in use!") break elif args.verbose: print("Unmounted from " + args.mountpoint) if len(args.MASTERKEY) >= 128: rmdmcmd = 'dmsetup remove ' + dmname check = subprocess.call(rmdmcmd, shell=True, stderr=subprocess.DEVNULL) if not check == 0: print("Device mapping: " + dmslot + " is still in use!") break else: if args.verbose: print("Removed device mapping: " + dmslot) if len(args.MASTERKEY) == 128: mount=False if len(args.MASTERKEY) >= 256: rmdmcmd = 'dmsetup remove ' + dmname + '_1' subprocess.call(rmdmcmd, shell=True, stderr=subprocess.DEVNULL) if len(args.MASTERKEY) == 256: mount=False if len(args.MASTERKEY) == 384: rmdmcmd = 'dmsetup remove ' + dmname + '_2' subprocess.call(rmdmcmd, shell=True, stderr=subprocess.DEVNULL) mount=False if not isBLKDEV: subprocess.call(['losetup', '-d', loopdev]) if args.verbose: print("Removed loop device: " + loopdev) if __name__ == '__main__': main()
42.652504
206
0.629145
3,585
28,108
4.91325
0.12106
0.04292
0.029125
0.027421
0.752186
0.729874
0.715965
0.710344
0.695697
0.684853
0
0.048218
0.239291
28,108
658
207
42.717325
0.775559
0.122456
0
0.756184
0
0.001767
0.229826
0.014054
0
0
0
0
0
1
0.001767
false
0
0.008834
0
0.010601
0.068905
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
54a641444eb7e7d90e7bcdd09b05e094cbc229e2
2,163
py
Python
tests/test_connection.py
bureau14/qdb-api-python
2a010df3252d39bc4d529f545547c5cefb9fe86e
[ "BSD-3-Clause" ]
9
2015-09-02T20:13:13.000Z
2020-07-16T14:17:36.000Z
tests/test_connection.py
bureau14/qdb-api-python
2a010df3252d39bc4d529f545547c5cefb9fe86e
[ "BSD-3-Clause" ]
5
2018-02-20T10:47:02.000Z
2020-05-20T10:05:49.000Z
tests/test_connection.py
bureau14/qdb-api-python
2a010df3252d39bc4d529f545547c5cefb9fe86e
[ "BSD-3-Clause" ]
1
2018-04-01T11:12:56.000Z
2018-04-01T11:12:56.000Z
# pylint: disable=C0103,C0111,C0302,W0212 import unittest import pytest import quasardb def test_connect_throws_input_error__when_uri_is_invalid(): with pytest.raises(quasardb.Error): quasardb.Cluster(uri='invalid_uri') def test_connect_throws_connection_error_when_no_cluster_on_given_uri(): with pytest.raises(quasardb.Error): quasardb.Cluster(uri='qdb://127.0.0.1:1') def test_connect_throws_connection_error_when_no_cluster_public_key( qdbd_settings): with pytest.raises(quasardb.Error): quasardb.Cluster( uri=qdbd_settings.get("uri").get("secure"), user_name=qdbd_settings.get("security").get("user_name"), user_private_key=qdbd_settings.get("security").get("user_private_key")) def test_connect_throws_connection_error_when_no_user_name(qdbd_settings): with pytest.raises(quasardb.Error): quasardb.Cluster( uri=qdbd_settings.get("uri").get("secure"), user_private_key=qdbd_settings.get( "security").get("user_private_key"), cluster_public_key=qdbd_settings.get("security").get("cluster_public_key")) def test_connect_throws_connection_error_when_no_user_private_key( qdbd_settings): with pytest.raises(quasardb.Error): quasardb.Cluster( uri=qdbd_settings.get("uri").get("secure"), user_name=qdbd_settings.get("security").get("user_name"), cluster_public_key=qdbd_settings.get("security").get("cluster_public_key")) def test_connect_ok_to_secure_cluster(qdbd_settings): quasardb.Cluster( uri=qdbd_settings.get("uri").get("secure"), user_name=qdbd_settings.get("security").get("user_name"), user_private_key=qdbd_settings.get("security").get("user_private_key"), cluster_public_key=qdbd_settings.get("security").get("cluster_public_key")) def test_connect_with_open_to_secure_cluster(qdbd_settings): with quasardb.Cluster(uri=qdbd_settings.get("uri").get("insecure")) as conn: topology = conn.node_topology( qdbd_settings.get('uri').get('insecure')) assert len(topology) > 0
36.661017
87
0.714286
285
2,163
5.042105
0.178947
0.167015
0.156576
0.14405
0.850383
0.803758
0.77801
0.77801
0.685456
0.61865
0
0.013245
0.162275
2,163
58
88
37.293103
0.779801
0.018031
0
0.512195
0
0
0.13525
0
0
0
0
0
0.02439
1
0.170732
false
0
0.073171
0
0.243902
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
54aeb4d84ec0152af4725f5144f55305fddb5aad
166
py
Python
raspi_components/variable_resistor/resistor_errors.py
builderdev212/raspi_components
edbcb5b8eed6b1e4a2f1cc41ce08ea9b25051495
[ "MIT" ]
1
2021-11-09T16:29:45.000Z
2021-11-09T16:29:45.000Z
raspi_components/variable_resistor/resistor_errors.py
builderdev212/raspi_components
edbcb5b8eed6b1e4a2f1cc41ce08ea9b25051495
[ "MIT" ]
null
null
null
raspi_components/variable_resistor/resistor_errors.py
builderdev212/raspi_components
edbcb5b8eed6b1e4a2f1cc41ce08ea9b25051495
[ "MIT" ]
null
null
null
class ResistorError(Exception): """ Raised when there is an error while working with the VariableResistor class. """ def __init__(self): pass
23.714286
80
0.662651
19
166
5.578947
0.947368
0
0
0
0
0
0
0
0
0
0
0
0.259036
166
6
81
27.666667
0.861789
0.457831
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0.333333
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
54b7d521b6e8b2631e621d015681863d3d80e50c
1,292
py
Python
problem8.py
asuttles/euler
dfcacf7175e639c9a84ddd0e09a3de8f05ff4a23
[ "Unlicense" ]
null
null
null
problem8.py
asuttles/euler
dfcacf7175e639c9a84ddd0e09a3de8f05ff4a23
[ "Unlicense" ]
null
null
null
problem8.py
asuttles/euler
dfcacf7175e639c9a84ddd0e09a3de8f05ff4a23
[ "Unlicense" ]
null
null
null
numStr = """ 73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450""".replace('\n', '') maxProd = 0 for i in range(len(numStr)-13): prod = 1 for j in range(13): prod *= int(numStr[i+j]) if prod > maxProd: maxProd = prod print("The maximum product is: ", maxProd)
39.151515
72
0.876161
55
1,292
20.581818
0.745455
0.012367
0
0
0
0
0
0
0
0
0
0.85617
0.090557
1,292
32
73
40.375
0.107234
0
0
0
0
0
0.830159
0.793651
0
0
0
0
0
1
0
false
0
0
0
0
0.034483
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
54c0024080c3fe302e9d356ca747094f607c5587
164
py
Python
python/testData/inspections/PyUnresolvedReferencesInspection3K/NamespacePackageRedundantUnion/a.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2018-12-29T09:53:39.000Z
2018-12-29T09:53:42.000Z
python/testData/inspections/PyUnresolvedReferencesInspection3K/NamespacePackageRedundantUnion/a.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/inspections/PyUnresolvedReferencesInspection3K/NamespacePackageRedundantUnion/a.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
1
2019-10-12T22:37:24.000Z
2019-10-12T22:37:24.000Z
<warning descr="Unused import statement">from nspkg1 import <error descr="Cannot find reference 'not_found' in 'imported module nspkg1'">not_found</error></warning>
164
164
0.792683
23
164
5.565217
0.695652
0.125
0
0
0
0
0
0
0
0
0
0.013333
0.085366
164
1
164
164
0.84
0
0
0
0
0
0.509091
0
0
0
0
0
0
0
null
null
0
1
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
5
54c0c58f6c3108e04bcf42887a86fa7d0b7a7e08
3,411
py
Python
4-Object_Detection/RPN/backend.py
anandpaiv/TensorFlow2.0-Examples
f62a9b81ff00e697c7bd810273958b2226baa860
[ "MIT" ]
null
null
null
4-Object_Detection/RPN/backend.py
anandpaiv/TensorFlow2.0-Examples
f62a9b81ff00e697c7bd810273958b2226baa860
[ "MIT" ]
null
null
null
4-Object_Detection/RPN/backend.py
anandpaiv/TensorFlow2.0-Examples
f62a9b81ff00e697c7bd810273958b2226baa860
[ "MIT" ]
1
2019-10-05T16:38:16.000Z
2019-10-05T16:38:16.000Z
#! /usr/bin/env python # coding=utf-8 #================================================================ # Copyright (C) 2019 * Ltd. All rights reserved. # # Editor : VIM # File name : backend.py # Author : YunYang1994 # Created date: 2019-07-16 00:24:11 # Description : # #================================================================ import numpy as np import tensorflow as tf weights = np.load("./vgg16.npy", encoding='latin1').item() inputs = tf.keras.layers.Input([224, 224, 3]) # Block 1 conv1_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=[1, 1], padding='same', activation='relu', use_bias=True, name='conv1_1')(inputs) conv1_2 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=[1, 1], padding='same', activation='relu', use_bias=True, name='conv1_2')(conv1_1) pool1_1 = tf.nn.max_pool(conv1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1_1') # Block 2 conv2_1 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, strides=[1, 1], padding='same', activation='relu', use_bias=True, name='conv2_1')(pool1_1) conv2_2 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, strides=[1, 1], padding='same', activation='relu', use_bias=True, name='conv2_2')(conv2_1) pool2_1 = tf.nn.max_pool(conv2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2_1') # Block 3 conv3_1 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=[1, 1], padding='same', activation='relu', use_bias=True, name='conv3_1')(pool2_1) conv3_2 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=[1, 1], padding='same', activation='relu', use_bias=True, name='conv3_2')(conv3_1) conv3_3 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=[1, 1], padding='same', activation='relu', use_bias=True, name='conv3_3')(conv3_2) pool3_1 = tf.nn.max_pool(conv3_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3_1') # Block 4 conv4_1 = tf.keras.layers.Conv2D(filters=512, kernel_size=3, strides=[1, 1], 
padding='same', activation='relu', use_bias=True, name='conv4_1')(pool3_1) conv4_2 = tf.keras.layers.Conv2D(filters=512, kernel_size=3, strides=[1, 1], padding='same', activation='relu', use_bias=True, name='conv4_2')(conv4_1) conv4_3 = tf.keras.layers.Conv2D(filters=512, kernel_size=3, strides=[1, 1], padding='same', activation='relu', use_bias=True, name='conv4_3')(conv4_2) pool4_1 = tf.nn.max_pool(conv4_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4_1') # Block 5 conv5_1 = tf.keras.layers.Conv2D(filters=512, kernel_size=3, strides=[1, 1], padding='same', activation='relu', use_bias=True, name='conv5_1')(pool4_1) conv5_2 = tf.keras.layers.Conv2D(filters=512, kernel_size=3, strides=[1, 1], padding='same', activation='relu', use_bias=True, name='conv5_2')(conv5_1) conv5_3 = tf.keras.layers.Conv2D(filters=512, kernel_size=3, strides=[1, 1], padding='same', activation='relu', use_bias=True, name='conv5_3')(conv5_2) model = tf.keras.Model(inputs, conv)
47.375
107
0.593668
500
3,411
3.89
0.17
0.069923
0.104884
0.126992
0.732648
0.707969
0.70437
0.70437
0.70437
0.70437
0
0.095186
0.202287
3,411
71
108
48.042254
0.619625
0.110818
0
0
0
0
0.084993
0
0
0
0
0
0
1
0
false
0
0.057143
0
0.057143
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
49c9133e250d775b590a619a074eab228b169edc
418
py
Python
paymaster/exceptions.py
IDilettant/paymaster_
d48549c714aa08876aadf3dc7f3456e47d581938
[ "MIT" ]
1
2021-12-12T09:49:23.000Z
2021-12-12T09:49:23.000Z
paymaster/exceptions.py
IDilettant/paymaster_
d48549c714aa08876aadf3dc7f3456e47d581938
[ "MIT" ]
1
2021-11-29T18:15:09.000Z
2021-12-08T17:13:55.000Z
paymaster/exceptions.py
IDilettant/paymaster
d48549c714aa08876aadf3dc7f3456e47d581938
[ "MIT" ]
null
null
null
"""Exceptions module.""" class PaymasterException(Exception): """Base app exception.""" pass class AccountError(PaymasterException): """Exception of no account in database.""" pass class CurrencyError(PaymasterException): """Exception of no currency in currencies table.""" pass class BalanceValueError(PaymasterException): """Exception of negative account balance.""" pass
16.076923
55
0.698565
39
418
7.487179
0.538462
0.369863
0.297945
0.212329
0
0
0
0
0
0
0
0
0.188995
418
25
56
16.72
0.861357
0.382775
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
49d736ef1460d3cb4343de30155638ff7d197537
6,531
py
Python
cpp_src/cmd/reindexer_server/test/specs/queries_test.py
radiophysicist/reindexer
5a6e82158665f6e9c78536cc4a907143a157f1d7
[ "Apache-2.0" ]
null
null
null
cpp_src/cmd/reindexer_server/test/specs/queries_test.py
radiophysicist/reindexer
5a6e82158665f6e9c78536cc4a907143a157f1d7
[ "Apache-2.0" ]
null
null
null
cpp_src/cmd/reindexer_server/test/specs/queries_test.py
radiophysicist/reindexer
5a6e82158665f6e9c78536cc4a907143a157f1d7
[ "Apache-2.0" ]
null
null
null
import random from specs import BaseTest class QueriesTest(BaseTest): def setUp(self): super().setUp() self.helper_queries_testdata_prepare() def test_query_sql(self): """Should be able to execute an sql query""" sql_query = 'SELECT COUNT(*),* FROM ' + self.current_ns status, body = self.api_sql_exec(self.current_db, sql_query) self.assertEqual(True, status == self.API_STATUS['success'], body) self.assertEqual(True, 'items' in body, body) self.assertEqual(True, 'query_total_items' in body, body) def test_query_sql_post(self): """Should be able to post an sql query""" query_body = 'SELECT COUNT(*),* FROM ' + self.current_ns status, body = self.api_sql_post(self.current_db, query_body) self.assertEqual(True, status == self.API_STATUS['success'], body) self.assertEqual(True, 'items' in body, body) self.assertEqual(True, 'query_total_items' in body, body) def test_query_dsl_(self): """Should be able to exec a dsl query""" query_dsl = self.helper_query_dsl_construct(self.current_ns) status, body = self.api_query_dsl(self.current_db, query_dsl) self.assertEqual(True, status == self.API_STATUS['success'], body) self.assertEqual(True, 'items' in body, body) def test_query_dsl_sort_asc(self): """Should be able to exec a dsl query and get asc-sorted item list""" sort_field = self.helper_items_first_key_of_item(self.items) sort_desc = False sort = self.helper_query_dsl_sort_construct(sort_field, sort_desc) query_dsl = self.helper_query_dsl_construct(self.current_ns, sort=sort) status, body = self.api_query_dsl(self.current_db, query_dsl) self.assertEqual(True, status == self.API_STATUS['success'], body) self.assertEqual(True, 'items' in body, body) self.assertEqual(True, body['items'][0][sort_field] < body['items'][-1][sort_field], body) def test_query_dsl_sort_desc(self): """Should be able to exec a dsl query and get desc-sorted item list""" sort_field = self.helper_items_first_key_of_item(self.items) sort_desc = True sort = self.helper_query_dsl_sort_construct(sort_field, 
sort_desc) query_dsl = self.helper_query_dsl_construct(self.current_ns, sort=sort) status, body = self.api_query_dsl(self.current_db, query_dsl) self.assertEqual(True, status == self.API_STATUS['success'], body) self.assertEqual(True, 'items' in body, body) self.assertEqual(True, body['items'][0][sort_field] > body['items'][-1][sort_field], body) def test_query_dsl_distinct(self): """Should be able to exec a dsl query and get distinct item list""" status, body = self.api_get_items(self.current_db, self.current_ns) self.assertEqual(True, status == self.API_STATUS['success'], body) total_items = body['total_items'] items = [] items_count = 10 distinct_field_value_random = random.randint(0x1FFFFFFF, 0x7FFFFFFF) items = self.helper_item_array_construct(items_count) pk_field_name = self.helper_items_first_key_of_item(items) test_field_name = self.helper_items_second_key_of_item(items) for i in range(0, items_count): items[i][pk_field_name] = i + 1000 items[i][test_field_name] = distinct_field_value_random for item_body in items: status, body = self.api_create_item( self.current_db, self.current_ns, item_body) self.assertEqual(True, status == self.API_STATUS['success'], body) distinct = self.helper_items_second_key_of_item(items) limit = total_items + items_count query_dsl = self.helper_query_dsl_construct( self.current_ns, distinct=distinct, limit=limit,req_total="enabled") status, body = self.api_query_dsl(self.current_db, query_dsl) self.assertEqual(True, status == self.API_STATUS['success'], body) self.assertEqual(True, 'items' in body, body) self.assertEqual(True, 'query_total_items' in body, body) self.assertEqual( True, body['query_total_items'] == total_items + 1, body) def test_query_dsl_paginate(self): """Should be able to exec a dsl query and pagination works correct""" items = [] items_count = 10 items = self.helper_item_array_construct(items_count) pk_field_name = self.helper_items_first_key_of_item(items) for i in range(0, items_count): 
items[i][pk_field_name] = i + 1000 limit = 1 offset = self.items_count - 1 query_dsl = self.helper_query_dsl_construct( self.current_ns, limit=limit, offset=offset) status, body = self.api_query_dsl(self.current_db, query_dsl) self.assertEqual(True, status == self.API_STATUS['success'], body) self.assertEqual(True, 'items' in body, body) self.assertEqual(True, self.items[-1] in body['items'], body) self.assertEqual(True, len(body['items']) == 1, body) def test_query_dsl_total(self): """Should be able to exec a dsl query and get total_items""" query_dsl = self.helper_query_dsl_construct( self.current_ns, req_total='enabled') status, body = self.api_query_dsl(self.current_db, query_dsl) self.assertEqual(True, status == self.API_STATUS['success'], body) self.assertEqual(True, 'items' in body, body) self.assertEqual(True, 'query_total_items' in body, body) self.assertEqual(True, body['query_total_items'] == self.items_count, body) def test_query_dsl_filter_eq(self): """Should be able to exec a dsl query with EQ filter""" test_field_name = self.helper_items_second_key_of_item(self.items) test_value = 2 filter = self.helper_query_dsl_filter_construct( test_field_name, 'EQ', 'AND', test_value) filters = [] filters.append(filter) query_dsl = self.helper_query_dsl_construct( self.current_ns, filters=filters) status, body = self.api_query_dsl(self.current_db, query_dsl) self.assertEqual(True, status == self.API_STATUS['success'], body) self.assertEqual(True, 'items' in body, body) self.assertEqual(True, self.items[0] in body['items'], body)
39.823171
83
0.662226
897
6,531
4.548495
0.098105
0.07451
0.144363
0.12402
0.785539
0.769853
0.755147
0.742892
0.729902
0.702696
0
0.005767
0.22998
6,531
163
84
40.067485
0.805528
0.071811
0
0.514019
0
0
0.055685
0
0
0
0.003324
0
0.28972
1
0.093458
false
0
0.018692
0
0.121495
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
49e6f82b2e8558b09aa633afaae5b29f72680ff3
165
py
Python
multigrids/__init__.py
rwspicer/multigrids
e4f18144f0e95fbf7da8e17ad2fcc2428763ce35
[ "MIT" ]
null
null
null
multigrids/__init__.py
rwspicer/multigrids
e4f18144f0e95fbf7da8e17ad2fcc2428763ce35
[ "MIT" ]
4
2021-11-02T21:09:58.000Z
2022-02-09T01:33:32.000Z
multigrids/__init__.py
rwspicer/multigrids
e4f18144f0e95fbf7da8e17ad2fcc2428763ce35
[ "MIT" ]
1
2020-06-22T20:39:04.000Z
2020-06-22T20:39:04.000Z
from .__metadata__ import * from .multigrid import MultiGrid from .temporal import TemporalMultiGrid from .grid import Grid from .temporal_grid import TemporalGrid
23.571429
39
0.836364
20
165
6.65
0.4
0.180451
0
0
0
0
0
0
0
0
0
0
0.127273
165
6
40
27.5
0.923611
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b71ac3443df007ad8d892c3d528ccd7f09cba4b7
10,413
py
Python
test/test.py
SlavaSereb/fireblocks-key-recovery-tool
c17fc1ed6a58a3b987a5b082254978469a9d702e
[ "MIT" ]
2
2019-07-09T16:13:35.000Z
2021-01-28T14:13:31.000Z
test/test.py
SlavaSereb/fireblocks-key-recovery-tool
c17fc1ed6a58a3b987a5b082254978469a9d702e
[ "MIT" ]
1
2020-12-01T05:51:40.000Z
2020-12-01T05:54:14.000Z
test/test.py
SlavaSereb/fireblocks-key-recovery-tool
c17fc1ed6a58a3b987a5b082254978469a9d702e
[ "MIT" ]
6
2019-11-12T11:52:31.000Z
2021-03-15T14:20:26.000Z
import pytest import sys sys.path.append("..") from utils import recover def test_recovery(): result = recover.restore_key_and_chaincode("backup.zip", "priv.pem", "Thefireblocks1!") ecdsa_priv_key, ecdsa_chaincode = result['MPC_ECDSA_SECP256K1'] assert(ecdsa_priv_key == 0x473d1820ca4bf7cf6b018a8520b1ec0849cb99bce4fff45c5598723f67b3bd52) pub = recover.get_public_key("MPC_ECDSA_SECP256K1", ecdsa_priv_key) assert(pub == "021d84f3b6d7c6888f81c7cc381b658d85319f27e1ea9c93dff128667fb4b82ba0") assert(recover.encode_extended_key('MPC_ECDSA_SECP256K1', ecdsa_priv_key, ecdsa_chaincode, False) == "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzF9aunJDs4SsrmoxycAo6xxBTHawSz5sYxEy8TpCkv66Sci373DJ") assert(recover.encode_extended_key('MPC_ECDSA_SECP256K1', pub, ecdsa_chaincode, True) == "xpub661MyMwAqRbcEYS8w7XLSVeEsBXy79zSzH1J8vCdxAZningWLdN3zgtU6QJJZSgiCXT6sq7wa2jCk5t4Vv1r1E4q1venKghAAdyzieufGyX") print("recovery OK") def test_full_recovery(): result = recover.restore_key_and_chaincode("backup_new.zip", "priv2.pem", "Thefireblocks1!") ecdsa_priv_key, ecdsa_chaincode = result['MPC_ECDSA_SECP256K1'] eddsa_priv_key, eddsa_chaincode = result['MPC_EDDSA_ED25519'] assert(ecdsa_priv_key == 0x66b1baf063db6e7152480334ebab0ab098e85f682b784754e46c18c962a1aa9d) assert(eddsa_priv_key == 0xd74820d02cc2aa09e2d0bcb36aeb92625b3d92c8d202063eab5513fd4453a44) assert(ecdsa_chaincode == bytes.fromhex('5d90bd21d2273a25d0aea082716bdc4529e007823260ad3479182f6672c25cc4')) assert(eddsa_chaincode == bytes.fromhex('5d90bd21d2273a25d0aea082716bdc4529e007823260ad3479182f6672c25cc4')) pub = recover.get_public_key("MPC_ECDSA_SECP256K1", ecdsa_priv_key) assert(pub == "02e0bf609d7ced9c49e9f4c1d1df0142bb95eb622fa617a9f7280fa23b7f013dc6") assert(recover.encode_extended_key('MPC_ECDSA_SECP256K1', ecdsa_priv_key, ecdsa_chaincode, False) == "xprv9s21ZrQH143K2zPNSbKDKusTNW4XVwvTCCEFvcLkeNyauqJJd9UjZg3AtfZbmXa22TFph2NdACUPoWR4sCqMCKQM1j7jRvLuBCF3YoapsX6") 
assert(recover.encode_extended_key('MPC_ECDSA_SECP256K1', pub, ecdsa_chaincode, True) == "xpub661MyMwAqRbcFUTqYcrDh3pBvXu1uQeJZR9rizkNCiWZnddTAgnz7UMejwX7u4xLmh2JMTtL7DdZmBWGUKa7v836UarassQ3DVFATMzRycV") pub = recover.get_public_key("MPC_EDDSA_ED25519", eddsa_priv_key) assert(pub == "0050cfee85dabebed78f43e94a1b7afd13c20461ad66efa083779bdeffd22269d9") assert(recover.encode_extended_key('MPC_EDDSA_ED25519', eddsa_priv_key, eddsa_chaincode, False) == "fprv4LsXPWzhTTp9ax8NGVwbnRFuT3avVQ4ydHNWcu8hCGZd18TRKxgAzbrpY9bLJRe4Y2AyX9TfQdDPbmqEYoDCTju9QFZbUgdsxsmUgfvuEDK") assert(recover.encode_extended_key('MPC_EDDSA_ED25519', pub, eddsa_chaincode, True) == "fpub8sZZXw2wbqVpURAAA9cCBpv2256rejFtCayHuRAzcYN1qciBxMVmB6UgiDAQTUZh5EP9JZciPQPjKAHyqPYHELqEHWkvo1sxreEJgLyfCJj") print("recovery OK") def test_recovery_old_format(): result = recover.restore_key_and_chaincode("backup_old_format.zip", "priv.pem", "Thefireblocks1!") ecdsa_priv_key, ecdsa_chaincode = result['MPC_ECDSA_SECP256K1'] assert(ecdsa_priv_key == 0x473d1820ca4bf7cf6b018a8520b1ec0849cb99bce4fff45c5598723f67b3bd52) pub = recover.get_public_key("MPC_ECDSA_SECP256K1", ecdsa_priv_key) assert(pub == "021d84f3b6d7c6888f81c7cc381b658d85319f27e1ea9c93dff128667fb4b82ba0") assert(recover.encode_extended_key('MPC_ECDSA_SECP256K1', ecdsa_priv_key, ecdsa_chaincode, False) == "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzF9aunJDs4SsrmoxycAo6xxBTHawSz5sYxEy8TpCkv66Sci373DJ") assert(recover.encode_extended_key('MPC_ECDSA_SECP256K1', pub, ecdsa_chaincode, True) == "xpub661MyMwAqRbcEYS8w7XLSVeEsBXy79zSzH1J8vCdxAZningWLdN3zgtU6QJJZSgiCXT6sq7wa2jCk5t4Vv1r1E4q1venKghAAdyzieufGyX") print("recovery (old format) OK") def test_cmp_recovery(): result = recover.restore_key_and_chaincode("backup_cmp.zip", "priv.pem", "Fireblocks1!") ecdsa_priv_key, ecdsa_chaincode = result['MPC_CMP_ECDSA_SECP256K1'] eddsa_priv_key, eddsa_chaincode = result['MPC_CMP_EDDSA_ED25519'] assert(ecdsa_priv_key == 
0xf57c18e98a24ca0b36fbbd103233aff128b740426da189ce208545d44bbad050) assert(eddsa_priv_key == 0xa536dc2f2d744ae78eb26fdfb4b9e234a649525e0a1142bf900cd9c26987007) pub = recover.get_public_key("MPC_CMP_ECDSA_SECP256K1", ecdsa_priv_key) assert(pub == "03321ad97aea16624280b83e1c1b36bb9cb293cac84925fe5fcf956386cd063fec") assert(recover.encode_extended_key('MPC_CMP_ECDSA_SECP256K1', ecdsa_priv_key, ecdsa_chaincode, False) == "xprv9s21ZrQH143K3PhnQQqPZm38HtkJ3bjcVmwc1SfGG8ddw3jXtrhSBNFNcVVx7VUL8vPpmMg1dqxhecVq8WJ1VHn9yoeRM88qfYEnEEi6XaQ") assert(recover.encode_extended_key('MPC_CMP_ECDSA_SECP256K1', pub, ecdsa_chaincode, True) == "xpub661MyMwAqRbcFsnFWSNPvtyrqvanT4TTrzsCoq4spUAcor4gSQ1gjAZrTkzR1o8XZ5uPq6WELaga3Zh1eJyfXLvfkWTfV7AjdFU5VuWMpPp") pub = recover.get_public_key("MPC_CMP_EDDSA_ED25519", eddsa_priv_key) assert(pub == "00701c977bd4d2038328dd8154c147f9d40225fc8e9fd98c010cc968ea8fabb362") assert(recover.encode_extended_key('MPC_CMP_EDDSA_ED25519', eddsa_priv_key, eddsa_chaincode, False) == "fprv4LsXPWzhTTp9bMSnEKTn2GRaNSGh33t8vs5rhjTCp2Dg2LtebftscJ52FxRRKeHGLfK6X5Lg3LcsGxQyHZ8ovvPsP2s9PLbZC2VFHc64vFH") assert(recover.encode_extended_key('MPC_CMP_EDDSA_ED25519', pub, eddsa_chaincode, True) == "fpub8sZZXw2wbqVpUpUa7y8NRg5gwTndCP53WAgdzFVWEJ24rq9RE4iTnngtS2FeusezUsAJb2sZiMvSDqYGeGVSs65wJqYcGzQRuZGM9NHHqog") print("cmp recovery OK") def test_one_custom_chaincode_recovery(): ''' The zip in this test was built from 'backup_new.zip', the file used in test_full_recovery() The only change is in an alternative chain code assigned specifically to MPC_ECDSA_SECP256K1, while MPC_EDDSA_ED25519 is not assigned a specific chaincode. Hence all the extracted keys are they same, and differce lies mostly in the extended form of the key, which encodes the chaincode. 
''' result = recover.restore_key_and_chaincode("backup_with_one_custom_chaincode.zip", "priv2.pem", "Thefireblocks1!") ecdsa_priv_key, ecdsa_chaincode = result['MPC_ECDSA_SECP256K1'] eddsa_priv_key, eddsa_chaincode = result['MPC_EDDSA_ED25519'] assert(ecdsa_chaincode != eddsa_chaincode) assert(ecdsa_priv_key == 0x66b1baf063db6e7152480334ebab0ab098e85f682b784754e46c18c962a1aa9d) assert(eddsa_priv_key == 0xd74820d02cc2aa09e2d0bcb36aeb92625b3d92c8d202063eab5513fd4453a44) assert(ecdsa_chaincode == bytes.fromhex('865b4d6e745c64afc98a7fe32103d6ea775910d4d58e00fe17d2fdd4f8f8f1d0')) assert(eddsa_chaincode == bytes.fromhex('5d90bd21d2273a25d0aea082716bdc4529e007823260ad3479182f6672c25cc4')) pub = recover.get_public_key("MPC_ECDSA_SECP256K1", ecdsa_priv_key) assert(recover.encode_extended_key('MPC_ECDSA_SECP256K1', ecdsa_priv_key, ecdsa_chaincode, False) == "xprv9s21ZrQH143K3PwZ9jrXG7MZXgj92u6eeCz6M8w8a5RGYJoNmWQRA2eso47rJHr9qawKR9tQVTRki8XUPwVSuBPSnVxT6mQb99XUbruDGk7") assert(recover.encode_extended_key('MPC_ECDSA_SECP256K1', pub, ecdsa_chaincode, True) == "xpub661MyMwAqRbcFt22FmPXdFJJ5iZdSMpW1Ruh9XLk8QxFR78XK3ifhpyMeL5NRqEUapho5bQ7SUavfocg14EDcz2CFMhJYiTjBSXbWQcdkrR") pub = recover.get_public_key("MPC_EDDSA_ED25519", eddsa_priv_key) assert(pub == "0050cfee85dabebed78f43e94a1b7afd13c20461ad66efa083779bdeffd22269d9") assert(recover.encode_extended_key('MPC_EDDSA_ED25519', eddsa_priv_key, eddsa_chaincode, False) == "fprv4LsXPWzhTTp9ax8NGVwbnRFuT3avVQ4ydHNWcu8hCGZd18TRKxgAzbrpY9bLJRe4Y2AyX9TfQdDPbmqEYoDCTju9QFZbUgdsxsmUgfvuEDK") assert(recover.encode_extended_key('MPC_EDDSA_ED25519', pub, eddsa_chaincode, True) == "fpub8sZZXw2wbqVpURAAA9cCBpv2256rejFtCayHuRAzcYN1qciBxMVmB6UgiDAQTUZh5EP9JZciPQPjKAHyqPYHELqEHWkvo1sxreEJgLyfCJj") print("recovery OK") def test_two_custom_chaincode_recovery(): ''' The zip in this test was built from 'backup_new.zip', the file used in test_full_recovery() The only changes are two different chain code assigned specifically to MPC_ECDSA_SECP256K1 and 
MPC_EDDSA_ED25519. The chaincode assigned to MPC_ECDSA_SECP256K1 is the same as the one in 'backup_with_one_custom_chaincode.zip', the file used in test_one_custom_chaincode_recovery() Hence all the extracted keys are they same: only the extended forms of the keys are different, as they encode the respective chaincodes. ''' result = recover.restore_key_and_chaincode("backup_with_two_custom_chaincode.zip", "priv2.pem", "Thefireblocks1!") ecdsa_priv_key, ecdsa_chaincode = result['MPC_ECDSA_SECP256K1'] eddsa_priv_key, eddsa_chaincode = result['MPC_EDDSA_ED25519'] assert(ecdsa_chaincode != eddsa_chaincode) assert(ecdsa_priv_key == 0x66b1baf063db6e7152480334ebab0ab098e85f682b784754e46c18c962a1aa9d) assert(eddsa_priv_key == 0xd74820d02cc2aa09e2d0bcb36aeb92625b3d92c8d202063eab5513fd4453a44) assert(ecdsa_chaincode == bytes.fromhex('865b4d6e745c64afc98a7fe32103d6ea775910d4d58e00fe17d2fdd4f8f8f1d0')) assert(eddsa_chaincode == bytes.fromhex('89b11d04462618fa6d3981f891f2ae8968d8762f268fdec0a4c440ecafb072dd')) pub = recover.get_public_key("MPC_ECDSA_SECP256K1", ecdsa_priv_key) assert(recover.encode_extended_key('MPC_ECDSA_SECP256K1', ecdsa_priv_key, ecdsa_chaincode, False) == "xprv9s21ZrQH143K3PwZ9jrXG7MZXgj92u6eeCz6M8w8a5RGYJoNmWQRA2eso47rJHr9qawKR9tQVTRki8XUPwVSuBPSnVxT6mQb99XUbruDGk7") assert(recover.encode_extended_key('MPC_ECDSA_SECP256K1', pub, ecdsa_chaincode, True) == "xpub661MyMwAqRbcFt22FmPXdFJJ5iZdSMpW1Ruh9XLk8QxFR78XK3ifhpyMeL5NRqEUapho5bQ7SUavfocg14EDcz2CFMhJYiTjBSXbWQcdkrR") pub = recover.get_public_key("MPC_EDDSA_ED25519", eddsa_priv_key) assert(pub == "0050cfee85dabebed78f43e94a1b7afd13c20461ad66efa083779bdeffd22269d9") assert(recover.encode_extended_key('MPC_EDDSA_ED25519', eddsa_priv_key, eddsa_chaincode, False) == "fprv4LsXPWzhTTp9bPcFjmqM7U4drbDYRi1YzzFgrfnWAH3hsnLWeJioBzvyvwYJ5p5SuXjwhVd41wrB3tR1Ep41U2DpkJM3J9JGkuCKiBAyyGz") assert(recover.encode_extended_key('MPC_EDDSA_ED25519', pub, eddsa_chaincode, True) == 
"fpub8sZZXw2wbqVpUre3dRVwWsikRcjUb3CTaHrU9BpoaYr6iGbHGhYPNVYr717NEs15Sjx7Uun6zj2WmGskXQP6Ed9udZYNcUYMeff9hsYTcyr") print("recovery OK") if __name__ == '__main__': test_recovery() test_full_recovery() test_recovery_old_format() test_cmp_recovery() test_one_custom_chaincode_recovery() test_two_custom_chaincode_recovery()
73.330986
223
0.836166
925
10,413
9.021622
0.12973
0.033553
0.034512
0.064709
0.767406
0.754823
0.740084
0.724266
0.660395
0.631037
0
0.163656
0.09104
10,413
141
224
73.851064
0.718014
0.085566
0
0.536842
0
0
0.450636
0.360275
0
0
0.069492
0
0.484211
1
0.063158
false
0
0.031579
0
0.094737
0.063158
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
5
b73780eef2bf48771afe9fa2a602e87a5fc44603
3,575
py
Python
ooi_instrument_agent/test/responses.py
oceanobservatories/ooi-instrument-agent
e22e4300079468bb99c543cbbf1cb5c8b4a96897
[ "Apache-2.0" ]
null
null
null
ooi_instrument_agent/test/responses.py
oceanobservatories/ooi-instrument-agent
e22e4300079468bb99c543cbbf1cb5c8b4a96897
[ "Apache-2.0" ]
null
null
null
ooi_instrument_agent/test/responses.py
oceanobservatories/ooi-instrument-agent
e22e4300079468bb99c543cbbf1cb5c8b4a96897
[ "Apache-2.0" ]
null
null
null
health_response = '''[ { "Checks": [ { "CheckID": "service:instrument_driver_RS10ENGC-XX00X-00-SPKIRA001", "Name": "Service 'instrument_driver' check", "Node": "uft21", "Notes": "", "Output": "", "ServiceID": "instrument_driver_RS10ENGC-XX00X-00-SPKIRA001", "ServiceName": "instrument_driver", "Status": "passing" }, { "CheckID": "serfHealth", "Name": "Serf Health Status", "Node": "uft21", "Notes": "", "Output": "Agent alive and reachable", "ServiceID": "", "ServiceName": "", "Status": "passing" } ], "Node": { "Address": "128.6.240.39", "Node": "uft21" }, "Service": { "Address": "", "ID": "instrument_driver_RS10ENGC-XX00X-00-SPKIRA001", "Port": 42558, "Service": "instrument_driver", "Tags": [ "RS10ENGC-XX00X-00-SPKIRA001" ] } }, { "Checks": [ { "CheckID": "service:instrument_driver_RS10ENGC-XX00X-00-TMPSFA001", "Name": "Service 'instrument_driver' check", "Node": "uft21", "Notes": "", "Output": "", "ServiceID": "instrument_driver_RS10ENGC-XX00X-00-TMPSFA001", "ServiceName": "instrument_driver", "Status": "passing" }, { "CheckID": "serfHealth", "Name": "Serf Health Status", "Node": "uft21", "Notes": "", "Output": "Agent alive and reachable", "ServiceID": "", "ServiceName": "", "Status": "passing" } ], "Node": { "Address": "128.6.240.39", "Node": "uft21" }, "Service": { "Address": "", "ID": "instrument_driver_RS10ENGC-XX00X-00-TMPSFA001", "Port": 41799, "Service": "instrument_driver", "Tags": [ "RS10ENGC-XX00X-00-TMPSFA001" ] } } ]''' port_agent_response = '''[ { "Checks": [ { "CheckID": "service:port-agent-RS10ENGC-XX00X-00-BOTPTA001", "Name": "Service 'port-agent' check", "Node": "uft21", "Notes": "", "Output": "", "ServiceID": "port-agent-RS10ENGC-XX00X-00-BOTPTA001", "ServiceName": "port-agent", "Status": "passing" }, { "CheckID": "serfHealth", "Name": "Serf Health Status", "Node": "uft21", "Notes": "", "Output": "Agent alive and reachable", "ServiceID": "", "ServiceName": "", "Status": "passing" } ], "Node": { "Address": "128.6.240.39", "Node": "uft21" }, 
"Service": { "Address": "", "ID": "port-agent-RS10ENGC-XX00X-00-BOTPTA001", "Port": 41347, "Service": "port-agent", "Tags": [ "RS10ENGC-XX00X-00-BOTPTA001" ] } } ]'''
30.555556
83
0.387133
230
3,575
5.926087
0.173913
0.140866
0.132062
0.12766
0.880411
0.867938
0.730741
0.669112
0.594277
0.594277
0
0.086598
0.457343
3,575
116
84
30.818966
0.615979
0
0
0.591304
0
0
0.984615
0.146014
0
0
0
0
0
1
0
false
0.052174
0
0
0
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
5
3f83407a9f58d733df9cdb9a852f98f8996c9fa2
139
py
Python
custom_components/ewii/pyewii/__init__.py
raakilde/hacs_addon
5688609ab54fae2d1817acbe825399d05c6662d7
[ "Apache-2.0" ]
null
null
null
custom_components/ewii/pyewii/__init__.py
raakilde/hacs_addon
5688609ab54fae2d1817acbe825399d05c6662d7
[ "Apache-2.0" ]
null
null
null
custom_components/ewii/pyewii/__init__.py
raakilde/hacs_addon
5688609ab54fae2d1817acbe825399d05c6662d7
[ "Apache-2.0" ]
null
null
null
""" Init file for pyewii """ from .ewii import Ewii from .models import TimeSeries from .models import RawMeterData __version__ = "0.5.0"
15.444444
32
0.741007
20
139
4.95
0.65
0.20202
0.323232
0
0
0
0
0
0
0
0
0.025641
0.158273
139
8
33
17.375
0.820513
0.143885
0
0
0
0
0.045045
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
b2100d37be87232bfc534454873162f78b195097
191
py
Python
code_snippets/api-alert-mute.py
brettlangdon/documentation
87c23cb1d5e3e877bb37a19f7231b5d9239509dc
[ "BSD-3-Clause" ]
null
null
null
code_snippets/api-alert-mute.py
brettlangdon/documentation
87c23cb1d5e3e877bb37a19f7231b5d9239509dc
[ "BSD-3-Clause" ]
null
null
null
code_snippets/api-alert-mute.py
brettlangdon/documentation
87c23cb1d5e3e877bb37a19f7231b5d9239509dc
[ "BSD-3-Clause" ]
null
null
null
from dogapi import dog_http_api as api api.api_key = '9775a026f1ca7d1c6c5af9d94d9595a4' api.application_key = '87ce4a24b5553d2e482ea8a8500e71b8ad4554ff' # Mute all alerts api.mute_alerts()
23.875
64
0.837696
22
191
7.045455
0.636364
0.077419
0
0
0
0
0
0
0
0
0
0.261628
0.099476
191
7
65
27.285714
0.639535
0.078534
0
0
0
0
0.413793
0.413793
0
0
0
0
0
1
0
true
0
0.25
0
0.25
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
b766f104343393a31c7ae0559035981b390e3df6
114
py
Python
notifications/admin.py
jeffsimp88/twitterclone
696aa05da4feae15d7a0c2296a8d74be4ee32286
[ "MIT" ]
null
null
null
notifications/admin.py
jeffsimp88/twitterclone
696aa05da4feae15d7a0c2296a8d74be4ee32286
[ "MIT" ]
null
null
null
notifications/admin.py
jeffsimp88/twitterclone
696aa05da4feae15d7a0c2296a8d74be4ee32286
[ "MIT" ]
null
null
null
from django.contrib import admin from notifications.models import Notification admin.site.register(Notification)
22.8
45
0.859649
14
114
7
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.087719
114
4
46
28.5
0.942308
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b769adfb80a9574b865d4ec01ad88141a9a905ac
43
py
Python
protobuf_weather_station/__init__.py
Telluric/Protobuf-Weather-Station.mpy
60619384bce797bca4178268aa2fd6516a77402f
[ "MIT" ]
null
null
null
protobuf_weather_station/__init__.py
Telluric/Protobuf-Weather-Station.mpy
60619384bce797bca4178268aa2fd6516a77402f
[ "MIT" ]
null
null
null
protobuf_weather_station/__init__.py
Telluric/Protobuf-Weather-Station.mpy
60619384bce797bca4178268aa2fd6516a77402f
[ "MIT" ]
null
null
null
from .WeatherStation import WeatherStation
21.5
42
0.883721
4
43
9.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.093023
43
1
43
43
0.974359
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b7a2e0f97d3b9fdfeffa62a3542098fac97310dc
41
py
Python
conf.py
vminkov/sentinel-trails-documentation
d87d3129337ca47b28a2e94fc11ff9ce0a834225
[ "Apache-2.0" ]
null
null
null
conf.py
vminkov/sentinel-trails-documentation
d87d3129337ca47b28a2e94fc11ff9ce0a834225
[ "Apache-2.0" ]
null
null
null
conf.py
vminkov/sentinel-trails-documentation
d87d3129337ca47b28a2e94fc11ff9ce0a834225
[ "Apache-2.0" ]
null
null
null
extensions = ['sphinxcontrib.contentui']
20.5
40
0.780488
3
41
10.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.073171
41
1
41
41
0.842105
0
0
0
0
0
0.560976
0.560976
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4d32cc0df613edc0b23134b438a5caa86cb830c8
37
py
Python
InventoryOptimExample/__init__.py
abbasmalekpour/InventoryOptim
20e0a2311d270341e91ed8af3b90f416b8efb02b
[ "MIT" ]
1
2020-12-09T01:32:07.000Z
2020-12-09T01:32:07.000Z
InventoryOptimExample/__init__.py
abbasmalekpour/InventoryOptim
20e0a2311d270341e91ed8af3b90f416b8efb02b
[ "MIT" ]
null
null
null
InventoryOptimExample/__init__.py
abbasmalekpour/InventoryOptim
20e0a2311d270341e91ed8af3b90f416b8efb02b
[ "MIT" ]
2
2019-08-09T22:10:17.000Z
2019-11-12T04:50:31.000Z
from .inventory import InventoryOptim
37
37
0.891892
4
37
8.25
1
0
0
0
0
0
0
0
0
0
0
0
0.081081
37
1
37
37
0.970588
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
4d79e65ae09435b97dc5a221ef0a83ea788da7dd
3,277
py
Python
test/transforms/test_to_sparse_tensor.py
NucciTheBoss/pytorch_geometric
e220a2c08fa1b2f1672d616c22eac2a67b5c8967
[ "MIT" ]
2,350
2021-09-12T08:32:50.000Z
2022-03-31T18:09:36.000Z
test/transforms/test_to_sparse_tensor.py
NucciTheBoss/pytorch_geometric
e220a2c08fa1b2f1672d616c22eac2a67b5c8967
[ "MIT" ]
588
2021-09-12T08:49:08.000Z
2022-03-31T21:02:13.000Z
test/transforms/test_to_sparse_tensor.py
NucciTheBoss/pytorch_geometric
e220a2c08fa1b2f1672d616c22eac2a67b5c8967
[ "MIT" ]
505
2021-09-13T13:13:32.000Z
2022-03-31T15:54:00.000Z
import torch from torch_geometric.data import Data, HeteroData from torch_geometric.transforms import ToSparseTensor def test_to_sparse_tensor(): assert ToSparseTensor().__repr__() == 'ToSparseTensor()' edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) edge_weight = torch.randn(edge_index.size(1)) edge_attr = torch.randn(edge_index.size(1), 8) perm = torch.tensor([1, 0, 3, 2]) data = Data(edge_index=edge_index, edge_weight=edge_weight, edge_attr=edge_attr, num_nodes=3) data = ToSparseTensor()(data) assert len(data) == 3 assert data.adj_t.storage.row().tolist() == [0, 1, 1, 2] assert data.adj_t.storage.col().tolist() == [1, 0, 2, 1] assert data.adj_t.storage.value().tolist() == edge_weight[perm].tolist() assert data.edge_attr.tolist() == edge_attr[perm].tolist() assert data.num_nodes == 3 def test_to_sparse_tensor_and_keep_edge_index(): edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) edge_weight = torch.randn(edge_index.size(1)) edge_attr = torch.randn(edge_index.size(1), 8) perm = torch.tensor([1, 0, 3, 2]) data = Data(edge_index=edge_index, edge_weight=edge_weight, edge_attr=edge_attr, num_nodes=3) data = ToSparseTensor(remove_edge_index=False)(data) assert len(data) == 5 assert data.adj_t.storage.row().tolist() == [0, 1, 1, 2] assert data.adj_t.storage.col().tolist() == [1, 0, 2, 1] assert data.adj_t.storage.value().tolist() == edge_weight[perm].tolist() assert data.edge_index.tolist() == edge_index[:, perm].tolist() assert data.edge_weight.tolist() == edge_weight[perm].tolist() assert data.edge_attr.tolist() == edge_attr[perm].tolist() assert data.num_nodes == 3 def test_hetero_to_sparse_tensor(): edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) data = HeteroData() data['v'].num_nodes = 3 data['w'].num_nodes = 3 data['v', 'v'].edge_index = edge_index data['v', 'w'].edge_index = edge_index data = ToSparseTensor()(data) assert data['v', 'v'].adj_t.storage.row().tolist() == [0, 1, 1, 2] assert data['v', 'v'].adj_t.storage.col().tolist() == [1, 
0, 2, 1] assert data['v', 'v'].adj_t.storage.value() is None assert data['v', 'w'].adj_t.storage.row().tolist() == [0, 1, 1, 2] assert data['v', 'w'].adj_t.storage.col().tolist() == [1, 0, 2, 1] assert data['v', 'w'].adj_t.storage.value() is None def test_to_sparse_tensor_num_nodes_equals_num_edges(): x = torch.arange(4) y = torch.arange(4) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) edge_weight = torch.randn(edge_index.size(1)) edge_attr = torch.randn(edge_index.size(1), 8) perm = torch.tensor([1, 0, 3, 2]) data = Data(x=x, edge_index=edge_index, edge_weight=edge_weight, edge_attr=edge_attr, y=y) data = ToSparseTensor()(data) assert len(data) == 4 assert data.x.tolist() == [0, 1, 2, 3] assert data.adj_t.storage.row().tolist() == [0, 1, 1, 2] assert data.adj_t.storage.col().tolist() == [1, 0, 2, 1] assert data.adj_t.storage.value().tolist() == edge_weight[perm].tolist() assert data.edge_attr.tolist() == edge_attr[perm].tolist() assert data.y.tolist() == [0, 1, 2, 3]
39.481928
76
0.635032
525
3,277
3.769524
0.100952
0.109146
0.083375
0.018191
0.812026
0.739768
0.702375
0.67711
0.67711
0.67711
0
0.041713
0.180653
3,277
82
77
39.963415
0.695345
0
0
0.523077
0
0
0.010375
0
0
0
0
0
0.430769
1
0.061538
false
0
0.046154
0
0.107692
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
5
4d7f10983207c620617673600af7546360e4b377
555
py
Python
thenewboston_node/core/utils/pytest.py
AbhayAysola/thenewboston-node
8a24cfd814eed590a7a1066e45b8b4877501aa35
[ "MIT" ]
30
2021-03-05T22:08:17.000Z
2021-09-23T02:45:45.000Z
thenewboston_node/core/utils/pytest.py
AbhayAysola/thenewboston-node
8a24cfd814eed590a7a1066e45b8b4877501aa35
[ "MIT" ]
148
2021-03-05T23:37:50.000Z
2021-11-02T02:18:58.000Z
thenewboston_node/core/utils/pytest.py
AbhayAysola/thenewboston-node
8a24cfd814eed590a7a1066e45b8b4877501aa35
[ "MIT" ]
14
2021-03-05T21:58:46.000Z
2021-10-15T17:27:52.000Z
import os import sys from .misc import yaml_coerce PYTEST_RUN_SLOW_TESTS = 'PYTEST_RUN_SLOW_TESTS' def is_pytest_running(): # TODO(dmu) HIGH: Implement a better way of detecting pytest return os.getenv('PYTEST_RUNNING') == 'true' or os.path.basename(sys.argv[0]) in ('pytest', 'py.test') def should_run(skip_name): return bool(yaml_coerce(os.getenv(skip_name))) def skip_slow(wrapped): import pytest # because pytest is a dev dependency return pytest.mark.skipif(not should_run(PYTEST_RUN_SLOW_TESTS), reason='Slow')(wrapped)
26.428571
106
0.745946
87
555
4.54023
0.528736
0.068354
0.098734
0.136709
0
0
0
0
0
0
0
0.002105
0.144144
555
20
107
27.75
0.829474
0.167568
0
0
0
0
0.122004
0.045752
0
0
0
0.05
0
1
0.272727
false
0
0.363636
0.181818
0.909091
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
1
1
1
0
0
5
4d945e29879165185ac990059a24e611125327f2
119
py
Python
lib/models/fusion_modules/__init__.py
CFM-MSG/Code_LEORN
fabea1e1ded973a4db692e51e2df442bde55f626
[ "MIT" ]
1
2022-01-31T03:23:37.000Z
2022-01-31T03:23:37.000Z
lib/models/fusion_modules/__init__.py
CFM-MSG/Code_LEORN
fabea1e1ded973a4db692e51e2df442bde55f626
[ "MIT" ]
null
null
null
lib/models/fusion_modules/__init__.py
CFM-MSG/Code_LEORN
fabea1e1ded973a4db692e51e2df442bde55f626
[ "MIT" ]
null
null
null
from .base_fusion import BaseFusion, EasyFusion, CatFusion from .semantic_enhancement import SemanticEnhancementModule
39.666667
59
0.882353
12
119
8.583333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.084034
119
2
60
59.5
0.944954
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4dacf9ddef29fa15efa3ab7e518fe4e0527469fb
76
py
Python
rules/__init__.py
jasonzhang970611/254final
923ea900df08557a6ede7777c916e25ee06ec9a8
[ "MIT" ]
null
null
null
rules/__init__.py
jasonzhang970611/254final
923ea900df08557a6ede7777c916e25ee06ec9a8
[ "MIT" ]
null
null
null
rules/__init__.py
jasonzhang970611/254final
923ea900df08557a6ede7777c916e25ee06ec9a8
[ "MIT" ]
1
2021-05-11T23:13:52.000Z
2021-05-11T23:13:52.000Z
"""Rule module""" from .rule import Rule from .freestyle import FreeStyle
12.666667
32
0.736842
10
76
5.6
0.5
0
0
0
0
0
0
0
0
0
0
0
0.157895
76
5
33
15.2
0.875
0.144737
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4dd07b3108e3520f453e96d35adc718742b6cd79
95
py
Python
app/main/meta/__init__.py
by46/coffee
f12e1e95f12da7e322a432a6386a1147c5549c3b
[ "MIT" ]
null
null
null
app/main/meta/__init__.py
by46/coffee
f12e1e95f12da7e322a432a6386a1147c5549c3b
[ "MIT" ]
null
null
null
app/main/meta/__init__.py
by46/coffee
f12e1e95f12da7e322a432a6386a1147c5549c3b
[ "MIT" ]
null
null
null
from .filter import filter_params from .user import UserModel from .coffee import CoffeeModel
31.666667
34
0.831579
13
95
6
0.615385
0
0
0
0
0
0
0
0
0
0
0
0.136842
95
3
35
31.666667
0.95122
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
1514c47575e819a36a4fcdf738b5aac6e1b787e0
8,541
py
Python
ngsutils/fastq/t/test_barcode_split.py
bgruening/ngsutils
417e90dc1918fb553dd84990f2c54bd8cea8f44d
[ "BSD-3-Clause" ]
57
2015-03-09T01:26:45.000Z
2022-02-22T07:26:01.000Z
ngsutils/fastq/t/test_barcode_split.py
bgruening/ngsutils
417e90dc1918fb553dd84990f2c54bd8cea8f44d
[ "BSD-3-Clause" ]
33
2015-02-03T23:24:46.000Z
2022-03-16T20:08:10.000Z
ngsutils/fastq/t/test_barcode_split.py
bgruening/ngsutils
417e90dc1918fb553dd84990f2c54bd8cea8f44d
[ "BSD-3-Clause" ]
33
2015-01-18T16:47:47.000Z
2022-02-22T07:28:09.000Z
#!/usr/bin/env python ''' Tests for barcode_split Note: These tests use small barcodes, so some of the test sequences are pretty arbitrary. In real-life, the barcodes should be much longer (12-16bp). ''' import unittest import os import ngsutils.fastq.barcode_split from ngsutils.support import FASTA from ngsutils.fastq import FASTQ barcodes = { 'tag1': ('ATAT', '5'), 'tag2': ('TGTG', '5'), 'tag3': ('CTCT', '3') } barcodes2 = { 'tag1': ('AATTAA', '5'), 'tag2': ('GGTTCC', '5'), 'tag3': ('CCAACC', '3') } class BarcodeSplitTest(unittest.TestCase): def test_check_tags_5(self): valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'ATATaaaatttt', 0, 0, False) self.assertTrue(valid) self.assertEqual(results[0], 'tag1') self.assertTrue(results[2]) valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'TGTGaaaatttt', 0, 0, False) self.assertTrue(valid) self.assertEqual(results[0], 'tag2') self.assertTrue(results[2]) def test_check_tags_5_multi(self): "pulls out only the 5' ATAT, so the next ATAT is kept in-tact" valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'ATATATATaaaatttt', 0, 0, False) self.assertTrue(valid) self.assertEqual(results[0], 'tag1') self.assertTrue(results[2]) def test_check_tags_3(self): valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'aattttaaCTCT', 0, 0, False) # print valid, results self.assertTrue(valid) self.assertEqual(results[0], 'tag3') self.assertTrue(results[2]) def test_check_tags_5_fail(self): valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'aaaattttATAT', 0, 0, False) self.assertFalse(valid) self.assertEqual(results[0], 'tag1') # not at right location self.assertTrue(results[2]) def test_check_tags_5_revcomp(self): # matches ATAT in rev-comp valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'aaaattttATAT', 0, 0, True) self.assertTrue(valid) self.assertEqual(results[0], 'tag1') self.assertFalse(results[2]) def test_check_tags_5_mm(self): valid, 
results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'ATTTacgtacgt', 0, 0, False) self.assertFalse(valid) self.assertEqual(results[0], 'tag1') self.assertTrue(results[2]) valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'ATTTacgtacgt', 1, 0, False) self.assertTrue(valid) self.assertEqual(results[0], 'tag1') self.assertTrue(results[2]) def test_check_tags_5_pos(self): valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'gATATacgtacgt', 0, 0, False) self.assertFalse(valid) self.assertEqual(results[0], 'tag1') self.assertTrue(results[2]) valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'gATATacgtacgt', 0, 1, False) self.assertTrue(valid) self.assertEqual(results[0], 'tag1') self.assertTrue(results[2]) def test_check_tags_3_mm(self): valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'ttttaaaaggggCGCT', 0, 0, False) self.assertFalse(valid) self.assertEqual(results[0], 'tag3') self.assertTrue(results[2]) valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'ttttaaaaggggCGCT', 1, 0, False) self.assertTrue(valid) self.assertEqual(results[0], 'tag3') self.assertTrue(results[2]) def test_check_tags_3_pos(self): valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'ttttaaaaggggCTCTg', 0, 0, False) self.assertFalse(valid) self.assertEqual(results[0], 'tag3') self.assertTrue(results[2]) valid, results = ngsutils.fastq.barcode_split.check_tags(barcodes, 'ttttaaaaggggCTCTg', 0, 1, False) self.assertTrue(valid) self.assertEqual(results[0], 'tag3') self.assertTrue(results[2]) def test_splitFasta(self): path = os.path.dirname(__file__) ngsutils.fastq.barcode_split.fastx_barcode_split(FASTA(os.path.join(path, 'test_barcodes.fasta')), os.path.join(path, 'out.%s.fasta'), barcodes2) self.assert_fasta_contains(os.path.join(path, 'out.%s.fasta'), { 'missing': 'foo-rc foo1 bar1 baz1 foo2 bar2 baz2 foo1-rc foo2-rc', 'tag1': 'foo', 'tag2': 'bar', 'tag3': 'baz' }) 
self._unlink_fastx(os.path.join(path, 'out.%s.fasta'), 'missing tag1 tag2 tag3'.split()) def test_splitFastaRevComp(self): path = os.path.dirname(__file__) ngsutils.fastq.barcode_split.fastx_barcode_split(FASTA(os.path.join(path, 'test_barcodes.fasta')), os.path.join(path, 'out.%s.fasta'), barcodes2, allow_revcomp=True) self.assert_fasta_contains(os.path.join(path, 'out.%s.fasta'), { 'missing': 'foo1 bar1 baz1 foo2 bar2 baz2 foo1-rc foo2-rc', 'tag1': 'foo foo-rc', 'tag2': 'bar', 'tag3': 'baz' }) self._unlink_fastx(os.path.join(path, 'out.%s.fasta'), 'missing tag1 tag2 tag3'.split()) def test_splitFastaEdit(self): path = os.path.dirname(__file__) ngsutils.fastq.barcode_split.fastx_barcode_split(FASTA(os.path.join(path, 'test_barcodes.fasta')), os.path.join(path, 'out.%s.fasta'), barcodes2, allow_revcomp=True, edits=1) self.assert_fasta_contains(os.path.join(path, 'out.%s.fasta'), { 'missing': 'foo2 bar2 baz2 foo2-rc', 'tag1': 'foo foo-rc foo1 foo1-rc', 'tag2': 'bar bar1', 'tag3': 'baz baz1' }) self._unlink_fastx(os.path.join(path, 'out.%s.fasta'), 'missing tag1 tag2 tag3'.split()) def test_splitFastaOffset(self): path = os.path.dirname(__file__) ngsutils.fastq.barcode_split.fastx_barcode_split(FASTA(os.path.join(path, 'test_barcodes.fasta')), os.path.join(path, 'out.%s.fasta'), barcodes2, allow_revcomp=True, pos=1) self.assert_fasta_contains(os.path.join(path, 'out.%s.fasta'), { 'missing': 'foo1 bar1 baz1 foo1-rc', 'tag1': 'foo foo-rc foo2 foo2-rc', 'tag2': 'bar bar2', 'tag3': 'baz baz2' }) self._unlink_fastx(os.path.join(path, 'out.%s.fasta'), 'missing tag1 tag2 tag3'.split()) def test_splitFastq(self): path = os.path.dirname(__file__) ngsutils.fastq.barcode_split.fastx_barcode_split(FASTQ(os.path.join(path, 'test_barcodes.fastq')), os.path.join(path, 'out.%s.fastq'), barcodes2, allow_revcomp=True) self.assert_fastq_contains(os.path.join(path, 'out.%s.fastq'), { 'missing': ('quux', '', ''), 'tag1': ('foo foo-rc', 'atcgatcgatcgatcg atcgatcgatcgatcg', 
'AAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAA'), 'tag2': ('bar', 'gctagctagctagcta', 'AAAAAAAAAAAAAAAA'), 'tag3': ('baz', 'acgtacgtacgtacgt', 'AAAAAAAAAAAAAAAA') }) self._unlink_fastx(os.path.join(path, 'out.%s.fastq'), 'missing tag1 tag2 tag3'.split()) def _unlink_fastx(self, base, names): for name in names: os.unlink(base % name) def assert_fasta_contains(self, base, args): for tag in args: valid = args[tag].split() fa = FASTA(base % tag) count = 0 for read in fa.fetch(): if read.name in valid: count += 1 else: self.assertEqual('extra read in %s' % tag, read.name) self.assertEqual(count, len(valid)) def assert_fastq_contains(self, base, args): for tag in args: valid = args[tag][0].split() seq_qual = {} if args[tag][1]: for n, s, q in zip(valid, args[tag][1].split(), args[tag][2].split()): seq_qual[n] = (s, q) fq = FASTQ(base % tag) count = 0 for read in fq.fetch(): if read.name in valid: count += 1 if seq_qual: self.assertEqual(seq_qual[read.name], (read.seq, read.qual)) else: self.assertEqual('extra read in %s' % tag, read.name) self.assertEqual(count, len(valid)) if __name__ == '__main__': unittest.main()
40.478673
182
0.616438
1,069
8,541
4.78391
0.139383
0.061009
0.078217
0.097771
0.773563
0.765546
0.745209
0.725655
0.706492
0.663864
0
0.02686
0.241541
8,541
210
183
40.671429
0.762581
0.037935
0
0.460606
0
0
0.155093
0
0
0
0
0
0.327273
1
0.10303
false
0
0.030303
0
0.139394
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
128e05b468000cd1e7d5d92d6b1ae4080ee7ff52
157
py
Python
Zad3/forum/admin.py
YuseqYaseq/Logic
295327312c6804dbbbf230e81b1724f81f26168e
[ "MIT" ]
null
null
null
Zad3/forum/admin.py
YuseqYaseq/Logic
295327312c6804dbbbf230e81b1724f81f26168e
[ "MIT" ]
null
null
null
Zad3/forum/admin.py
YuseqYaseq/Logic
295327312c6804dbbbf230e81b1724f81f26168e
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Thread, Message # Register your models here. admin.site.register(Thread) admin.site.register(Message)
19.625
35
0.802548
22
157
5.727273
0.545455
0.142857
0.269841
0
0
0
0
0
0
0
0
0
0.11465
157
7
36
22.428571
0.906475
0.165605
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
12bdd52654b4ce48d763184ee2676cd0d26a2800
334
py
Python
jupy4syn/commands/ctCommand.py
gabrielpreviato/test
a5e57d00a546bd8d939d5d71bb19364fb4524441
[ "0BSD" ]
null
null
null
jupy4syn/commands/ctCommand.py
gabrielpreviato/test
a5e57d00a546bd8d939d5d71bb19364fb4524441
[ "0BSD" ]
null
null
null
jupy4syn/commands/ctCommand.py
gabrielpreviato/test
a5e57d00a546bd8d939d5d71bb19364fb4524441
[ "0BSD" ]
null
null
null
from scan_utils import ct from jupy4syn.commands.ICommand import ICommand class ctCommand(ICommand): def __init__(self): pass def exec(self, parameters): return ct.main(parameters.split()) def args(self, initial_args): return initial_args def show(self, initial_args): return True
19.647059
47
0.679641
42
334
5.214286
0.547619
0.150685
0.136986
0.191781
0
0
0
0
0
0
0
0.003953
0.242515
334
16
48
20.875
0.86166
0
0
0
0
0
0
0
0
0
0
0
0
1
0.363636
false
0.090909
0.181818
0.272727
0.909091
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
5
12c9569738305d6b431a02f72bb8573b85ae4b25
1,052
py
Python
apkg/apkg.py
soimort/agda-pkg
1fb10020213bc7c590570ffe34aab4c66f47fb8b
[ "MIT" ]
null
null
null
apkg/apkg.py
soimort/agda-pkg
1fb10020213bc7c590570ffe34aab4c66f47fb8b
[ "MIT" ]
null
null
null
apkg/apkg.py
soimort/agda-pkg
1fb10020213bc7c590570ffe34aab4c66f47fb8b
[ "MIT" ]
1
2022-01-29T11:37:06.000Z
2022-01-29T11:37:06.000Z
''' apkg ~~~~ A package manager for Agda. ''' # ---------------------------------------------------------------------------- import click from .commands import * from .commands.clean import clean from .commands.create import create from .commands.freeze import freeze from .commands.list import list from .commands.info import info from .commands.init import init from .commands.install import install from .commands.uninstall import uninstall from .commands.search import search from .commands.update import update from .commands.upgrade import upgrade # ---------------------------------------------------------------------------- @click.group() @click.version_option() def cli(): """A package manager for Agda.""" cli.add_command(init) cli.add_command(install) cli.add_command(uninstall) cli.add_command(freeze) cli.add_command(list) cli.add_command(info) cli.add_command(clean) cli.add_command(create) cli.add_command(search) cli.add_command(update) cli.add_command(upgrade)
23.909091
78
0.623574
122
1,052
5.278689
0.229508
0.223602
0.22205
0.055901
0.068323
0
0
0
0
0
0
0
0.148289
1,052
43
79
24.465116
0.71875
0.210076
0
0
0
0
0
0
0
0
0
0
0
1
0.037037
true
0
0.481481
0
0.518519
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
12d25dede1b582f3c835994c10f7e87e8717fee0
224
py
Python
7_kyu/sum_of_odd_numbers.py
dimishpatriot/way_on_the_highway
4865db946632b7bd3d74509a20a307841c02169d
[ "MIT" ]
null
null
null
7_kyu/sum_of_odd_numbers.py
dimishpatriot/way_on_the_highway
4865db946632b7bd3d74509a20a307841c02169d
[ "MIT" ]
null
null
null
7_kyu/sum_of_odd_numbers.py
dimishpatriot/way_on_the_highway
4865db946632b7bd3d74509a20a307841c02169d
[ "MIT" ]
null
null
null
"""Given the triangle of consecutive odd numbers: 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 ... """ def row_sum_odd_numbers(n: int) -> int: return n ** 3
16
49
0.450893
33
224
2.969697
0.848485
0.204082
0
0
0
0
0
0
0
0
0
0.218487
0.46875
224
13
50
17.230769
0.605042
0.696429
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
12e80b401aba984f4648146ff695556a389f7577
25
py
Python
videoanalyst/model/loss/__init__.py
lizhenbang56/Manipulating-Template-Pixels-for-Model-Adaptation-of-Siamese-Visual-Tracking
76b88d8e68ac3d575a2ce81fc07ee2fce5f050d6
[ "MIT" ]
2
2020-07-30T08:26:08.000Z
2020-11-24T07:40:46.000Z
videoanalyst/model/loss/__init__.py
shartoo/video_analyst
db7c1b323f26ec19533a4b19804cf2c8a52643e5
[ "MIT" ]
null
null
null
videoanalyst/model/loss/__init__.py
shartoo/video_analyst
db7c1b323f26ec19533a4b19804cf2c8a52643e5
[ "MIT" ]
null
null
null
from .loss_impl import *
25
25
0.76
4
25
4.5
1
0
0
0
0
0
0
0
0
0
0
0
0.16
25
1
25
25
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
42221f009ac99fc6f86326bcdd2d193656207aa9
58
py
Python
backend/socket_server/handler/client_disconnect.py
JohnnyDevNull/python-nuxt-starter
e6158818b7536212dafec2dfe3bc70385110440c
[ "MIT" ]
null
null
null
backend/socket_server/handler/client_disconnect.py
JohnnyDevNull/python-nuxt-starter
e6158818b7536212dafec2dfe3bc70385110440c
[ "MIT" ]
1
2022-01-22T12:45:49.000Z
2022-01-22T12:45:49.000Z
backend/socket_server/handler/client_disconnect.py
JohnnyDevNull/python-nuxt-starter
e6158818b7536212dafec2dfe3bc70385110440c
[ "MIT" ]
null
null
null
def client_disconnect(): print('Client disconnected')
19.333333
32
0.741379
6
58
7
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.137931
58
2
33
29
0.84
0
0
0
0
0
0.327586
0
0
0
0
0
0
1
0.5
true
0
0
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
1
0
5
42942027cf0d72a947321e707d1525fd58ad9f4b
22,616
py
Python
algotom/post/postprocessing.py
gbzan/algotom
314f05b6a226e666a8ae4417b151d896606e7db4
[ "Apache-2.0" ]
null
null
null
algotom/post/postprocessing.py
gbzan/algotom
314f05b6a226e666a8ae4417b151d896606e7db4
[ "Apache-2.0" ]
null
null
null
algotom/post/postprocessing.py
gbzan/algotom
314f05b6a226e666a8ae4417b151d896606e7db4
[ "Apache-2.0" ]
null
null
null
# ============================================================================ # ============================================================================ # Copyright (c) 2021 Nghia T. Vo. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ # Author: Nghia T. Vo # E-mail: # Description: Python implementations of postprocessing techniques. # Contributors: # ============================================================================ """ Module of methods in the postprocessing stage: - Get statistical information of reconstructed images or a dataset. - Downsample 2D, 3D array, or a dataset. - Rescale 2D, 3D array or a dataset to 8-bit or 16-bit data-type. - Removing ring artifacts in a reconstructed image by transform back and forth between the polar coordinates and the Cartesian coordinates. """ import os import numpy as np from scipy.ndimage import gaussian_filter import algotom.util.utility as util import algotom.io.loadersaver as losa import algotom.prep.removal as remo def get_statical_information(mat, percentile=(5, 95), denoise=False): """ Get statical information of an image. Parameters ---------- mat : array_like 2D array. Projection image, sinogram image, or reconstructed image. percentile : tuple of floats Tuple of (min_percentile, max_percentile) to compute. Must be between 0 and 100 inclusive. 
denoise: bool, optional Enable/disable denoising before extracting statistical information. Returns ------- gmin : float The minimum value of the data array. gmax : float The maximum value of the data array. min_percent : float The first computed percentile of the data array. max_percent : tuple of floats The last computed percentile of the data array. mean : float The mean of the data array. median : float The median of the data array. variance : float The variance of the data array. """ if denoise is True: mat = gaussian_filter(mat, 2) gmin = np.min(mat) gmax = np.max(mat) min_percent = np.percentile(mat, percentile[0]) max_percent = np.percentile(mat, percentile[-1]) median = np.median(mat) mean = np.mean(mat) variance = np.var(mat) return gmin, gmax, min_percent, max_percent, mean, median, variance def get_statical_information_dataset(input_, percentile=(5, 95), skip=5, denoise=False, key_path=None): """ Get statical information of a dataset. This can be a folder of tif files, a hdf file, or a 3D array. Parameters ---------- input_ : str, hdf file, or array_like It can be a folder path to tif files, a hdf file, or a 3D array. percentile : tuple of floats Tuple of (min_percentile, max_percentile) to compute. Must be between 0 and 100 inclusive. skip : int Skipping step of reading input. denoise: bool, optional Enable/disable denoising before extracting statistical information. key_path : str, optional Key path to the dataset if the input is the hdf file. Returns ------- gmin : float The global minimum value of the data array. gmax : float The global maximum value of the data array. min_percent : float The global min of the first computed percentile of the data array. max_percent : tuple of floats The global min of the last computed percentile of the data array. mean : float The mean of the data array. median : float The median of the data array. variance : float The mean of the variance of the data array. 
""" if isinstance(input_, str) and (os.path.splitext(input_)[-1] == ""): list_file = losa.find_file(input_ + "/*.tif*") depth = len(list_file) if depth == 0: raise ValueError("No tif files in the folder: {}".format(input_)) list_stat = [] for i in range(0, depth, skip): mat = losa.load_image(list_file[i]) if denoise is True: mat = gaussian_filter(mat, 2) list_stat.append(get_statical_information(mat, percentile, denoise)) else: if isinstance(input_, str): file_ext = os.path.splitext(input_)[-1] if not (file_ext == '.hdf' or file_ext == '.h5' or file_ext == ".nxs"): raise ValueError( "Can't open this type of file format {}".format(file_ext)) if key_path is None: raise ValueError( "Please provide the key path to the dataset!!!") input_ = losa.load_hdf(input_, key_path) depth = len(input_) list_stat = [] for i in range(0, depth, skip): mat = input_[i] if denoise is True: mat = gaussian_filter(mat, 2) list_stat.append(get_statical_information(mat, percentile, denoise)) list_stat = np.asarray(list_stat) gmin = np.min(list_stat[:, 0]) gmax = np.max(list_stat[:, 1]) min_percent = np.min(list_stat[:, 2]) max_percent = np.max(list_stat[:, 3]) median = np.median(list_stat[:, 4]) mean = np.mean(list_stat[:, 5]) variance = np.mean(list_stat[:, 6]) return gmin, gmax, min_percent, max_percent, mean, median, variance def downsample(mat, cell_size, method="mean"): """ Downsample an image. Parameters ---------- mat : array_like 2D array. cell_size : int or tuple of int Window size along axes used for grouping pixels. method : {"mean", "median", "max", "min"} Downsampling method. Returns ------- array_like Downsampled image. 
""" if method == "median": dsp_method = np.median elif method == "max": dsp_method = np.max elif method == "min": dsp_method = np.amin else: dsp_method = np.mean (height, width) = mat.shape if isinstance(cell_size, int): cell_size = (cell_size, cell_size) height_dsp = height // cell_size[0] width_dsp = width // cell_size[1] mat = mat[:height_dsp * cell_size[0], :width_dsp * cell_size[1]] mat_dsp = mat.reshape( height_dsp, cell_size[0], width_dsp, cell_size[1]) mat_dsp = dsp_method(dsp_method(mat_dsp, axis=-1), axis=1) return mat_dsp def downsample_dataset(input_, output, cell_size, method="mean", key_path=None): """ Downsample a dataset. This can be a folder of tif files, a hdf file, or a 3D array. Parameters ---------- input_ : str, array_like It can be a folder path to tif files, a hdf file, or 3D array. output : str, None It can be a folder path, a hdf file path, or None (memory consuming). cell_size : int or tuple of int Window size along axes used for grouping pixels. method : {"mean", "median", "max", "min"} Downsampling method. key_path : str, optional Key path to the dataset if the input is the hdf file. Returns ------- array_like or None If output is None, returning an 3D array. """ if output is not None: file_base, file_ext = os.path.splitext(output) if file_ext != "": file_base = os.path.dirname(output) if os.path.exists(file_base): raise ValueError("Folder exists!!! 
Please choose another path!!!") if method == "median": dsp_method = np.median elif method == "max": dsp_method = np.max elif method == "min": dsp_method = np.amin else: dsp_method = np.mean if isinstance(cell_size, int): cell_size = (cell_size, cell_size, cell_size) if isinstance(input_, str) and (os.path.splitext(input_)[-1] == ""): list_file = losa.find_file(input_ + "/*.tif*") depth = len(list_file) if depth == 0: raise ValueError("No tif files in the folder: {}".format(input_)) (height, width) = np.shape(losa.load_image(list_file[0])) depth_dsp = depth // cell_size[0] height_dsp = height // cell_size[1] width_dsp = width // cell_size[2] num = 0 if (depth_dsp != 0) and (height_dsp != 0) and (width_dsp != 0): if output is not None: file_base, file_ext = os.path.splitext(output) if file_ext != "": if not (file_ext == '.hdf' or file_ext == '.h5' or file_ext == ".nxs"): raise ValueError( "File extension must be hdf, h5, or nxs") output = file_base + file_ext data_out = losa.open_hdf_stream( output, (depth_dsp, height_dsp, width_dsp), key_path="downsample/data", overwrite=False) data_dsp = [] for i in range(0, depth, cell_size[0]): if (i + cell_size[0]) > depth: break else: mat = [] for j in range(i, i + cell_size[0]): mat.append(losa.load_image(list_file[j])) mat = np.asarray(mat) mat = mat[:, :height_dsp * cell_size[1], :width_dsp * cell_size[2]] mat = mat.reshape(1, cell_size[0], height_dsp, cell_size[1], width_dsp, cell_size[2]) mat_dsp = dsp_method( dsp_method(dsp_method(mat, axis=-1), axis=1), axis=2) if output is None: data_dsp.append(mat_dsp[0]) else: if file_ext == "": out_name = "0000" + str(num) losa.save_image( output + "/img_" + out_name[-5:] + ".tif", mat_dsp[0]) else: data_out[num] = mat_dsp[0] num += 1 else: raise ValueError("Incorrect cell size {}".format(cell_size)) else: if isinstance(input_, str): file_ext = os.path.splitext(input_)[-1] if not (file_ext == '.hdf' or file_ext == '.h5' or file_ext == ".nxs"): raise ValueError( "Can't open this type 
of file format {}".format(file_ext)) if key_path is None: raise ValueError( "Please provide the key path to the dataset!!!") input_ = losa.load_hdf(input_, key_path) (depth, height, width) = input_.shape depth_dsp = depth // cell_size[0] height_dsp = height // cell_size[1] width_dsp = width // cell_size[2] if (depth_dsp != 0) and (height_dsp != 0) and (width_dsp != 0): if output is None: input_ = input_[:depth_dsp * cell_size[0], :height_dsp * cell_size[1], :width_dsp * cell_size[2]] input_ = input_.reshape( depth_dsp, cell_size[0], height_dsp, cell_size[1], width_dsp, cell_size[2]) data_dsp = dsp_method( dsp_method(dsp_method(input_, axis=-1), axis=1), axis=2) else: file_base, file_ext = os.path.splitext(output) if file_ext != "": if not (file_ext == '.hdf' or file_ext == '.h5' or file_ext == ".nxs"): raise ValueError( "File extension must be hdf, h5, or nxs") output = file_base + file_ext data_out = losa.open_hdf_stream( output, (depth_dsp, height_dsp, width_dsp), key_path="downsample/data", overwrite=False) num = 0 for i in range(0, depth, cell_size[0]): if (i + cell_size[0]) > depth: break else: mat = input_[i:i + cell_size[0], :height_dsp * cell_size[1], :width_dsp * cell_size[2]] mat = mat.reshape(1, cell_size[0], height_dsp, cell_size[1], width_dsp, cell_size[2]) mat_dsp = dsp_method(dsp_method( dsp_method(mat, axis=-1), axis=1), axis=2) if file_ext != "": data_out[num] = mat_dsp[0] else: out_name = "0000" + str(num) losa.save_image( output + "/img_" + out_name[-5:] + ".tif", mat_dsp[0]) num += 1 else: raise ValueError("Incorrect cell size {}".format(cell_size)) if output is None: return np.asarray(data_dsp) def rescale(mat, nbit=16, minmax=None): """ Rescale a 32-bit array to 16-bit/8-bit data. Parameters ---------- mat : array_like nbit : {8,16} Rescaled data-type: 8-bit or 16-bit. minmax : tuple of float, or None Minimum and maximum values used for rescaling. Returns ------- array_like Rescaled array. 
""" if minmax is None: gmin, gmax = np.min(mat), np.max(mat) else: (gmin, gmax) = minmax mat = np.clip(mat, gmin, gmax) mat = (mat - gmin) / (gmax - gmin) if nbit == 8: mat = np.uint8(np.clip(mat * 255, 0, 255)) else: mat = np.uint16(np.clip(mat * 65535, 0, 65535)) return mat def rescale_dataset(input_, output, nbit=16, minmax=None, skip=None, key_path=None): """ Rescale a dataset to 8-bit or 16-bit data-type. The dataset can be a folder of tif files, a hdf file, or a 3D array. Parameters ---------- input_ : str, array_like It can be a folder path to tif files, a hdf file, or 3D array. output : str, None It can be a folder path, a hdf file path, or None (memory consuming). nbit : {8,16} Rescaled data-type: 8-bit or 16-bit. minmax : tuple of float, or None Minimum and maximum values used for rescaling. They are calculated if None is given. skip : int or None Skipping step of reading input used for getting statistical information. key_path : str, optional Key path to the dataset if the input is the hdf file. Returns ------- array_like or None If output is None, returning an 3D array. """ if output is not None: file_base, file_ext = os.path.splitext(output) if file_ext != "": file_base = os.path.dirname(output) if os.path.exists(file_base): raise ValueError("Folder exists!!! 
Please choose another path!!!") if isinstance(input_, str) and (os.path.splitext(input_)[-1] == ""): list_file = losa.find_file(input_ + "/*.tif*") depth = len(list_file) if depth == 0: raise ValueError("No tif files in the folder: {}".format(input_)) if minmax is None: if skip is None: skip = int(np.ceil(0.15 * depth)) (gmin, gmax) = get_statical_information_dataset(input_, skip=skip)[ 0:2] else: (gmin, gmax) = minmax if output is not None: file_base, file_ext = os.path.splitext(output) if file_ext != "": if not (file_ext == '.hdf' or file_ext == '.h5' or file_ext == ".nxs"): raise ValueError("File extension must be hdf, h5, or nxs") output = file_base + file_ext (height, width) = np.shape(losa.load_image(list_file[0])) if nbit == 8: data_type = "uint8" else: data_type = "uint16" data_out = losa.open_hdf_stream(output, (depth, height, width), key_path="rescale/data", data_type=data_type, overwrite=False) data_res = [] for i in range(0, depth): mat = rescale( losa.load_image(list_file[i]), nbit=nbit, minmax=(gmin, gmax)) if output is None: data_res.append(mat) else: file_base, file_ext = os.path.splitext(output) if file_ext == "": out_name = "0000" + str(i) losa.save_image(output + "/img_" + out_name[-5:] + ".tif", mat) else: data_out[i] = mat else: if isinstance(input_, str): file_ext = os.path.splitext(input_)[-1] if not (file_ext == '.hdf' or file_ext == '.h5' or file_ext == ".nxs"): raise ValueError( "Can't open this type of file format {}".format(file_ext)) if key_path is None: raise ValueError( "Please provide the key path to the dataset!!!") input_ = losa.load_hdf(input_, key_path) (depth, height, width) = input_.shape if minmax is None: if skip is None: skip = int(np.ceil(0.15 * depth)) (gmin, gmax) = get_statical_information_dataset(input_, skip=skip, key_path=key_path)[ 0:2] else: (gmin, gmax) = minmax data_res = [] if output is not None: file_base, file_ext = os.path.splitext(output) if file_ext != "": if not (file_ext == '.hdf' or file_ext == '.h5' or 
file_ext == ".nxs"): raise ValueError("File extension must be hdf, h5, or nxs") output = file_base + file_ext if nbit == 8: data_type = "uint8" else: data_type = "uint16" data_out = losa.open_hdf_stream( output, (depth, height, width), key_path="rescale/data", data_type=data_type, overwrite=False) for i in range(0, depth): mat = rescale(input_[i], nbit=nbit, minmax=(gmin, gmax)) if output is None: data_res.append(mat) else: file_base, file_ext = os.path.splitext(output) if file_ext != "": data_out[i] = mat else: out_name = "0000" + str(i) losa.save_image(output + "/img_" + out_name[-5:] + ".tif", mat) if output is None: return np.asarray(data_res) def remove_ring_based_fft(mat, u=20, n=8, v=1, sort=False): """ Remove ring artifacts in the reconstructed image by combining the polar transform and the fft-based method. Parameters ---------- mat : array_like Square array. Reconstructed image u : int Cutoff frequency. n : int Filter order. v : int Number of rows (* 2) to be applied the filter. sort : bool, optional Apply sorting (Ref. [2]) if True. Returns ------- array_like Ring-removed image. References ---------- .. [1] https://doi.org/10.1063/1.1149043 .. [2] https://doi.org/10.1364/OE.26.028396 """ (nrow, ncol) = mat.shape if nrow != ncol: raise ValueError( "Width and height of the reconstructed image are not the same") mask = util.make_circle_mask(ncol, 1.0) (x_mat, y_mat) = util.rectangular_from_polar(ncol, ncol, ncol, ncol) (r_mat, theta_mat) = util.polar_from_rectangular(ncol, ncol, ncol, ncol) polar_mat = util.mapping(mat, x_mat, y_mat) polar_mat = remo.remove_stripe_based_fft(polar_mat, u, n, v, sort=sort) mat_rec = util.mapping(polar_mat, r_mat, theta_mat) return mat_rec * mask def remove_ring_based_wavelet_fft(mat, level=5, size=1, wavelet_name="db9", sort=False): """ Remove ring artifacts in a reconstructed image by combining the polar transform and the wavelet-fft-based method (Ref. [1]). Parameters ---------- mat : array_like Square array. 
Reconstructed image level : int Wavelet decomposition level. size : int Damping parameter. Larger is stronger. wavelet_name : str Name of a wavelet. Search pywavelets API for a full list. sort : bool, optional Apply sorting (Ref. [2]) if True. Returns ------- array_like Ring-removed image. References ---------- .. [1] https://doi.org/10.1364/OE.17.008567 .. [2] https://doi.org/10.1364/OE.26.028396 """ (nrow, ncol) = mat.shape if nrow != ncol: raise ValueError( "Width and height of the reconstructed image are not the same") mask = util.make_circle_mask(ncol, 1.0) (x_mat, y_mat) = util.rectangular_from_polar(ncol, ncol, ncol, ncol) (r_mat, theta_mat) = util.polar_from_rectangular(ncol, ncol, ncol, ncol) polar_mat = util.mapping(mat, x_mat, y_mat) polar_mat = remo.remove_stripe_based_wavelet_fft(polar_mat, level, size, wavelet_name, sort=sort) mat_rec = util.mapping(polar_mat, r_mat, theta_mat) return mat_rec * mask
38.528109
80
0.534887
2,827
22,616
4.125221
0.117439
0.034985
0.016978
0.016807
0.781598
0.751758
0.730921
0.719088
0.693878
0.67467
0
0.020492
0.350504
22,616
586
81
38.593857
0.773436
0.277812
0
0.747059
0
0
0.064794
0
0
0
0
0
0
1
0.023529
false
0
0.017647
0
0.064706
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
429a3b9f7b2c80becb4ddde048b1884840251023
439
py
Python
proposals/translation.py
mindruion/test
d27ef1caf8f76aead934bc83be7729f79a4be503
[ "MIT" ]
2
2017-04-22T11:07:13.000Z
2018-03-02T12:23:24.000Z
proposals/translation.py
mindruion/test
d27ef1caf8f76aead934bc83be7729f79a4be503
[ "MIT" ]
124
2020-04-30T07:06:58.000Z
2022-03-28T12:50:16.000Z
proposals/translation.py
mindruion/test
d27ef1caf8f76aead934bc83be7729f79a4be503
[ "MIT" ]
1
2021-08-04T11:44:21.000Z
2021-08-04T11:44:21.000Z
from modeltranslation.translator import register, TranslationOptions from .models import Funding, Relation, Institution @register(Funding) class FundingTranslationOptions(TranslationOptions): fields = ('description',) @register(Institution) class FundingTranslationOptions(TranslationOptions): fields = ('description',) @register(Relation) class RelationTranslationOptions(TranslationOptions): fields = ('description',)
23.105263
68
0.797267
33
439
10.606061
0.454545
0.205714
0.3
0.308571
0.417143
0.417143
0
0
0
0
0
0
0.109339
439
18
69
24.388889
0.895141
0
0
0.454545
0
0
0.075171
0
0
0
0
0
0
1
0
false
0
0.181818
0
0.727273
0
0
0
1
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
c41fa389788d2bc39269275d11e4597f13ff2e7a
167
py
Python
python/geospark/register/__init__.py
Maxar-Corp/GeoSpark
6248c6773dc88bf3354ea9b223f16ceb064e7627
[ "Apache-2.0", "MIT" ]
1
2021-10-19T07:57:29.000Z
2021-10-19T07:57:29.000Z
python/geospark/register/__init__.py
mayankkt9/GeoSpark
618da90413f7d86c59def92ba765fbd6d9d49761
[ "Apache-2.0", "MIT" ]
3
2020-03-24T18:20:35.000Z
2021-02-02T22:36:37.000Z
python/geospark/register/__init__.py
mayankkt9/GeoSpark
618da90413f7d86c59def92ba765fbd6d9d49761
[ "Apache-2.0", "MIT" ]
1
2021-09-26T15:51:22.000Z
2021-09-26T15:51:22.000Z
from geospark.register.geo_registrator import GeoSparkRegistrator from geospark.register.uploading import upload_jars __all__ = ["GeoSparkRegistrator", "upload_jars"]
41.75
65
0.856287
18
167
7.555556
0.611111
0.176471
0.294118
0
0
0
0
0
0
0
0
0
0.071856
167
4
66
41.75
0.877419
0
0
0
0
0
0.178571
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
c426ccec48489402b5fe37eec30fdf9b8a18263e
17
py
Python
app/co/__init__.py
random-forest/co
398a77914cbff2af93b4c6c114a97075a4a13aa8
[ "MIT" ]
null
null
null
app/co/__init__.py
random-forest/co
398a77914cbff2af93b4c6c114a97075a4a13aa8
[ "MIT" ]
null
null
null
app/co/__init__.py
random-forest/co
398a77914cbff2af93b4c6c114a97075a4a13aa8
[ "MIT" ]
null
null
null
from co import *
8.5
16
0.705882
3
17
4
1
0
0
0
0
0
0
0
0
0
0
0
0.235294
17
1
17
17
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
c434f990f7277abcb8d31f293bd2e90d463634cb
188
py
Python
tests/test_get_indices.py
gfsemelas/nesting
eda46bb6e1dc2844208849452dee50fd324486df
[ "MIT" ]
null
null
null
tests/test_get_indices.py
gfsemelas/nesting
eda46bb6e1dc2844208849452dee50fd324486df
[ "MIT" ]
null
null
null
tests/test_get_indices.py
gfsemelas/nesting
eda46bb6e1dc2844208849452dee50fd324486df
[ "MIT" ]
null
null
null
from nesting import get_indices def test_get_indices(): test_list = [1, 2, [3, 4], [5, [100, 200, ['hello']], 23, 11], 1, 7] assert get_indices(test_list, 'hello') == [3, 1, 2, 0]
37.6
72
0.601064
33
188
3.242424
0.636364
0.280374
0.261682
0.336449
0
0
0
0
0
0
0
0.138158
0.191489
188
5
73
37.6
0.565789
0
0
0
0
0
0.05291
0
0
0
0
0
0.25
1
0.25
false
0
0.25
0
0.5
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
5
c45a89abf0b9a7d3b7b0b281b5f210a53c85ca80
97
py
Python
Task/Filter/Python/filter-3.py
LaudateCorpus1/RosettaCodeData
9ad63ea473a958506c041077f1d810c0c7c8c18d
[ "Info-ZIP" ]
5
2021-01-29T20:08:05.000Z
2022-03-22T06:16:05.000Z
Task/Filter/Python/filter-3.py
seanwallawalla-forks/RosettaCodeData
9ad63ea473a958506c041077f1d810c0c7c8c18d
[ "Info-ZIP" ]
null
null
null
Task/Filter/Python/filter-3.py
seanwallawalla-forks/RosettaCodeData
9ad63ea473a958506c041077f1d810c0c7c8c18d
[ "Info-ZIP" ]
1
2021-04-13T04:19:31.000Z
2021-04-13T04:19:31.000Z
values = range(10) values[::2] = [11,13,15,17,19] print values 11, 1, 13, 3, 15, 5, 17, 7, 19, 9
19.4
33
0.57732
22
97
2.545455
0.681818
0
0
0
0
0
0
0
0
0
0
0.35443
0.185567
97
4
34
24.25
0.35443
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.25
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
c4793f07bcc999859482152f63e865ea46e63c05
51,884
py
Python
tests/parsers/test_vasp.py
lancekavalsky/pif-dft
8bb4047d7c5b7e2ee7dfee8ed4b8dd45e7943bde
[ "Apache-2.0" ]
5
2017-03-06T19:05:09.000Z
2018-10-22T16:56:04.000Z
tests/parsers/test_vasp.py
aced-differentiate/pif-dft
8bb4047d7c5b7e2ee7dfee8ed4b8dd45e7943bde
[ "Apache-2.0" ]
22
2017-02-01T03:38:44.000Z
2019-10-04T14:17:31.000Z
tests/parsers/test_vasp.py
aced-differentiate/pif-dft
8bb4047d7c5b7e2ee7dfee8ed4b8dd45e7943bde
[ "Apache-2.0" ]
10
2017-02-01T03:37:21.000Z
2020-04-22T20:34:19.000Z
import unittest from dfttopif.parsers import VaspParser from dfttopif.parsers.base import InvalidIngesterException from ..test_pif import unpack_example, delete_example from pypif.obj.common.value import Value import os import shutil class TestVASPParser(unittest.TestCase): def get_parser(self,name): '''Get a VaspParser for a certain test''' unpack_example(os.path.join('examples', 'vasp', name+'.tar.gz')) return VaspParser.generate_from_directory(name) def test_perov(self): # Parse the results parser = self.get_parser('perov_relax_U') # Test the settings self.assertEquals('VASP', parser.get_name()) strc = parser.get_output_structure() self.assertAlmostEquals(3.9088966983609255, strc.cell[1][1]) self.assertEquals(['La','Mn','O','O','O'], strc.get_chemical_symbols()) self.assertEquals('LaMnO3', parser.get_composition()) # Test the density self.assertEqual("g/(cm^3)", parser.get_density().units) self.assertAlmostEqual(6.7238121, parser.get_density().scalars[0].value, places=6) # Test the cutoff energy res = parser.get_cutoff_energy() self.assertEquals(400, res.scalars[0].value) self.assertEquals('eV', res.units) self.assertTrue(parser.is_converged().scalars[0].value) self.assertAlmostEqual(-39.85550532, parser.get_total_energy().scalars[0].value) self.assertEquals(None, parser.uses_SOC()) self.assertTrue(isinstance(parser.is_relaxed(), Value)) self.assertEquals('PAW_PBE', parser.get_xc_functional().scalars[0].value) self.assertEquals(['La','Mn','O'], list(map(lambda x: x.value, parser.get_pp_name().vectors[0]))) self.assertEquals(8640, parser.get_KPPRA().scalars[0].value) self.assertEquals('5.3.2', parser.get_version_number()) self.assertEquals({'Type': 2, 'Values':{'La':{'L':-1,'U':0.0,'J':0.0},'Mn':{'L':2,'U':3.8,'J':0.0},'O':{'L':-1,'U':0.0,'J':0.0}}}, parser.get_U_settings().as_dictionary()) self.assertEquals(None, parser.get_vdW_settings()) self.assertEquals(0.09, parser.get_pressure().scalars[0].value) 
self.assertEquals([[0.08970,0,0],[0,0.08970,0],[0,0,0.08970]], list(map(lambda x: list(map(lambda y: y.value, x)), parser.get_stresses().matrices[0]))) self.assertEquals(0, parser.get_band_gap().scalars[0].value) dos = parser.get_dos() self.assertEquals([-26.378, -26.241, -26.103000000000002, -25.966000000000001, -25.829000000000001, -25.690999999999999, -25.553999999999998, -25.417000000000002, -25.279, -25.141999999999999, -25.004999999999999, -24.867000000000001, -24.73, -24.593, -24.454999999999998, -24.318000000000001, -24.181000000000001, -24.042999999999999, -23.905999999999999, -23.768999999999998, -23.631, -23.494, -23.356999999999999, -23.219000000000001, -23.082000000000001, -22.945, -22.806999999999999, -22.670000000000002, -22.533000000000001, -22.395, -22.257999999999999, -22.120999999999999, -21.983000000000001, -21.846, -21.709, -21.571000000000002, -21.434000000000001, -21.297000000000001, -21.158999999999999, -21.021999999999998, -20.884, -20.747, -20.609999999999999, -20.472000000000001, -20.335000000000001, -20.198, -20.059999999999999, -19.922999999999998, -19.786000000000001, -19.648, -19.510999999999999, -19.373999999999999, -19.236000000000001, -19.099, -18.962, -18.824000000000002, -18.687000000000001, -18.550000000000001, -18.411999999999999, -18.274999999999999, -18.138000000000002, -18.0, -17.863, -17.725999999999999, -17.588000000000001, -17.451000000000001, -17.314, -17.175999999999998, -17.039000000000001, -16.902000000000001, -16.763999999999999, -16.626999999999999, -16.489999999999998, -16.352, -16.215, -16.077999999999999, -15.94, -15.803000000000001, -15.666, -15.528, -15.391, -15.254, -15.116, -14.978999999999999, -14.842000000000001, -14.704000000000001, -14.567, -14.43, -14.292, -14.154999999999999, -14.018000000000001, -13.880000000000001, -13.743, -13.606, -13.468, -13.331, -13.194000000000001, -13.055999999999999, -12.919, -12.782, -12.644, -12.507, -12.369999999999999, -12.231999999999999, -12.095000000000001, -11.958, 
-11.82, -11.683, -11.545999999999999, -11.407999999999999, -11.271000000000001, -11.134, -10.996, -10.859, -10.722, -10.584, -10.446999999999999, -10.31, -10.172000000000001, -10.035, -9.8979999999999997, -9.7599999999999998, -9.6229999999999993, -9.4860000000000007, -9.3480000000000008, -9.2110000000000003, -9.0739999999999998, -8.9359999999999999, -8.7989999999999995, -8.6620000000000008, -8.5239999999999991, -8.3870000000000005, -8.25, -8.1120000000000001, -7.9749999999999996, -7.8380000000000001, -7.7000000000000002, -7.5629999999999997, -7.4260000000000002, -7.2880000000000003, -7.1509999999999998, -7.0140000000000002, -6.8760000000000003, -6.7389999999999999, -6.6020000000000003, -6.4640000000000004, -6.327, -6.1890000000000001, -6.0519999999999996, -5.915, -5.7770000000000001, -5.6399999999999997, -5.5030000000000001, -5.3650000000000002, -5.2279999999999998, -5.0910000000000002, -4.9530000000000003, -4.8159999999999998, -4.6790000000000003, -4.5410000000000004, -4.4039999999999999, -4.2670000000000003, -4.1289999999999996, -3.992, -3.855, -3.7170000000000001, -3.5800000000000001, -3.4430000000000001, -3.3050000000000002, -3.1680000000000001, -3.0310000000000001, -2.8929999999999998, -2.7559999999999998, -2.6190000000000002, -2.4809999999999999, -2.3439999999999999, -2.2069999999999999, -2.069, -1.9319999999999999, -1.7949999999999999, -1.657, -1.52, -1.383, -1.2450000000000001, -1.1080000000000001, -0.97099999999999997, -0.83299999999999996, -0.69599999999999995, -0.55900000000000005, -0.42099999999999999, -0.28399999999999997, -0.14699999999999999, -0.0089999999999999993, 0.128, 0.26500000000000001, 0.40300000000000002, 0.54000000000000004, 0.67700000000000005, 0.81499999999999995, 0.95199999999999996, 1.089, 1.2270000000000001, 1.3640000000000001, 1.5009999999999999, 1.639, 1.776, 1.913, 2.0510000000000002, 2.1880000000000002, 2.3250000000000002, 2.4630000000000001, 2.6000000000000001, 2.7370000000000001, 2.875, 3.012, 3.149, 3.2869999999999999, 
3.4239999999999999, 3.5609999999999999, 3.6989999999999998, 3.8359999999999999, 3.9729999999999999, 4.1109999999999998, 4.2480000000000002, 4.3849999999999998, 4.5229999999999997, 4.6600000000000001, 4.7969999999999997, 4.9349999999999996, 5.0720000000000001, 5.2089999999999996, 5.3470000000000004, 5.484, 5.6210000000000004, 5.7590000000000003, 5.8959999999999999, 6.0330000000000004, 6.1710000000000003, 6.3079999999999998, 6.4450000000000003, 6.5830000000000002, 6.7199999999999998, 6.8570000000000002, 6.9950000000000001, 7.1319999999999997, 7.2690000000000001, 7.407, 7.5439999999999996, 7.681, 7.819, 7.9560000000000004, 8.093, 8.2309999999999999, 8.3680000000000003, 8.5060000000000002, 8.6430000000000007, 8.7799999999999994, 8.9179999999999993, 9.0549999999999997, 9.1920000000000002, 9.3300000000000001, 9.4670000000000005, 9.6039999999999992, 9.7420000000000009, 9.8789999999999996, 10.016, 10.154, 10.291, 10.428000000000001, 10.566000000000001, 10.702999999999999, 10.84, 10.978, 11.115, 11.252000000000001, 11.390000000000001, 11.526999999999999, 11.664, 11.802, 11.939, 12.076000000000001, 12.214, 12.351000000000001, 12.488, 12.625999999999999, 12.763, 12.9, 13.038, 13.175000000000001, 13.311999999999999, 13.449999999999999, 13.587, 13.724, 13.862, 13.999000000000001, 14.135999999999999, 14.273999999999999, 14.411, 14.548, 14.686, 14.823], list(map(lambda x: x.value, dos.conditions.scalars))) self.assertEquals([0.0, 0.0, -1.19974e-35, -3.6470000000000002e-30, -1.3654e-25, -2.0122999999999998e-21, -1.1612e-17, -2.5870000000000001e-14, -2.3158e-11, -8.1289999999999995e-09, -1.1086000000000001e-06, -5.7370000000000001e-05, -0.0010558, -0.0053100000000000005, 0.0099659999999999992, 0.09085, 0.12007000000000001, 0.035970000000000002, -0.0050520000000000001, -0.002444, -0.00020777999999999999, -5.8720000000000007e-06, -6.1280000000000003e-08, -2.4453999999999996e-10, -3.7849999999999995e-13, -2.3017e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0769999999999999e-36, -1.263846e-31, -4.5653699999999997e-27, -6.5055699999999999e-23, -3.6695199999999999e-19, -7.9435000000000008e-16, -7.0263000000000001e-13, -2.4759000000000002e-10, -3.4771000000000001e-08, -1.9404999999999999e-06, -4.2440999999999996e-05, -0.00034220000000000002, -0.00067650000000000002, 0.0028322, 0.017154000000000003, 0.04342, 0.058620000000000005, 0.043749999999999997, 0.025759999999999998, 0.018255, 0.020630000000000003, 0.040410000000000001, 0.070239999999999997, 0.089269999999999988, 0.10810999999999998, 0.11366000000000001, 0.066860000000000003, 0.014122000000000001, -0.0024438999999999997, -0.0013060000000000001, -0.00014683900000000001, -5.8520999999999994e-06, -2.7914999999999999e-06, -5.1669999999999998e-05, -0.00035110000000000002, -0.00036900000000000002, 0.003519, 0.012931, 0.030180000000000002, 0.080740000000000006, 0.14502999999999999, 0.14307999999999998, 0.11418, 0.10038, 0.07102, 0.02673, 0.0029481000000000004, -0.0012769000000000001, -0.00048329999999999998, -5.4889999999999998e-05, -2.3511000000000002e-06, -3.941e-08, -2.6148000000000001e-10, -6.892999999999999e-13, -7.2260000000000005e-16, -1.4030000000000001e-19, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -2.361e-35, -1.513e-30, -3.6580000027900005e-26, -3.4550000174800002e-22, -1.24900004155e-18, -1.8020003855000002e-15, -1.0360013670000001e-12, -2.38501934e-10, -2.2271089e-08, -8.632447e-07, -1.4562029999999999e-05, -0.00011538919999999999, -0.00040824000000000004, 0.00026865999999999999, 0.0074427299999999998, 0.024236000000000001, 0.037592, 0.039448999999999998, 
0.040390000000000002, 0.047368, 0.045290000000000004, 0.043109999999999996, 0.065069999999999989, 0.084000000000000005, 0.08388000000000001, 0.078009999999999996, 0.066009999999999999, 0.051519999999999996, 0.046550000000000001, 0.055629999999999999, 0.074740000000000001, 0.097983000000000001, 0.086382, 0.063489999999999991, 0.072400000000000006, 0.058990000000000001, 0.038935999999999998, 0.030331999999999998, 0.029690000000000001, 0.037170000000000002, 0.04317, 0.044810000000000003, 0.057980000000000004, 0.075889999999999999, 0.087109999999999993, 0.089520000000000002, 0.088499999999999995, 0.090439999999999993, 0.096759999999999999, 0.1026, 0.10458999999999999, 0.10609, 0.092800000000000007, 0.056925999999999997, 0.017107000000000001, 0.0022797, 0.0054678000000000001, 0.0087969099999999998, 0.0086576974000000008, 0.0093789968080000008, 0.0110199999868, 0.01219999999997858, 0.0067749999999999868, 0.0029060000000000002, 0.0047070000000000002, 0.0056299999999999996, 0.0087919999999999995, 0.01132, 0.0086099999999999996, 0.0044559999999999999, 0.0055139999999994412, 0.0090759999996227001, 0.0097889998984000016, 0.01070998865, 0.0092994370999999985, 0.0061547299999999997, 0.0065844000000000007, 0.0091786999999999997, 0.006215, 0.0083759999999999998, 0.068430000000000005, 0.21668999999999999, 0.37070000000000003, 0.47799999999999998, 0.36899999999999999, 0.13123000000000001, 0.018269000000000001, 0.011729999999999999, 0.022500600000000003, 0.02993055, 0.034198961999999999, 0.042799988399999996, 0.061819999945499998, 0.079549999999893997, 0.070929999999999924, 0.039800000000000002, 0.016230000000000001, 0.01172, 0.01259, 0.0074139999999999996, 0.00513, 0.003588, 0.00072320000000000001, -0.0001964, -6.6060000000000001e-05, -5.1070000000000004e-06, -1.346e-07, -1.308e-09, -4.815e-12, -6.7919999999999999e-15, -3.743e-18, 0.0, 0.0, 0.0, 0.0, 0.0], list(map(lambda x: x.value, dos.scalars))) total_mag = parser.get_total_magnetization() assert(total_mag.scalars[0].value == 
3.9999992) assert(total_mag.units == "Bohr") # test number of atoms natoms = parser.get_number_of_atoms() self.assertEqual(natoms.scalars[0].value, 5) self.assertEqual(natoms.units, '/unit cell') # test volumes initial_volume = parser.get_initial_volume() self.assertAlmostEqual(initial_volume.scalars[0].value, 61.15) self.assertEqual(initial_volume.units, "Angstrom^3/cell") final_volume = parser.get_final_volume() self.assertAlmostEqual(final_volume.scalars[0].value, 59.73) self.assertEqual(final_volume.units, "Angstrom^3/cell") # Delete the data delete_example('perov_relax_U') def test_AlNi(self): parser = self.get_parser('AlNi_static_LDA') self._evaluate_AlNi(parser) delete_example('AlNi_static_LDA') def test_AlNi_without_incar(self): """Make sure AlNi test also works without an INCAR an INCAR still parses""" parser = self.get_parser('AlNi_static_LDA') os.unlink(os.path.join('AlNi_static_LDA','INCAR')) self._evaluate_AlNi(parser) delete_example('AlNi_static_LDA') def _evaluate_AlNi(self, parser): """Test that AlNi was parsed correctly""" # Test the settings self.assertEquals('VASP', parser.get_name()) strc = parser.get_output_structure() self.assertAlmostEquals(2.8333249999999999, strc.cell[0][0]) self.assertEquals(['Al','Ni'], strc.get_chemical_symbols()) self.assertEquals('AlNi', parser.get_composition()) res = parser.get_cutoff_energy() self.assertEquals(650, res.scalars[0].value) self.assertEquals('eV', res.units) self.assertTrue(parser.is_converged().scalars[0].value) res = parser.get_total_energy() self.assertAlmostEqual(-12.19669689, res.scalars[0].value) self.assertEquals('eV', res.units) self.assertEquals(None, parser.uses_SOC()) self.assertEquals(None, parser.is_relaxed()) self.assertEquals('PAW', parser.get_xc_functional().scalars[0].value) self.assertEquals(['Al','Ni'], list(map(lambda x: x.value, parser.get_pp_name().vectors[0]))) self.assertEquals(8192, parser.get_KPPRA().scalars[0].value) self.assertEquals('5.3.2', parser.get_version_number()) 
self.assertEquals(None, parser.get_U_settings()) self.assertEquals(None, parser.get_vdW_settings()) self.assertEquals(12.96, parser.get_pressure().scalars[0].value) self.assertEquals('kbar', parser.get_pressure().units) self.assertEquals([[12.96023,0,0],[0,12.96023,0],[0,0,12.96023]], list(map(lambda x: list(map(lambda y: y.value, x)), parser.get_stresses().matrices[0]))) self.assertEquals('kbar', parser.get_stresses().units) self.assertEquals(0, parser.get_band_gap().scalars[0].value) self.assertEquals('eV', parser.get_band_gap().units) dos = parser.get_dos() self.assertEquals([-3.0259999999999998, -2.903, -2.7810000000000001, -2.6579999999999999, -2.5350000000000001, -2.4119999999999999, -2.2890000000000001, -2.1659999999999999, -2.044, -1.921, -1.798, -1.675, -1.552, -1.4299999999999999, -1.3069999999999999, -1.1839999999999999, -1.0609999999999999, -0.93799999999999994, -0.81599999999999995, -0.69299999999999995, -0.56999999999999995, -0.44700000000000001, -0.32400000000000001, -0.20200000000000001, -0.079000000000000001, 0.043999999999999997, 0.16700000000000001, 0.28999999999999998, 0.41199999999999998, 0.53500000000000003, 0.65800000000000003, 0.78100000000000003, 0.90400000000000003, 1.026, 1.149, 1.272, 1.395, 1.518, 1.6399999999999999, 1.7629999999999999, 1.8859999999999999, 2.0089999999999999, 2.1320000000000001, 2.2549999999999999, 2.3769999999999998, 2.5, 2.6230000000000002, 2.746, 2.8690000000000002, 2.9910000000000001, 3.1139999999999999, 3.2370000000000001, 3.3599999999999999, 3.4830000000000001, 3.605, 3.7280000000000002, 3.851, 3.9740000000000002, 4.0970000000000004, 4.2190000000000003, 4.3419999999999996, 4.4649999999999999, 4.5880000000000001, 4.7110000000000003, 4.8330000000000002, 4.9560000000000004, 5.0789999999999997, 5.202, 5.3250000000000002, 5.4470000000000001, 5.5700000000000003, 5.6929999999999996, 5.8159999999999998, 5.9390000000000001, 6.0620000000000003, 6.1840000000000002, 6.3070000000000004, 6.4299999999999997, 6.5529999999999999, 
6.6760000000000002, 6.798, 6.9210000000000003, 7.0439999999999996, 7.1669999999999998, 7.29, 7.4119999999999999, 7.5350000000000001, 7.6580000000000004, 7.7809999999999997, 7.9039999999999999, 8.0259999999999998, 8.1489999999999991, 8.2720000000000002, 8.3949999999999996, 8.5180000000000007, 8.6400000000000006, 8.7629999999999999, 8.8859999999999992, 9.0090000000000003, 9.1319999999999997, 9.2539999999999996, 9.3770000000000007, 9.5, 9.6229999999999993, 9.7460000000000004, 9.8689999999999998, 9.9909999999999997, 10.114000000000001, 10.237, 10.359999999999999, 10.483000000000001, 10.605, 10.728, 10.851000000000001, 10.974, 11.097, 11.218999999999999, 11.342000000000001, 11.465, 11.587999999999999, 11.711, 11.833, 11.956, 12.079000000000001, 12.202, 12.324999999999999, 12.446999999999999, 12.57, 12.693, 12.816000000000001, 12.939, 13.061, 13.183999999999999, 13.307, 13.43, 13.553000000000001, 13.675000000000001, 13.798, 13.920999999999999, 14.044, 14.167, 14.289999999999999, 14.412000000000001, 14.535, 14.657999999999999, 14.781000000000001, 14.904, 15.026, 15.148999999999999, 15.272, 15.395, 15.518000000000001, 15.640000000000001, 15.763, 15.885999999999999, 16.009, 16.132000000000001, 16.254000000000001, 16.376999999999999, 16.5, 16.623000000000001, 16.745999999999999, 16.867999999999999, 16.991, 17.114000000000001, 17.236999999999998, 17.359999999999999, 17.481999999999999, 17.605, 17.728000000000002, 17.850999999999999, 17.974, 18.097000000000001, 18.219000000000001, 18.341999999999999, 18.465, 18.588000000000001, 18.710999999999999, 18.832999999999998, 18.956, 19.079000000000001, 19.202000000000002, 19.324999999999999, 19.446999999999999, 19.57, 19.693000000000001, 19.815999999999999, 19.939, 20.061, 20.184000000000001, 20.306999999999999, 20.43, 20.553000000000001, 20.675000000000001, 20.797999999999998, 20.920999999999999, 21.044, 21.167000000000002, 21.289000000000001, 21.411999999999999, 21.535, 21.658000000000001, 21.780999999999999, 21.902999999999999, 
22.026, 22.149000000000001, 22.271999999999998, 22.395, 22.518000000000001, 22.640000000000001, 22.763000000000002, 22.885999999999999, 23.009, 23.132000000000001, 23.254000000000001, 23.376999999999999, 23.5, 23.623000000000001, 23.745999999999999, 23.867999999999999, 23.991, 24.114000000000001, 24.236999999999998, 24.359999999999999, 24.481999999999999, 24.605, 24.728000000000002, 24.850999999999999, 24.974, 25.096, 25.219000000000001, 25.341999999999999, 25.465, 25.588000000000001, 25.710000000000001, 25.832999999999998, 25.956, 26.079000000000001, 26.202000000000002, 26.324999999999999, 26.446999999999999, 26.57, 26.693000000000001, 26.815999999999999, 26.939, 27.061, 27.184000000000001, 27.306999999999999, 27.43, 27.553000000000001, 27.675000000000001, 27.797999999999998, 27.920999999999999, 28.044, 28.167000000000002, 28.289000000000001, 28.411999999999999, 28.535, 28.658000000000001, 28.780999999999999, 28.902999999999999, 29.026, 29.149000000000001, 29.271999999999998, 29.395, 29.516999999999999, 29.640000000000001, 29.763000000000002, 29.885999999999999, 30.009, 30.131, 30.254000000000001, 30.376999999999999, 30.5, 30.623000000000001, 30.745999999999999, 30.867999999999999, 30.991, 31.114000000000001, 31.236999999999998, 31.359999999999999, 31.481999999999999, 31.605, 31.728000000000002, 31.850999999999999, 31.974, 32.095999999999997, 32.219000000000001, 32.341999999999999, 32.465000000000003, 32.588000000000001, 32.710000000000001, 32.832999999999998, 32.956000000000003, 33.079000000000001, 33.201999999999998, 33.323999999999998, 33.447000000000003, 33.57, 33.692999999999998, 33.816000000000003], list(map(lambda x: x.value, dos.conditions.scalars))) self.assertEquals('eV', dos.conditions.units) self.assertEquals([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.034779999999999998, 0.058860000000000003, 0.087639999999999996, 0.097239999999999993, 0.11268, 0.12908, 0.13766, 0.14779999999999999, 0.15936, 0.17232, 
0.17985999999999999, 0.18837999999999999, 0.19753999999999999, 0.20760000000000001, 0.21779999999999999, 0.22600000000000001, 0.23419999999999999, 0.2422, 0.25080000000000002, 0.26000000000000001, 0.27000000000000002, 0.27900000000000003, 0.28699999999999998, 0.29499999999999998, 0.3044, 0.31419999999999998, 0.32419999999999999, 0.33460000000000001, 0.3448, 0.35659999999999997, 0.36940000000000001, 0.38340000000000002, 0.39860000000000001, 0.41520000000000001, 0.43580000000000002, 0.45839999999999997, 0.46700000000000003, 0.47920000000000001, 0.49320000000000003, 0.50960000000000005, 0.53200000000000003, 0.5534, 0.56879999999999997, 0.6048, 0.63600000000000001, 0.65880000000000005, 0.71020000000000005, 0.75839999999999996, 0.65300000000000002, 0.623, 0.61660000000000004, 0.62480000000000002, 0.63859999999999995, 0.66200000000000003, 0.7016, 0.76400000000000001, 0.82920000000000005, 0.96719999999999995, 1.7285999999999999, 2.1680000000000001, 1.8826000000000001, 1.599, 1.4379999999999999, 1.5680000000000001, 2.4300000000000002, 2.4079999999999999, 3.5880000000000001, 2.9319999999999999, 2.742, 1.9369000000000001, 0.71629999999999994, 3.5699999999999998, 4.3099999999999996, 3.242, 4.4660000000000002, 8.8300000000000001, 5.7919999999999998, 5.1760000000000002, 4.0540000000000003, 2.8900000000000001, 0.88260000000000005, 0.7258, 0.62339999999999995, 0.52700000000000002, 0.35659999999999997, 0.4929, 0.65859999999999996, 2.6619999999999999, 1.5973999999999999, 1.3284, 1.1026, 0.9264, 0.83499999999999996, 0.7722, 0.7046, 0.6482, 0.60140000000000005, 0.5484, 0.51039999999999996, 0.47660000000000002, 0.44700000000000001, 0.41199999999999998, 0.38600000000000001, 0.35539999999999999, 0.2994, 0.26019999999999999, 0.26479999999999998, 0.309, 0.35599999999999998, 0.3886, 0.44440000000000002, 0.5766, 0.6976, 0.73719999999999997, 0.80220000000000002, 0.8226, 0.89459999999999995, 0.86519999999999997, 0.82620000000000005, 0.78720000000000001, 0.77880000000000005, 
0.83579999999999999, 0.74099999999999999, 0.73340000000000005, 0.7944, 0.73919999999999997, 0.73199999999999998, 0.74119999999999997, 0.69840000000000002, 0.73860000000000003, 0.71079999999999999, 0.70199999999999996, 0.77439999999999998, 0.74460000000000004, 0.79279999999999995, 0.89900000000000002, 0.82720000000000005, 0.81620000000000004, 0.85199999999999998, 0.85819999999999996, 0.876, 0.87819999999999998, 0.89100000000000001, 0.89880000000000004, 0.874, 0.85399999999999998, 0.85719999999999996, 0.8518, 0.84819999999999995, 0.83499999999999996, 0.8206, 0.80500000000000005, 0.78900000000000003, 0.77400000000000002, 0.75880000000000003, 0.753, 0.75580000000000003, 0.75080000000000002, 0.73280000000000001, 0.71079999999999999, 0.69040000000000001, 0.65900000000000003, 0.66139999999999999, 0.67700000000000005, 0.70620000000000005, 0.74180000000000001, 0.79459999999999997, 0.878, 0.98299999999999998, 1.0628, 1.1044, 1.0998000000000001, 1.0371999999999999, 0.92179999999999995, 0.85260000000000002, 0.80100000000000005, 0.75239999999999996, 0.70079999999999998, 0.66920000000000002, 0.65180000000000005, 0.64159999999999995, 0.63300000000000001, 0.62760000000000005, 0.62780000000000002, 0.64059999999999995, 0.66620000000000001, 0.6784, 0.68300000000000005, 0.67479999999999996, 0.6804, 0.755, 0.7802, 0.80659999999999998, 0.82120000000000004, 0.83640000000000003, 0.85660000000000003, 0.90500000000000003, 0.90880000000000005, 0.88019999999999998, 0.86419999999999997, 0.84719999999999995, 0.82740000000000002, 0.80020000000000002, 0.75800000000000001, 0.69979999999999998, 0.71360000000000001, 0.90400000000000003, 0.9768, 1.123, 1.2014, 0.93259999999999998, 0.81240000000000001, 0.71319999999999995, 0.65780000000000005, 0.65639999999999998, 0.6472, 0.62829999999999997, 0.63440000000000007, 0.6885, 0.76059999999999994, 0.83579999999999999, 0.93270000000000008, 1.0291000000000001, 1.0882000000000001, 1.0954000000000002, 1.0880999999999998, 1.0526, 0.98049999999999993, 
0.86430000000000007, 0.8085, 0.41639999999999999, 0.29980000000000001, 0.24840000000000001, 0.1825, 0.13319999999999999, 0.091359999999999997, 0.065560000000000007, 0.048509999999999998, 0.039219999999999998, 0.030629999999999998, 0.02239, 0.016642000000000001, 0.015498000000000001, 0.01456, 0.013663, 0.012806999999999999, 0.011991, 0.011218000000000001, 0.010484, 0.009777000000000001, 0.0090940000000000014, 0.0084370000000000001, 0.0079260000000000008, 0.0081139999999999997, 0.0071549999999999999, 0.0062389999999999998, 0.0053679999999999995, 0.0045409999999999999, 0.0037580000000000001, 0.003019, 0.00232, 0.0016800999999999999, 0.0011435, 0.00070980000000000001, 0.0003791, 0.00015118999999999999, 2.6239999999999999e-05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], list(map(lambda x: x.value, dos.scalars))) self.assertEquals('number of states per unit cell', dos.units) # test number of atoms natoms = parser.get_number_of_atoms() self.assertEqual(natoms.scalars[0].value, 2) self.assertEqual(natoms.units, '/unit cell') # test volumes initial_volume = parser.get_initial_volume() self.assertAlmostEqual(initial_volume.scalars[0].value, 22.75) self.assertEqual(initial_volume.units, "Angstrom^3/cell") final_volume = parser.get_final_volume() self.assertAlmostEqual(final_volume.scalars[0].value, 22.75) self.assertEqual(final_volume.units, "Angstrom^3/cell") def test_SOC(self): # Parse the results parser = self.get_parser('heusler_static_SOC') # Test the settings self.assertEquals('VASP', parser.get_name()) res = parser.get_cutoff_energy() self.assertEquals(499, res.scalars[0].value) self.assertEquals('eV', res.units) # Make sure it gets the last ionic step strc = parser.get_output_structure() self.assertAlmostEquals(3.3681598291240786, strc.cell[1][0]) self.assertEquals(['Li','Pt','Sn','Y'], strc.get_chemical_symbols()) self.assertEquals('LiPtSnY', parser.get_composition()) self.assertTrue(parser.is_converged()) 
self.assertAlmostEqual(-22.273992, parser.get_total_energy().scalars[0].value) self.assertTrue(isinstance(parser.uses_SOC(), Value)) self.assertEquals(None, parser.is_relaxed()) self.assertEquals('PAW_PBE', parser.get_xc_functional().scalars[0].value) self.assertEquals(['Li_sv','Pt','Sn_d','Y_sv'], list(map(lambda x: x.value, parser.get_pp_name().vectors[0]))) self.assertEquals(1440, parser.get_KPPRA().scalars[0].value) self.assertEquals('5.2.11', parser.get_version_number()) self.assertEquals(None, parser.get_U_settings()) self.assertEquals(None, parser.get_vdW_settings()) self.assertEquals(None, parser.get_pressure()) self.assertEquals(None, parser.get_stresses()) self.assertEquals(0.757, parser.get_band_gap().scalars[0].value) dos = parser.get_dos() self.assertEquals([-42.363, -42.173000000000002, -41.984000000000002, -41.795000000000002, -41.604999999999997, -41.415999999999997, -41.226999999999997, -41.036999999999999, -40.847999999999999, -40.658000000000001, -40.469000000000001, -40.280000000000001, -40.090000000000003, -39.901000000000003, -39.710999999999999, -39.521999999999998, -39.332999999999998, -39.143000000000001, -38.954000000000001, -38.764000000000003, -38.575000000000003, -38.386000000000003, -38.195999999999998, -38.006999999999998, -37.817, -37.628, -37.439, -37.249000000000002, -37.060000000000002, -36.871000000000002, -36.680999999999997, -36.491999999999997, -36.302, -36.113, -35.923999999999999, -35.734000000000002, -35.545000000000002, -35.354999999999997, -35.165999999999997, -34.976999999999997, -34.786999999999999, -34.597999999999999, -34.408000000000001, -34.219000000000001, -34.030000000000001, -33.840000000000003, -33.651000000000003, -33.462000000000003, -33.271999999999998, -33.082999999999998, -32.893000000000001, -32.704000000000001, -32.515000000000001, -32.325000000000003, -32.136000000000003, -31.946000000000002, -31.757000000000001, -31.568000000000001, -31.378, -31.189, -30.998999999999999, -30.809999999999999, 
-30.620999999999999, -30.431000000000001, -30.242000000000001, -30.053000000000001, -29.863, -29.673999999999999, -29.484000000000002, -29.295000000000002, -29.106000000000002, -28.916, -28.727, -28.536999999999999, -28.347999999999999, -28.158999999999999, -27.969000000000001, -27.780000000000001, -27.59, -27.401, -27.212, -27.021999999999998, -26.832999999999998, -26.643999999999998, -26.454000000000001, -26.265000000000001, -26.074999999999999, -25.885999999999999, -25.696999999999999, -25.507000000000001, -25.318000000000001, -25.128, -24.939, -24.75, -24.559999999999999, -24.370999999999999, -24.181000000000001, -23.992000000000001, -23.803000000000001, -23.613, -23.423999999999999, -23.234999999999999, -23.045000000000002, -22.856000000000002, -22.666, -22.477, -22.288, -22.097999999999999, -21.908999999999999, -21.719000000000001, -21.530000000000001, -21.341000000000001, -21.151, -20.962, -20.771999999999998, -20.582999999999998, -20.393999999999998, -20.204000000000001, -20.015000000000001, -19.824999999999999, -19.635999999999999, -19.446999999999999, -19.257000000000001, -19.068000000000001, -18.879000000000001, -18.689, -18.5, -18.309999999999999, -18.120999999999999, -17.931999999999999, -17.742000000000001, -17.553000000000001, -17.363, -17.173999999999999, -16.984999999999999, -16.795000000000002, -16.606000000000002, -16.416, -16.227, -16.038, -15.848000000000001, -15.659000000000001, -15.470000000000001, -15.279999999999999, -15.090999999999999, -14.901, -14.712, -14.523, -14.333, -14.144, -13.954000000000001, -13.765000000000001, -13.576000000000001, -13.385999999999999, -13.196999999999999, -13.007, -12.818, -12.629, -12.439, -12.25, -12.061, -11.871, -11.682, -11.492000000000001, -11.303000000000001, -11.114000000000001, -10.923999999999999, -10.734999999999999, -10.545, -10.356, -10.167, -9.9770000000000003, -9.7880000000000003, -9.5980000000000008, -9.4090000000000007, -9.2200000000000006, -9.0299999999999994, -8.8409999999999993, 
-8.6519999999999992, -8.4619999999999997, -8.2729999999999997, -8.0830000000000002, -7.8940000000000001, -7.7050000000000001, -7.5149999999999997, -7.3259999999999996, -7.1360000000000001, -6.9470000000000001, -6.758, -6.5679999999999996, -6.3789999999999996, -6.1890000000000001, -6.0, -5.8109999999999999, -5.6210000000000004, -5.4320000000000004, -5.2430000000000003, -5.0529999999999999, -4.8639999999999999, -4.6740000000000004, -4.4850000000000003, -4.2960000000000003, -4.1059999999999999, -3.9169999999999998, -3.7269999999999999, -3.5379999999999998, -3.3490000000000002, -3.1589999999999998, -2.9700000000000002, -2.7799999999999998, -2.5910000000000002, -2.4020000000000001, -2.2120000000000002, -2.0230000000000001, -1.833, -1.6439999999999999, -1.4550000000000001, -1.2649999999999999, -1.0760000000000001, -0.88700000000000001, -0.69699999999999995, -0.50800000000000001, -0.318, -0.129, 0.059999999999999998, 0.25, 0.439, 0.629, 0.81799999999999995, 1.0069999999999999, 1.1970000000000001, 1.3859999999999999, 1.5760000000000001, 1.7649999999999999, 1.954, 2.1440000000000001, 2.3330000000000002, 2.5219999999999998, 2.7120000000000002, 2.9009999999999998, 3.0910000000000002, 3.2799999999999998, 3.4689999999999999, 3.6589999999999998, 3.8479999999999999, 4.0380000000000003, 4.2270000000000003, 4.4160000000000004, 4.6059999999999999, 4.7949999999999999, 4.9850000000000003, 5.1740000000000004, 5.3630000000000004, 5.5529999999999999, 5.742, 5.931, 6.1210000000000004, 6.3099999999999996, 6.5, 6.6890000000000001, 6.8780000000000001, 7.0679999999999996, 7.2569999999999997, 7.4470000000000001, 7.6360000000000001, 7.8250000000000002, 8.0150000000000006, 8.2040000000000006, 8.3940000000000001, 8.5830000000000002, 8.7720000000000002, 8.9619999999999997, 9.1509999999999998, 9.3399999999999999, 9.5299999999999994, 9.7189999999999994, 9.9090000000000007, 10.098000000000001, 10.287000000000001, 10.477, 10.666, 10.856, 11.045, 11.234, 11.423999999999999, 11.613, 11.803000000000001, 
11.992000000000001, 12.180999999999999, 12.371, 12.56, 12.749000000000001, 12.939, 13.128, 13.318, 13.507, 13.696, 13.885999999999999, 14.074999999999999, 14.265000000000001, 14.454000000000001], list(map(lambda x: x.value, dos.conditions.scalars))) self.assertEquals([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.56, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.56, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 9.6470000000000002, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.6320000000000001, 6.9969999999999999, 21.120000000000001, 0.0, 0.0, 0.0, 0.0, 31.149999999999999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.040309999999999999, 0.57709999999999995, 0.31590000000000001, 0.26400000000000001, 0.21940000000000001, 0.22090000000000001, 0.25740000000000002, 0.28789999999999999, 3.5, 2.7050000000000001, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.61719999999999997, 1.3440000000000001, 2.7330000000000001, 3.1349999999999998, 4.6900000000000004, 2.9540000000000002, 4.0010000000000003, 4.2530000000000001, 6.0140000000000002, 2.2869999999999999, 1.278, 3.1720000000000002, 5.4370000000000003, 6.3730000000000002, 3.6629999999999998, 1.8360000000000001, 1.4099999999999999, 
0.98019999999999996, 0.94330000000000003, 0.65300000000000002, 1.74, 1.8180000000000001, 2.1400000000000001, 2.0209999999999999, 2.2629999999999999, 1.446, 0.72999999999999998, 1.3420000000000001, 0.0, 0.0, 1.1880000000000001e-13, 0.99199999999999999, 0.37909999999999999, 1.0660000000000001, 2.2519999999999998, 1.728, 2.125, 1.841, 1.766, 2.1459999999999999, 4.468, 6.1970000000000001, 6.0819999999999999, 2.77, 1.9319999999999999, 1.4470000000000001, 4.2309999999999999, 2.5150000000000001, 0.52410000000000001, 0.2462, 0.20069999999999999, 0.12670000000000001, 0.091480000000000006, 0.05867, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], list(map(lambda x: x.value, dos.scalars))) positions = parser.get_positions() xyz = list(map(lambda x: list(map(lambda y: y.value, x)), positions.vectors)) self.assertEqual([[5.05224, 5.05224, 5.05224], [1.68408, 1.68408, 1.68408], [3.36817, 3.36817, 3.36817], [0.0, 0.0, 0.0]], xyz) forces = parser.get_forces() for f in forces.vectors: for x in f: self.assertAlmostEqual(0.0, x.value) # test number of atoms natoms = parser.get_number_of_atoms() self.assertEqual(natoms.scalars[0].value, 4) self.assertEqual(natoms.units, '/unit cell') # test volumes initial_volume = parser.get_initial_volume() self.assertAlmostEqual(initial_volume.scalars[0].value, 76.42) self.assertEqual(initial_volume.units, "Angstrom^3/cell") final_volume = parser.get_final_volume() self.assertAlmostEqual(final_volume.scalars[0].value, 76.42) self.assertEqual(final_volume.units, "Angstrom^3/cell") # Delete the data delete_example('heusler_static_SOC') def test_vdW(self): # Parse the results parser = self.get_parser('vdW') # Test the settings self.assertEquals('VASP', parser.get_name()) strc = parser.get_output_structure() self.assertAlmostEquals(-12.6699720530641162, strc.cell[1][0]) self.assertEquals(['C']*48, strc.get_chemical_symbols()[:48]) self.assertEquals('Br16C48Fe6H48N12S12', parser.get_composition()) res = parser.get_cutoff_energy() 
self.assertEquals(520, res.scalars[0].value) self.assertEquals('eV', res.units) self.assertTrue(parser.is_converged().scalars[0].value) self.assertAlmostEqual(-707.48169596, parser.get_total_energy().scalars[0].value) self.assertEquals(None, parser.uses_SOC()) self.assertTrue(isinstance(parser.is_relaxed(), Value)) self.assertEquals('PAW_PBE', parser.get_xc_functional().scalars[0].value) self.assertEquals(['C','H','Br','Fe','N','S'], list(map(lambda x: x.value, parser.get_pp_name().vectors[0]))) self.assertEquals(142, parser.get_KPPRA().scalars[0].value) self.assertEquals('5.3.5', parser.get_version_number()) self.assertEquals(None, parser.get_U_settings()) self.assertEquals('optB88-vdW', parser.get_vdW_settings().scalars[0].value) self.assertEquals(-0.07, parser.get_pressure().scalars[0].value) self.assertEquals([[-4.09956,0,0],[0,-4.09956,0],[0,0,-4.00192]], list(map(lambda x: list(map(lambda y: y.value, x)), parser.get_stresses().matrices[0]))) self.assertEquals(0, parser.get_band_gap().scalars[0].value) dos = parser.get_dos() self.assertEquals([-22.135000000000002, -22.029, -21.923999999999999, -21.818999999999999, -21.713000000000001, -21.608000000000001, -21.501999999999999, -21.396999999999998, -21.291, -21.186, -21.079999999999998, -20.975000000000001, -20.869, -20.763999999999999, -20.658000000000001, -20.553000000000001, -20.448, -20.341999999999999, -20.236999999999998, -20.131, -20.026, -19.920000000000002, -19.815000000000001, -19.709, -19.603999999999999, -19.498000000000001, -19.393000000000001, -19.288, -19.181999999999999, -19.077000000000002, -18.971, -18.866, -18.760000000000002, -18.655000000000001, -18.548999999999999, -18.443999999999999, -18.338000000000001, -18.233000000000001, -18.128, -18.021999999999998, -17.917000000000002, -17.811, -17.706, -17.600000000000001, -17.495000000000001, -17.388999999999999, -17.283999999999999, -17.178000000000001, -17.073, -16.966999999999999, -16.861999999999998, -16.757000000000001, -16.651, 
-16.545999999999999, -16.440000000000001, -16.335000000000001, -16.228999999999999, -16.123999999999999, -16.018000000000001, -15.913, -15.807, -15.702, -15.597, -15.491, -15.385999999999999, -15.279999999999999, -15.175000000000001, -15.069000000000001, -14.964, -14.858000000000001, -14.753, -14.647, -14.542, -14.436, -14.331, -14.226000000000001, -14.119999999999999, -14.015000000000001, -13.909000000000001, -13.804, -13.698, -13.593, -13.487, -13.382, -13.276, -13.170999999999999, -13.066000000000001, -12.960000000000001, -12.855, -12.749000000000001, -12.644, -12.538, -12.433, -12.327, -12.222, -12.116, -12.010999999999999, -11.906000000000001, -11.800000000000001, -11.695, -11.589, -11.484, -11.378, -11.273, -11.167, -11.061999999999999, -10.956, -10.851000000000001, -10.744999999999999, -10.640000000000001, -10.535, -10.429, -10.324, -10.218, -10.113, -10.007, -9.9019999999999992, -9.7959999999999994, -9.6910000000000007, -9.5850000000000009, -9.4800000000000004, -9.375, -9.2690000000000001, -9.1639999999999997, -9.0579999999999998, -8.9529999999999994, -8.8469999999999995, -8.7420000000000009, -8.6359999999999992, -8.5310000000000006, -8.4250000000000007, -8.3200000000000003, -8.2149999999999999, -8.109, -8.0039999999999996, -7.8979999999999997, -7.7930000000000001, -7.6870000000000003, -7.5819999999999999, -7.476, -7.3710000000000004, -7.2649999999999997, -7.1600000000000001, -7.0540000000000003, -6.9489999999999998, -6.8440000000000003, -6.7380000000000004, -6.633, -6.5270000000000001, -6.4219999999999997, -6.3159999999999998, -6.2110000000000003, -6.1050000000000004, -6.0, -5.8940000000000001, -5.7889999999999997, -5.6840000000000002, -5.5780000000000003, -5.4729999999999999, -5.367, -5.2619999999999996, -5.1559999999999997, -5.0510000000000002, -4.9450000000000003, -4.8399999999999999, -4.734, -4.6289999999999996, -4.5229999999999997, -4.4180000000000001, -4.3129999999999997, -4.2069999999999999, -4.1020000000000003, -3.996, -3.891, -3.7850000000000001, 
-3.6800000000000002, -3.5739999999999998, -3.4689999999999999, -3.363, -3.258, -3.153, -3.0470000000000002, -2.9420000000000002, -2.8359999999999999, -2.7309999999999999, -2.625, -2.52, -2.4140000000000001, -2.3090000000000002, -2.2029999999999998, -2.0979999999999999, -1.9930000000000001, -1.887, -1.782, -1.6759999999999999, -1.571, -1.4650000000000001, -1.3600000000000001, -1.254, -1.149, -1.0429999999999999, -0.93799999999999994, -0.83199999999999996, -0.72699999999999998, -0.622, -0.51600000000000001, -0.41099999999999998, -0.30499999999999999, -0.20000000000000001, -0.094, 0.010999999999999999, 0.11700000000000001, 0.222, 0.32800000000000001, 0.433, 0.53800000000000003, 0.64400000000000002, 0.749, 0.85499999999999998, 0.95999999999999996, 1.0660000000000001, 1.171, 1.2769999999999999, 1.3819999999999999, 1.488, 1.593, 1.698, 1.804, 1.909, 2.0150000000000001, 2.1200000000000001, 2.226, 2.331, 2.4369999999999998, 2.5419999999999998, 2.6480000000000001, 2.7530000000000001, 2.859, 2.964, 3.069, 3.1749999999999998, 3.2799999999999998, 3.3860000000000001, 3.4910000000000001, 3.597, 3.702, 3.8079999999999998, 3.9129999999999998, 4.0190000000000001, 4.1239999999999997, 4.2290000000000001, 4.335, 4.4400000000000004, 4.5460000000000003, 4.6509999999999998, 4.7569999999999997, 4.8620000000000001, 4.968, 5.0730000000000004, 5.1790000000000003, 5.2839999999999998, 5.3890000000000002, 5.4950000000000001, 5.5999999999999996, 5.7060000000000004, 5.8109999999999999, 5.9169999999999998, 6.0220000000000002, 6.1280000000000001, 6.2329999999999997, 6.3390000000000004, 6.444, 6.5499999999999998, 6.6550000000000002, 6.7599999999999998, 6.8659999999999997, 6.9710000000000001, 7.077, 7.1820000000000004, 7.2880000000000003, 7.3929999999999998, 7.4989999999999997, 7.6040000000000001, 7.71, 7.8150000000000004, 7.9199999999999999, 8.0259999999999998, 8.1310000000000002, 8.2370000000000001, 8.3420000000000005, 8.4480000000000004, 8.5530000000000008, 8.6590000000000007, 8.7639999999999993, 
8.8699999999999992, 8.9749999999999996, 9.0809999999999995, 9.1859999999999999, 9.2910000000000004, 9.3970000000000002, 9.5020000000000007], list(map(lambda x: x.value, dos.conditions.scalars))) self.assertEquals([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.6319999999999999e-17, 3.143e-15, 2.6169999999999999e-13, 1.5350000000000001e-11, 6.3450000000000003e-10, 1.85e-08, 3.8080000000000002e-07, 5.5450000000000003e-06, 5.7269999999999999e-05, 0.0004215, 0.0022279999999999999, 0.0085550000000000001, 0.024320000000000001, 0.052359999999999997, 0.087309999999999999, 0.1142, 0.1181, 0.1023, 0.091939999999999994, 0.107, 0.13159999999999999, 0.13059999999999999, 0.094570000000000001, 0.048860000000000001, 0.017909999999999999, 0.004653, 0.00085590000000000004, 0.0001114, 1.025e-05, 6.6710000000000004e-07, 3.0659999999999998e-08, 9.944e-10, 2.2749999999999999e-11, 3.6710000000000002e-13, 4.211e-15, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.6849999999999997e-17, 4.5060000000000002e-15, 3.6630000000000001e-13, 2.1149999999999999e-11, 8.6729999999999999e-10, 2.531e-08, 5.2679999999999998e-07, 7.8390000000000007e-06, 8.3570000000000001e-05, 0.00063960000000000004, 0.0035209999999999998, 0.01397, 0.040129999999999999, 0.084239999999999995, 0.13339999999999999, 0.17269999999999999, 0.20960000000000001, 0.25469999999999998, 0.28050000000000003, 0.24560000000000001, 0.15970000000000001, 0.075359999999999996, 0.026380000000000001, 0.011169999999999999, 0.020629999999999999, 0.056489999999999999, 0.1211, 0.19800000000000001, 0.2586, 0.29349999999999998, 0.31209999999999999, 0.3054, 0.25030000000000002, 0.1585, 0.075039999999999996, 0.02937, 0.020729999999999998, 0.03882, 0.073150000000000007, 0.1066, 0.1186, 0.1008, 0.065310000000000007, 0.031879999999999999, 0.011610000000000001, 0.0034150000000000001, 0.002552, 0.0084089999999999998, 0.025819999999999999, 0.058729999999999997, 0.098680000000000004, 0.1231, 0.11409999999999999, 0.078479999999999994, 0.040129999999999999, 
0.016910000000000001, 0.01357, 0.031550000000000002, 0.070419999999999996, 0.1143, 0.13220000000000001, 0.1089, 0.063869999999999996, 0.026679999999999999, 0.0079389999999999999, 0.0016819999999999999, 0.00025619999999999999, 5.9740000000000001e-05, 0.00028899999999999998, 0.0018500000000000001, 0.0087819999999999999, 0.03082, 0.080199999999999994, 0.1552, 0.22389999999999999, 0.24160000000000001, 0.1958, 0.1208, 0.061920000000000003, 0.043180000000000003, 0.065579999999999999, 0.11210000000000001, 0.15129999999999999, 0.15820000000000001, 0.13919999999999999, 0.1203, 0.11260000000000001, 0.1028, 0.078009999999999996, 0.044979999999999999, 0.019029999999999998, 0.0059649999999999998, 0.0023180000000000002, 0.0052659999999999998, 0.018010000000000002, 0.047750000000000001, 0.097589999999999996, 0.1588, 0.20999999999999999, 0.22620000000000001, 0.19950000000000001, 0.15759999999999999, 0.14380000000000001, 0.17199999999999999, 0.21440000000000001, 0.23350000000000001, 0.21890000000000001, 0.19220000000000001, 0.18229999999999999, 0.1951, 0.2117, 0.21820000000000001, 0.219, 0.2167, 0.21029999999999999, 0.2117, 0.23849999999999999, 0.29120000000000001, 0.34539999999999998, 0.36799999999999999, 0.35039999999999999, 0.31669999999999998, 0.29139999999999999, 0.2722, 0.24679999999999999, 0.22059999999999999, 0.2094, 0.21229999999999999, 0.21179999999999999, 0.20000000000000001, 0.18690000000000001, 0.18140000000000001, 0.18840000000000001, 0.21740000000000001, 0.26579999999999998, 0.31019999999999998, 0.33589999999999998, 0.35599999999999998, 0.38740000000000002, 0.42420000000000002, 0.44290000000000002, 0.42880000000000001, 0.3901, 0.34289999999999998, 0.29120000000000001, 0.23169999999999999, 0.1731, 0.13800000000000001, 0.14460000000000001, 0.18640000000000001, 0.2293, 0.2344, 0.19320000000000001, 0.13400000000000001, 0.095130000000000006, 0.091319999999999998, 0.1056, 0.1113, 0.097659999999999997, 0.070879999999999999, 0.042029999999999998, 0.020400000000000001, 
0.01214, 0.024029999999999999, 0.068360000000000004, 0.15179999999999999, 0.24809999999999999, 0.29959999999999998, 0.27460000000000001, 0.21390000000000001, 0.18720000000000001, 0.2112, 0.23899999999999999, 0.22289999999999999, 0.16350000000000001, 0.094339999999999993, 0.042779999999999999, 0.01506, 0.004032, 0.00080500000000000005, 0.00013300000000000001, 0.00013970000000000001, 0.00073349999999999999, 0.003088, 0.0099260000000000008, 0.025839999999999998, 0.057540000000000001, 0.11, 0.1724, 0.2135, 0.21379999999999999, 0.1966, 0.19850000000000001, 0.22040000000000001, 0.22919999999999999, 0.1983, 0.1343, 0.070239999999999997, 0.03288, 0.027310000000000001, 0.047260000000000003, 0.086449999999999999, 0.13769999999999999, 0.18809999999999999, 0.2185, 0.2104, 0.1613, 0.095079999999999998, 0.042020000000000002, 0.014149999999999999, 0.005764, 0.0096120000000000008, 0.023990000000000001, 0.048349999999999997, 0.080140000000000003, 0.1201, 0.1731, 0.2354, 0.28839999999999999, 0.31030000000000002, 0.29310000000000003, 0.25169999999999998, 0.21129999999999999, 0.17999999999999999, 0.1449, 0.099610000000000004, 0.054780000000000002, 0.02332, 0.0075259999999999997, 0.0018090000000000001, 0.0003191, 4.0849999999999997e-05, 3.7639999999999999e-06, 2.481e-07, 1.165e-08, 3.8859999999999998e-10, 9.1830000000000008e-12, 1.535e-13, 1.811e-15, 2.106e-17, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], list(map(lambda x: x.value, dos.scalars))) # test number of atoms natoms = parser.get_number_of_atoms() self.assertEqual(natoms.scalars[0].value, 142) self.assertEqual(natoms.units, '/unit cell') # test volumes initial_volume = parser.get_initial_volume() self.assertAlmostEqual(initial_volume.scalars[0].value, 2005.82) self.assertEqual(initial_volume.units, "Angstrom^3/cell") final_volume = parser.get_final_volume() self.assertAlmostEqual(final_volume.scalars[0].value, 2005.73) self.assertEqual(final_volume.units, "Angstrom^3/cell") # Delete the data delete_example('vdW') def 
test_filename_robustness(self): """Make sure that parser can handle OUTCARs having other extensions""" # Unpack an example and rename the OUTCAR file unpack_example(os.path.join('examples', 'vasp', 'perov_relax_U.tar.gz')) shutil.move(os.path.join('perov_relax_U', 'OUTCAR'), os.path.join('perov_relax_U', 'OUTCAR_newname')) # Make the parser try: parser = VaspParser.generate_from_directory("perov_relax_U") self.assertEquals(parser.get_name(), 'VASP') # Test the cutoff energy res = parser.get_cutoff_energy() self.assertEquals(400, res.scalars[0].value) self.assertEquals('eV', res.units) # Test whether it is converged self.assertTrue(parser.is_converged().scalars[0].value) # Test total energy self.assertAlmostEqual(-39.85550532, parser.get_total_energy().scalars[0].value) finally: delete_example('perov_relax_U') def test_fail_with_multiple_files(self): # Unpack an example and duplicate the OUTCAR file unpack_example(os.path.join('examples', 'vasp', 'perov_relax_U.tar.gz')) shutil.copy(os.path.join('perov_relax_U', 'OUTCAR'), os.path.join('perov_relax_U', 'OUTCAR_newname')) # Make the parser with self.assertRaises(InvalidIngesterException) as context: VaspParser.generate_from_directory('perov_relax_U') # Make the parser, but setting `files` to not include `OUTCAR_newname` acceptable_files = [f for f in os.listdir('perov_relax_U')] acceptable_files.remove('OUTCAR_newname') acceptable_files = [os.path.join('perov_relax_U', f) for f in acceptable_files] try: VaspParser(acceptable_files) finally: delete_example('perov_relax_U') if __name__ == '__main__': unittest.main()
163.671924
5,414
0.744468
6,691
51,884
5.728441
0.296219
0.042527
0.061285
0.079105
0.191917
0.188473
0.179551
0.170367
0.158131
0.149547
0
0.647745
0.10907
51,884
316
5,415
164.189873
0.181439
0.016556
0
0.453333
0
0
0.01638
0
0
0
0
0
0.56
1
0.04
false
0
0.031111
0
0.08
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
1
1
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
5
6700edb4acf1e33e1e4cb6c03bcc70a030076e75
129
py
Python
library/.config/calibre/plugins/DeDRM/libraryfiles/subasyncio.py
funkeyfreak/calibre-drm-stripper
90813b644c86543fb423b4fd664685a02b43e525
[ "Apache-2.0" ]
null
null
null
library/.config/calibre/plugins/DeDRM/libraryfiles/subasyncio.py
funkeyfreak/calibre-drm-stripper
90813b644c86543fb423b4fd664685a02b43e525
[ "Apache-2.0" ]
null
null
null
library/.config/calibre/plugins/DeDRM/libraryfiles/subasyncio.py
funkeyfreak/calibre-drm-stripper
90813b644c86543fb423b4fd664685a02b43e525
[ "Apache-2.0" ]
1
2022-02-05T00:18:21.000Z
2022-02-05T00:18:21.000Z
version https://git-lfs.github.com/spec/v1 oid sha256:8b5b0f1e27b19ecfc98cd83b0e72bf326cc65501fb2b3ae900e9f4af7b9db87d size 5116
32.25
75
0.883721
13
129
8.769231
1
0
0
0
0
0
0
0
0
0
0
0.341463
0.046512
129
3
76
43
0.585366
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
5
670eec4e7a17b15c7020fdb076d1f89d690dc495
9,298
py
Python
integration-test/343-winter-sports-resorts.py
rinnyB/vector-datasource
024909ed8245a4ad4a25c908413ba3602de6c335
[ "MIT" ]
null
null
null
integration-test/343-winter-sports-resorts.py
rinnyB/vector-datasource
024909ed8245a4ad4a25c908413ba3602de6c335
[ "MIT" ]
2
2021-03-31T20:22:37.000Z
2021-12-13T20:50:11.000Z
integration-test/343-winter-sports-resorts.py
rinnyB/vector-datasource
024909ed8245a4ad4a25c908413ba3602de6c335
[ "MIT" ]
null
null
null
# -*- encoding: utf-8 -*- from shapely.wkt import loads as wkt_loads import dsl from . import FixtureTest class WinterSportsResorts(FixtureTest): def test_heavenly_mountain_resort(self): # Heavenly Mountain Resort NV/CA self.generate_fixtures(dsl.way(317721523, wkt_loads('POLYGON ((-119.943792366025 38.9318315811565, -119.943406090453 38.93208195867911, -119.943148633292 38.93258278095151, -119.941582240931 38.93413511885359, -119.942264870716 38.93522659155598, -119.942130842075 38.93529968189359, -119.94225687571 38.9354414601634, -119.942742415121 38.9360757904203, -119.942739720175 38.93611855405371, -119.94269004334 38.9361504870115, -119.938846421733 38.93782376785669, -119.938751200313 38.93785695776939, -119.938692180999 38.93785905397387, -119.93861169195 38.9378174093664, -119.93753964249 38.93630085820809, -119.937377227086 38.93621351438268, -119.937251283283 38.93615272301678, -119.937196036893 38.93612596082418, -119.936677529311 38.93581005438838, -119.936152823354 38.93555745400288, -119.93555310807 38.9353437037623, -119.934459768538 38.9350131896996, -119.933534234301 38.93479049362628, -119.931754581891 38.93350077123829, -119.929458577857 38.9320318552936, -119.928578857699 38.93244917278829, -119.927505999755 38.93211536091647, -119.92529578483 38.93104711044089, -119.923407615934 38.9301457200956, -119.922098680734 38.9298118973828, -119.918815607865 38.93047961111808, -119.916197827296 38.93128078942348, -119.914137900518 38.9320318552936, -119.911799046844 38.9332003660422, -119.911133844376 38.9339013793977, -119.911176694015 38.93436871778529, -119.912099443475 38.93490285495809, -119.912271021694 38.93575408409009, -119.911477090646 38.93622141026889, -119.91106943517 38.93677216363341, -119.910597280657 38.93727302265959, -119.910318443593 38.93785709751631, -119.910039426865 38.93832447970939, -119.909460103339 38.9387917889493, -119.9087519614 38.93895871430539, -119.908194107609 38.93887521674029, -119.907893710978 38.93867489205539, 
-119.907893710978 38.9384413073081, -119.907807832036 38.93804065492078, -119.907443116031 38.9382075820451, -119.906584775777 38.93857476443707, -119.90596251278 38.9391422688583, -119.905919573309 38.9396596707408, -119.906434577462 38.93999351697389, -119.907593314347 38.94022709660789, -119.907657633721 38.9418960890882, -119.907078310194 38.94331470530078, -119.906606245512 38.94404901528998, -119.905447508627 38.9449334609636, -119.904310241478 38.9458680591144, -119.902872577697 38.94785396067778, -119.902314723906 38.94997331214239, -119.902314723906 38.9521259930512, -119.902185905494 38.95359442288199, -119.90132756524 38.9553465090758, -119.899439396344 38.95656458198948, -119.897036133465 38.9577660087491, -119.895812987374 38.9580830048665, -119.8950833757 38.95833328969279, -119.894246595013 38.95885048166019, -119.893495603435 38.95975150710918, -119.892572943807 38.9614199700634, -119.892229607705 38.96317186277449, -119.892036469919 38.9642396990966, -119.892250987609 38.9651405861659, -119.892229607705 38.96562448170126, -119.89229392708 38.9662250858957, -119.89184081685 38.96661091151149, -119.891671664082 38.96690907979579, -119.891907741339 38.9672928063521, -119.891006461615 38.9678266954673, -119.890448607823 38.9676432151904, -119.889483008724 38.96717609614698, -119.889332810409 38.96674229010197, -119.889203991997 38.9661750066383, -119.889311340673 38.9658746700424, -119.889933603671 38.96559109525059, -119.890234000302 38.96512396267509, -119.89089920277 38.9636056906835, -119.891242538871 38.9624877629413, -119.891113810291 38.9616036061589, -119.891221069136 38.9606692154917, -119.891735983457 38.95985160481811, -119.892401185925 38.95918416766697, -119.892358336286 38.95836660971048, -119.891800392663 38.9575489724671, -119.891264008606 38.95728205953967, -119.889470342479 38.95780407909378, -119.889400094223 38.9591308702168, -119.887620531646 38.9606618811363, -119.88701542647 38.96071929864108, -119.886672000537 38.96045225795317, 
-119.886951017264 38.95978489631188, -119.887594660165 38.95868367299398, -119.887895146628 38.95804961486188, -119.888023875208 38.95738216073708, -119.887830737422 38.95636430728829, -119.88862466847 38.95521301366169, -119.889225461732 38.95426191268249, -119.889096822984 38.95299371161568, -119.888860745727 38.9516920948552, -119.888066814679 38.95085768390329, -119.885792280379 38.95010681742608, -119.882530677246 38.94927238781168, -119.880985844452 38.9486717097764, -119.880256232778 38.94696948157569, -119.880342021888 38.94611838707938, -119.881221831877 38.94520049026399, -119.882058612564 38.94459970785727, -119.882530677246 38.94384870522239, -119.882874013347 38.94304759902919, -119.882959892289 38.941962814433, -119.884140009077 38.94032729176409, -119.884590604024 38.93997681771238, -119.885448944278 38.93989332134609, -119.886736409743 38.93992671990438, -119.888324271839 38.939960118447, -119.889590267569 38.9397931954484, -119.890448607823 38.93929249396669, -119.891714603553 38.93862486320029, -119.892057939655 38.93767346976308, -119.892444125395 38.93687236367141, -119.892444125395 38.93640497190737, -119.890234000302 38.93642167200998, -119.888925065101 38.93587091592349, -119.888088194583 38.93473598993108, -119.88722994416 38.93311686169681, -119.887380142476 38.93271631898448, -119.887187004689 38.93221556753409, -119.886693470272 38.93164800768059, -119.886393073641 38.9311807212453, -119.886414543377 38.93024585961498, -119.886457482847 38.9295448101288, -119.887702008842 38.9287937179232, -119.888195543259 38.92847652093838, -119.888302802104 38.92797566967189, -119.88875339705 38.92764190662998, -119.888796336521 38.92695746385529, -119.889010944042 38.9262229768729, -119.889311340673 38.92588906581409, -119.890598806139 38.92528818967369, -119.89128538851 38.92522137875329, -119.892873340438 38.9252046760134, -119.894203745374 38.92522137875329, -119.894804538636 38.92473727724449, -119.895834546941 38.9240361034304, -119.897486728411 
38.92305117768679, -119.897744185572 38.92248361439469, -119.897572517521 38.92159881882498, -119.897894383887 38.92111469259788, -119.89879557378 38.92006291052098, -119.899460776248 38.919996164571, -119.899825582085 38.92004627645759, -119.899868521555 38.91956206974588, -119.900297736598 38.91866060335608, -119.900791181184 38.91804289167699, -119.900662362772 38.91744187919207, -119.902915517168 38.91421957148438, -119.903795327157 38.91400261158058, -119.904717896954 38.9139692008148, -119.906284289315 38.91365186765279, -119.907443116031 38.91336808393178, -119.908601763085 38.913084299076, -119.909481483242 38.91303418227301, -119.910339823496 38.91338478945808, -119.910704629333 38.91376880578709, -119.911369831801 38.91430309807468, -119.911906305689 38.91503770845448, -119.9123354309 38.91557199118939, -119.912893284691 38.91622306420259, -119.913687215739 38.91682408700998, -119.914524086258 38.91725819858469, -119.915275077836 38.91747521842948, -119.916004689509 38.9173417215987, -119.916498223927 38.91744187919207, -119.916948818873 38.9176087849025, -119.917592461774 38.91789262155969, -119.91857962044 38.91824320474318, -119.919180413702 38.91871071618591, -119.92018886244 38.91946191514499, -119.920875624475 38.91976237852361, -119.921669465691 38.91986260259158, -119.922098680734 38.91941187273728, -119.922849672311 38.91882750620009, -119.923708012565 38.9182933178678, -119.92495253856 38.91802618724719, -119.926690598972 38.91859378619469, -119.928063943378 38.91941187273728, -119.928857784595 38.92029669557818, -119.929136801322 38.9215654116356, -119.928793375389 38.92271732158768, -119.929630335739 38.9229176913462, -119.930252598736 38.92306788093371, -119.930746133153 38.9226338747895, -119.930831922263 38.92230001672669, -119.931582913841 38.92204960467639, -119.932612922145 38.92189927315498, -119.933342533819 38.92196608720379, -119.934179314506 38.92218309265029, -119.934865986709 38.92218309265029, -119.935402370766 38.92219979610159, 
-119.936003164028 38.92235005709749, -119.936646986592 38.9226171714404, -119.937140521009 38.9231012874158, -119.937633965594 38.92353529070078, -119.937870042851 38.92365221254888, -119.937977301696 38.92385250977988, -119.937848573116 38.9240361034304, -119.937462297544 38.92431991437778, -119.937011702597 38.924687168671, -119.936754245437 38.92500438259989, -119.936615365894 38.92515715355188, -119.937033172332 38.92547177960891, -119.937483767279 38.92600598378308, -119.938148969747 38.92677380788037, -119.93883564195 38.92755839574049, -119.939500844418 38.92820935877318, -119.940037318306 38.92874361221491, -119.940616641832 38.92931105554499, -119.940680961207 38.92977849406069, -119.940874098993 38.93036270052237, -119.941324693939 38.93072999341319, -119.941560771196 38.93084676364278, -119.942290293038 38.93101370770039, -119.942719508081 38.93121398414699, -119.943234512233 38.93143096130588, -119.943706487083 38.9315812726297, -119.943792366025 38.9318315811565))'), {u'way_area': u'1.8992e+07', u'source': u'openstreetmap.org', u'landuse': u'winter_sports', u'name': u'Heavenly Mountain Resort'})) # noqa self.assert_has_feature( 15, 5467, 12531, 'landuse', {'kind': 'winter_sports', 'sort_rank': 39})
581.125
8,920
0.824801
1,029
9,298
7.441205
0.494655
0.006269
0.00862
0.007836
0
0
0
0
0
0
0
0.839614
0.063885
9,298
15
8,921
619.866667
0.040097
0.006345
0
0
0
0.1
0.957011
0
0
0
0
0
0.1
1
0.1
false
0
0.3
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
1
1
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6735984150c8e3f3481d49e775155f7a88b53640
894
py
Python
17_regex/python/iso_8601.py
pjuangph/python2rust
cc99abe8738e5d1d7d9a34debb2892186ff77965
[ "CC0-1.0" ]
24
2021-07-09T13:56:45.000Z
2022-03-26T19:44:00.000Z
17_regex/python/iso_8601.py
pjuangph/python2rust
cc99abe8738e5d1d7d9a34debb2892186ff77965
[ "CC0-1.0" ]
null
null
null
17_regex/python/iso_8601.py
pjuangph/python2rust
cc99abe8738e5d1d7d9a34debb2892186ff77965
[ "CC0-1.0" ]
3
2021-07-09T17:16:31.000Z
2022-03-24T15:44:44.000Z
"Regex to check for ISO 8601 conform dates." import re def is_8601_date(line): # optionally use functools.cache RE = re.compile( r"^(?:(?=[02468][048]00|[13579][26]00|[0-9][0-9]0[48]|[0-9][0-9][2468][048]|[0-9][0-9][13579][26])\d{4}(?:(-|)(?:(?:00[1-9]|0[1-9][0-9]|[1-2][0-9][0-9]|3[0-5][0-9]|36[0-6])|(?:01|03|05|07|08|10|12)(?:\1(?:0[1-9]|[12][0-9]|3[01]))?|(?:04|06|09|11)(?:\1(?:0[1-9]|[12][0-9]|30))?|02(?:\1(?:0[1-9]|[12][0-9]))?|W(?:0[1-9]|[1-4][0-9]|5[0-3])(?:\1[1-7])?))?)$|^(?:(?![02468][048]00|[13579][26]00|[0-9][0-9]0[48]|[0-9][0-9][2468][048]|[0-9][0-9][13579][26])\d{4}(?:(-|)(?:(?:00[1-9]|0[1-9][0-9]|[1-2][0-9][0-9]|3[0-5][0-9]|36[0-5])|(?:01|03|05|07|08|10|12)(?:\2(?:0[1-9]|[12][0-9]|3[01]))?|(?:04|06|09|11)(?:\2(?:0[1-9]|[12][0-9]|30))?|(?:02)(?:\2(?:0[1-9]|1[0-9]|2[0-8]))?|W(?:0[1-9]|[1-4][0-9]|5[0-3])(?:\2[1-7])?))?)$" ) return RE.match(line)
81.272727
722
0.448546
226
894
1.765487
0.243363
0.140351
0.075188
0.080201
0.666667
0.666667
0.666667
0.566416
0.511278
0.511278
0
0.340828
0.05481
894
10
723
89.4
0.131361
0.082774
0
0
0
0.142857
0.87355
0.824826
0
0
0
0
0
1
0.142857
false
0
0.142857
0
0.428571
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5