hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6949c8d294973b9c15f0cdaf6df462ee0fe3f120
| 22
|
py
|
Python
|
utils/db_api/__init__.py
|
zotov-vs/tg_shop
|
e640e7cfaeac0af1de33a62fb5e6da28d8843651
|
[
"MIT"
] | 1
|
2021-12-16T10:41:16.000Z
|
2021-12-16T10:41:16.000Z
|
utils/db_api/__init__.py
|
zotov-vs/tg_shop
|
e640e7cfaeac0af1de33a62fb5e6da28d8843651
|
[
"MIT"
] | 6
|
2021-10-11T06:03:48.000Z
|
2021-10-17T09:42:05.000Z
|
App(BE)/main/models/__init__.py
|
osamhack2021/AI_APP_handylib_devlib
|
62cf67e6df280217e3715e2aa425636cefa7dd6f
|
[
"MIT"
] | null | null | null |
from . import database
| 22
| 22
| 0.818182
| 3
| 22
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
15fb43831ed30c20a10a9d980a0fdb47897df9ba
| 30
|
py
|
Python
|
SampleAIs/Sample_Sophie/__init__.py
|
YSabarad/monopyly
|
0460f2452c83846b6b9e3b234be411e12a86d69c
|
[
"MIT"
] | 4
|
2015-11-04T21:18:40.000Z
|
2020-12-26T21:15:23.000Z
|
SampleAIs/Sample_Sophie/__init__.py
|
YSabarad/monopyly
|
0460f2452c83846b6b9e3b234be411e12a86d69c
|
[
"MIT"
] | 2
|
2021-08-09T18:19:58.000Z
|
2021-08-10T14:44:54.000Z
|
SampleAIs/Sample_Sophie/__init__.py
|
YSabarad/monopyly
|
0460f2452c83846b6b9e3b234be411e12a86d69c
|
[
"MIT"
] | 6
|
2015-08-01T17:54:17.000Z
|
2022-02-28T00:00:21.000Z
|
from .sophie import SophieAI
| 10
| 28
| 0.8
| 4
| 30
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 30
| 2
| 29
| 15
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c6013d78d67df4cefccabade66230bcf85da9cde
| 343
|
py
|
Python
|
ex109/teste.py
|
almmessias/CursoPython
|
4cec6946f32002cbd5d3b802df11ea1ba74169f5
|
[
"MIT"
] | null | null | null |
ex109/teste.py
|
almmessias/CursoPython
|
4cec6946f32002cbd5d3b802df11ea1ba74169f5
|
[
"MIT"
] | null | null | null |
ex109/teste.py
|
almmessias/CursoPython
|
4cec6946f32002cbd5d3b802df11ea1ba74169f5
|
[
"MIT"
] | null | null | null |
import moeda
n = float(input('Digite o preço: R$'))
print (f'O dobro de {moeda.moeda(n)} é {moeda.dobro(n, True)}')
print (f'A metade de {moeda.moeda(n)} é {moeda.metade(n, True)}')
print (f'O aumento de 10% de {moeda.moeda(n)} é {moeda.aumento(n, 10, True)}')
print (f'O desconto de 13% de {moeda.moeda(n)} é {moeda.desconto(n, 13, True)}')
| 42.875
| 80
| 0.650146
| 67
| 343
| 3.328358
| 0.313433
| 0.134529
| 0.215247
| 0.233184
| 0.340807
| 0.340807
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.137026
| 343
| 7
| 81
| 49
| 0.726351
| 0
| 0
| 0
| 0
| 0.333333
| 0.758017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.666667
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
ba3469f6edcb9686d5729fdce8d6db4a402b74d8
| 148
|
py
|
Python
|
pkgs/ops-pkg/src/genie/libs/ops/msdp/ios/msdp.py
|
jbronikowski/genielibs
|
200a34e5fe4838a27b5a80d5973651b2e34ccafb
|
[
"Apache-2.0"
] | 94
|
2018-04-30T20:29:15.000Z
|
2022-03-29T13:40:31.000Z
|
pkgs/ops-pkg/src/genie/libs/ops/msdp/ios/msdp.py
|
jbronikowski/genielibs
|
200a34e5fe4838a27b5a80d5973651b2e34ccafb
|
[
"Apache-2.0"
] | 67
|
2018-12-06T21:08:09.000Z
|
2022-03-29T18:00:46.000Z
|
pkgs/ops-pkg/src/genie/libs/ops/msdp/ios/msdp.py
|
jbronikowski/genielibs
|
200a34e5fe4838a27b5a80d5973651b2e34ccafb
|
[
"Apache-2.0"
] | 49
|
2018-06-29T18:59:03.000Z
|
2022-03-10T02:07:59.000Z
|
# super class
from genie.libs.ops.msdp.iosxe.msdp import Msdp as MsdpXE
class Msdp(MsdpXE):
'''
Msdp Ops Object
'''
pass
| 18.5
| 58
| 0.601351
| 20
| 148
| 4.45
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.297297
| 148
| 8
| 59
| 18.5
| 0.855769
| 0.189189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ba3faff0153dfc6fc7c4666344478aeca4379d35
| 246
|
py
|
Python
|
gamla/data.py
|
0xnurl/gamla
|
f3903ef5138a6fd94b910abf6ee7665e744d8537
|
[
"MIT"
] | null | null | null |
gamla/data.py
|
0xnurl/gamla
|
f3903ef5138a6fd94b910abf6ee7665e744d8537
|
[
"MIT"
] | null | null | null |
gamla/data.py
|
0xnurl/gamla
|
f3903ef5138a6fd94b910abf6ee7665e744d8537
|
[
"MIT"
] | null | null | null |
import dataclasses
import json
import dataclasses_json
def get_encode_config():
return dataclasses.field(
metadata=dataclasses_json.config(
encoder=lambda lst: sorted(lst, key=json.dumps, reverse=False)
)
)
| 18.923077
| 74
| 0.691057
| 28
| 246
| 5.928571
| 0.642857
| 0.204819
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.231707
| 246
| 12
| 75
| 20.5
| 0.878307
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| true
| 0
| 0.333333
| 0.111111
| 0.555556
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
baa96ababbc004f7b0ec9bc773951f114fc9b91e
| 86
|
py
|
Python
|
train/__init__.py
|
SeJV/ComparisonRLapproaches
|
e1988a97ed5fab10c847350d607e9feafeced61e
|
[
"MIT"
] | 2
|
2020-12-14T12:59:40.000Z
|
2020-12-14T14:08:30.000Z
|
train/__init__.py
|
SeJV/ComparisonRLapproaches
|
e1988a97ed5fab10c847350d607e9feafeced61e
|
[
"MIT"
] | null | null | null |
train/__init__.py
|
SeJV/ComparisonRLapproaches
|
e1988a97ed5fab10c847350d607e9feafeced61e
|
[
"MIT"
] | null | null | null |
from train.train_agent import train_agent
from train.train_agents import train_agents
| 28.666667
| 43
| 0.883721
| 14
| 86
| 5.142857
| 0.357143
| 0.25
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 86
| 2
| 44
| 43
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
baac3262872d073b7970c4a5798360c41e0f8d75
| 12,340
|
py
|
Python
|
snooper/db_hadler.py
|
tehreem09/web-snooper
|
bd02ef0aa38881321da8dc76b28560a7381b3841
|
[
"MIT"
] | null | null | null |
snooper/db_hadler.py
|
tehreem09/web-snooper
|
bd02ef0aa38881321da8dc76b28560a7381b3841
|
[
"MIT"
] | null | null | null |
snooper/db_hadler.py
|
tehreem09/web-snooper
|
bd02ef0aa38881321da8dc76b28560a7381b3841
|
[
"MIT"
] | null | null | null |
import json
def search_records():
cleaned_data = open('lifetech_cleandata.json')
data = json.load(cleaned_data)
my_dic={}
for record in data:
number = record.get("number")
cnic = record.get("cnic")
my_dic=record
my_dic= basic_info_merger(my_dic)
result01 = search_taxpayers_record(cnic)
result02 = search_redbook_record(cnic, number)
result03 = search_terrorists_record(cnic, number)
print(f'[+] searching for number > {number}')
result = {}
if result01 is not None:
result = merge_found_records(my_dic, result01)
if result02 and result03 is not None:
result = merge_found_records(my_dic, result01, result02, result03)
elif result02 is not None:
result = merge_found_records(my_dic, result01, result02)
elif result03 is not None:
result = merge_found_records(my_dic, result01, result03)
if result02 is not None:
result = result02
if result03 is not None:
result = merge_found_records(my_dic ,result02, result03)
print(result)
elif result03 is not None:
result = merge_found_records(my_dic, result03)
else:
result = (my_dic)
main_dbt_handler(number, result)
# def basic_info_merger(dict):
# with open ('basic_number_info.json', 'r') as basic_num_info:
# num_info = json.load(basic_num_info)
# for data in num_info:
# # print(data)
# number = str(data.get('number'))[2:-2]
# # print(number)
# number2 = '+92'+ dict['number']
# # print ("with" + number2)
# if ('+92'+dict['number'])==number:
# print("number matcheddd")
# new_dict = merge_found_records(dict, data)
# return new_dict
# return dict
def basic_info_merger(dict):
with open ('basic_number_info.json', 'r') as basic_num_info:
num_info = json.load(basic_num_info)
for data in num_info:
# print(data)
number = data.get('number')
# print(number)
number2 = '+92'+ dict['number']
# print ("with" + number2)
if ('+92'+dict['number'])==number:
print("number matcheddd")
new_dict = merge_found_records(dict, data)
return new_dict
return dict
def merge_found_records(*dicts):
return {
k: [d[k] for d in dicts if k in d]
for k in set(k for d in dicts for k in d)
}
def search_taxpayers_record(cnic):
with open('snooper/sheet7.json', 'r') as tax_payers:
tax_payers = json.load(tax_payers)
for records in tax_payers['Sheet1']:
tax_payers_dictionary = {}
if cnic == records['NTN']:
# tax_payers_dictionary['CNIC'] = cnic
tax_payers_dictionary['BUSINESS_NAME'] = records['BUSINESS_NAME']
tax_payers_dictionary['NAME REGISTERED TO'] = records['NAME']
return tax_payers_dictionary
def search_redbook_record(cnic, number):
with open('snooper/redbook.json', 'r') as redbook:
redbook = json.load(redbook)
for data2 in redbook:
redbook_dictionary = {}
if cnic == (data2['CNIC']):
# redbook_dictionary['CNIC'] = cnic
redbook_dictionary['F/NAME'] = data2['PARENTAGE']
redbook_dictionary['ADDRESS'] = data2['ADDRESS']
redbook_dictionary['PHONE NUM'] = data2['PHONE NUM']
redbook_dictionary['FIR'] = data2['FIR no.']
return redbook_dictionary
def search_terrorists_record(cnic, number):
with open('snooper/data.json', 'r') as terrorists:
terrorists = json.load(terrorists)
for data2 in terrorists:
terrorists_dictionary = {}
if cnic == (data2['CNIC']):
# terrorists_dictionary['CNIC'] = cnic
terrorists_dictionary['F/NAME'] = data2['FNAME']
terrorists_dictionary['ADDRESS'] = data2['ADDRESS']
terrorists_dictionary['REWARD'] = data2['REWARD']
terrorists_dictionary['FIR'] = data2['FIR']
terrorists_dictionary['RELIGIOUS/POLITICAL AFFILIATION'] = data2['RELIGIOUS/POLITICAL AFFILIATION']
return terrorists_dictionary
def main_dbt_handler(number, record):
if record:
with open('main_dbt.json', 'a+') as main_dbt:
json.dump(record, main_dbt, indent=4)
main_dbt.write('\n')
main_dbt.close()
print(str(record)+'\n')
else:
print('[-] No criminal record found....\n[-] No business or tax payers record fount....\n')
search_records()
# import json
# def search_records():
# cleaned_data = open('lifetech_cleandata.json')
# data = json.load(cleaned_data)
# my_dic={}
# for record in data:
# number = record.get("number")
# cnic = record.get("cnic")
# my_dic=record
# lifetech_dic = {}
# lifetech_dic['NAME'] = record['name']
# lifetech_dic['CNIC'] = record['cnic']
# lifetech_dic['PHONE NUM'] = record['number']
# if 'city' in my_dic:
# lifetech_dic['CITY'] = record['city']
# if 'address'in my_dic:
# lifetech_dic['ADDRESS'] = record['address']
# result01 = search_taxpayers_record(cnic)
# result02 = search_redbook_record(cnic, number)
# result03 = search_terrorists_record(cnic, number)
# print(f'[+] searching for number > {number}')
# result = {}
# if result01 is not None:
# result = merge_found_records(lifetech_dic, result01)
# if result02 and result03 is not None:
# result = merge_found_records(lifetech_dic, result01, result02, result03)
# elif result02 is not None:
# result = merge_found_records(lifetech_dic, result01, result02)
# elif result03 is not None:
# result = merge_found_records(lifetech_dic, result01, result03)
# elif result02 is not None:
# result = result02
# if result03 is not None:
# result = merge_found_records(lifetech_dic ,result02, result03)
# print(result)
# elif result03 is not None:
# result = merge_found_records(lifetech_dic, result03)
# else:
# result= lifetech_dic
# main_dbt_handler(number, result)
# def merge_found_records(*dicts):
# return {
# k: [d[k] for d in dicts if k in d]
# for k in set(k for d in dicts for k in d)
# }
# def search_taxpayers_record(cnic):
# with open('snooper/sheet7.json', 'r') as tax_payers:
# tax_payers = json.load(tax_payers)
# for records in tax_payers['Sheet1']:
# tax_payers_dictionary = {}
# if cnic == records['NTN']:
# # tax_payers_dictionary['CNIC'] = cnic
# tax_payers_dictionary['BUSINESS_NAME'] = records['BUSINESS_NAME']
# tax_payers_dictionary['NAME REGISTERED TO'] = records['NAME']
# return tax_payers_dictionary
# def search_redbook_record(cnic, number):
# with open('snooper/redbook.json', 'r') as redbook:
# redbook = json.load(redbook)
# for data2 in redbook:
# redbook_dictionary = {}
# if cnic == (data2['CNIC']):
# # redbook_dictionary['CNIC'] = cnic
# redbook_dictionary['F/NAME'] = data2['PARENTAGE']
# redbook_dictionary['ADDRESS'] = data2['ADDRESS']
# redbook_dictionary['PHONE NUM'] = data2['PHONE NUM']
# redbook_dictionary['FIR'] = data2['FIR no.']
# return redbook_dictionary
# def search_terrorists_record(cnic, number):
# with open('snooper/data.json', 'r') as terrorists:
# terrorists = json.load(terrorists)
# for data2 in terrorists:
# terrorists_dictionary = {}
# if cnic == (data2['CNIC']):
# # terrorists_dictionary['CNIC'] = cnic
# terrorists_dictionary['F/NAME'] = data2['FNAME']
# terrorists_dictionary['ADDRESS'] = data2['ADDRESS']
# terrorists_dictionary['REWARD'] = data2['REWARD']
# terrorists_dictionary['FIR'] = data2['FIR']
# terrorists_dictionary['RELIGIOUS/POLITICAL AFFILIATION'] = data2['RELIGIOUS/POLITICAL AFFILIATION']
# return terrorists_dictionary
# def main_dbt_handler(number, record):
# if record:
# with open('main_dbt.json', 'a+') as main_dbt:
# json.dump(record, main_dbt, indent=4)
# main_dbt.write('\n')
# main_dbt.close()
# print(str(record)+'\n')
# else:
# print('[-] No criminal record found....\n[-] No business or tax payers record fount....\n')
# search_records()
# import json
# def search_records():
# cleaned_data = open('lifetech_cleandata.json')
# data = json.load(cleaned_data)
# for record in data:
# number = record.get("number")
# cnic = record.get("cnic")
# result01 = search_taxpayers_record(cnic)
# result02 = search_redbook_record(cnic, number)
# result03 = search_terrorists_record(cnic, number)
# print(f'[+] searching for number > {number}')
# result = {}
# if result01 is not None:
# result = result01
# if result02 and result03 is not None:
# result = merge_found_records(result01, result02, result03)
# elif result02 is not None:
# result = merge_found_records(result01, result02)
# elif result03 is not None:
# result = merge_found_records(result01, result03)
# elif result02 is not None:
# result = result02
# if result03 is not None:
# result = merge_found_records(result02, result03)
# elif result03 is not None:
# result = merge_found_records(result03)
# main_dbt_handler(number, result)
# def merge_found_records(*dicts):
# return {
# k: [d[k] for d in dicts if k in d]
# for k in set(k for d in dicts for k in d)
# }
# def search_taxpayers_record(cnic):
# with open('sheet7.json', 'r') as tax_payers:
# tax_payers = json.load(tax_payers)
# for records in tax_payers['Sheet1']:
# tax_payers_dictionary = {}
# if cnic == records['NTN']:
# # tax_payers_dictionary['CNIC'] = cnic
# tax_payers_dictionary['BUSINESS_NAME'] = records['BUSINESS_NAME']
# tax_payers_dictionary['NAME REGISTERED TO'] = records['NAME']
# return tax_payers_dictionary
# def search_redbook_record(cnic, number):
# with open('redbook.json', 'r') as redbook:
# redbook = json.load(redbook)
# for data2 in redbook:
# redbook_dictionary = {}
# if cnic == (data2['CNIC']):
# # redbook_dictionary['CNIC'] = cnic
# redbook_dictionary['F/NAME'] = data2['PARENTAGE']
# redbook_dictionary['ADDRESS'] = data2['ADDRESS']
# redbook_dictionary['PHONE NUM'] = data2['PHONE NUM']
# redbook_dictionary['FIR'] = data2['FIR no.']
# return redbook_dictionary
# def search_terrorists_record(cnic, number):
# with open('data.json', 'r') as terrorists:
# terrorists = json.load(terrorists)
# for data2 in terrorists:
# terrorists_dictionary = {}
# if cnic == (data2['CNIC']):
# # terrorists_dictionary['CNIC'] = cnic
# terrorists_dictionary['F/NAME'] = data2['FNAME']
# terrorists_dictionary['ADDRESS'] = data2['ADDRESS']
# terrorists_dictionary['REWARD'] = data2['REWARD']
# terrorists_dictionary['FIR'] = data2['FIR']
# return terrorists_dictionary
# def main_dbt_handler(number, record):
# if record:
# with open('main_dbt.json', 'a+') as main_dbt:
# json.dump(record, main_dbt, indent=4)
# main_dbt.write('\n')
# main_dbt.close()
# print(str(record)+'\n')
# else:
# print('[-] No criminal record found....\n[-] No business or tax payers record fount....\n')
# search_records()
| 33.172043
| 113
| 0.587439
| 1,390
| 12,340
| 5.022302
| 0.066906
| 0.038676
| 0.053574
| 0.045122
| 0.960321
| 0.955164
| 0.955164
| 0.95058
| 0.95058
| 0.944277
| 0
| 0.022722
| 0.290276
| 12,340
| 371
| 114
| 33.261456
| 0.774378
| 0.644003
| 0
| 0.065934
| 0
| 0
| 0.123421
| 0.010722
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.010989
| 0.010989
| 0.153846
| 0.054945
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bad12262ef46a460162421fa3fc24f38cea101a7
| 25
|
py
|
Python
|
testsuite/modulegraph-dir/package_with_star_import/__init__.py
|
xoviat/modulegraph2
|
766d00bdb40e5b2fe206b53a87b1bce3f9dc9c2a
|
[
"MIT"
] | 9
|
2020-03-22T14:48:01.000Z
|
2021-05-30T12:18:12.000Z
|
testsuite/modulegraph-dir/package_with_star_import/__init__.py
|
xoviat/modulegraph2
|
766d00bdb40e5b2fe206b53a87b1bce3f9dc9c2a
|
[
"MIT"
] | 15
|
2020-01-06T10:02:32.000Z
|
2021-05-28T12:22:44.000Z
|
testsuite/modulegraph-dir/package_with_star_import/__init__.py
|
ronaldoussoren/modulegraph2
|
b6ab1766b0098651b51083235ff8a18a5639128b
|
[
"MIT"
] | 4
|
2020-05-10T18:51:41.000Z
|
2021-04-07T14:03:12.000Z
|
from no_imports import *
| 12.5
| 24
| 0.8
| 4
| 25
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
baf466052d1cb9e8f5732f162eeb87ffaa0dea2b
| 65
|
py
|
Python
|
app/db/models/__init__.py
|
EleutherAGI/eegi-backend
|
6e013a4928f1cdea4ef495e82fe641f917708cde
|
[
"MIT"
] | null | null | null |
app/db/models/__init__.py
|
EleutherAGI/eegi-backend
|
6e013a4928f1cdea4ef495e82fe641f917708cde
|
[
"MIT"
] | 2
|
2021-05-15T15:33:31.000Z
|
2021-05-28T15:55:21.000Z
|
app/db/models/__init__.py
|
EleutherAGI/eegi-backend
|
6e013a4928f1cdea4ef495e82fe641f917708cde
|
[
"MIT"
] | 2
|
2021-05-15T15:08:25.000Z
|
2021-05-16T16:05:55.000Z
|
from .users import *
from .summaries import *
from .keys import *
| 21.666667
| 24
| 0.738462
| 9
| 65
| 5.333333
| 0.555556
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169231
| 65
| 3
| 25
| 21.666667
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2418d43fc6f298cdbf67fbaa5c14936b1a133fb3
| 178
|
py
|
Python
|
paa191t1/tests/pph/test_pph_median.py
|
dmmoura/PAA-2021
|
435005f6494ece76f03807fb524e0d4a3e1d7222
|
[
"Apache-2.0"
] | null | null | null |
paa191t1/tests/pph/test_pph_median.py
|
dmmoura/PAA-2021
|
435005f6494ece76f03807fb524e0d4a3e1d7222
|
[
"Apache-2.0"
] | null | null | null |
paa191t1/tests/pph/test_pph_median.py
|
dmmoura/PAA-2021
|
435005f6494ece76f03807fb524e0d4a3e1d7222
|
[
"Apache-2.0"
] | null | null | null |
from paa191t1.pph.pph_median import pph_median
from paa191t1.tests.pph import TestPPHBase
class TestPPHMedian(TestPPHBase):
def setUp(self):
self.pph = pph_median
| 19.777778
| 46
| 0.764045
| 24
| 178
| 5.541667
| 0.5
| 0.203008
| 0.180451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 0.168539
| 178
| 8
| 47
| 22.25
| 0.844595
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
242c4a7387a45199ab823721a5e84a8b672f4b3c
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/numpy/polynomial/setup.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/numpy/polynomial/setup.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/numpy/polynomial/setup.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/75/74/1f/cd550c3fd39c07a88abf9ca8d462c4c05077809e3ca61220a3837e78cd
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.427083
| 0
| 96
| 1
| 96
| 96
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2454b360d09c5f23627ef46676a5ffb7718dbdfb
| 69
|
py
|
Python
|
Reading Data/lesson-12-vancouver-crime-information/tests/test_late_crimes_2.py
|
danielgarm/Data-Science-and-Machine-Learning
|
fa3e85cc42eb2e9f964ab5abb34d1c93e16d1cd9
|
[
"MIT"
] | null | null | null |
Reading Data/lesson-12-vancouver-crime-information/tests/test_late_crimes_2.py
|
danielgarm/Data-Science-and-Machine-Learning
|
fa3e85cc42eb2e9f964ab5abb34d1c93e16d1cd9
|
[
"MIT"
] | 2
|
2022-01-11T21:04:51.000Z
|
2022-01-11T21:05:05.000Z
|
Reading Data/lesson-12-vancouver-crime-information/tests/test_late_crimes_2.py
|
danielgarm/Data-Science-and-Machine-Learning
|
fa3e85cc42eb2e9f964ab5abb34d1c93e16d1cd9
|
[
"MIT"
] | null | null | null |
def test_late_crimes_2():
assert late_crimes.loc[7, 'HOUR'] == 20
| 34.5
| 43
| 0.695652
| 12
| 69
| 3.666667
| 0.833333
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 0.144928
| 69
| 2
| 43
| 34.5
| 0.677966
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0337e71422a570efc9ae28097c6195b11be45c20
| 8,509
|
py
|
Python
|
tests/executors/multicore_executor_test.py
|
allenai/tango
|
80c90caefae4ad1c3f8472718ddada912cd8fcf9
|
[
"Apache-2.0"
] | 52
|
2021-09-24T17:52:34.000Z
|
2022-03-29T22:55:02.000Z
|
tests/executors/multicore_executor_test.py
|
allenai/tango
|
80c90caefae4ad1c3f8472718ddada912cd8fcf9
|
[
"Apache-2.0"
] | 90
|
2021-09-29T04:23:29.000Z
|
2022-03-31T21:23:02.000Z
|
tests/executors/multicore_executor_test.py
|
allenai/tango
|
80c90caefae4ad1c3f8472718ddada912cd8fcf9
|
[
"Apache-2.0"
] | 8
|
2021-11-13T01:56:22.000Z
|
2022-02-27T03:29:42.000Z
|
import time
import pytest
from tango.common.logging import initialize_logging
from tango.common.testing import TangoTestCase
from tango.executors.multicore_executor import MulticoreExecutor
from tango.step_graph import StepGraph
from tango.workspaces import LocalWorkspace
from test_fixtures.package.steps import SleepPrintMaybeFail
class TestMulticoreExecutor(TangoTestCase):
def setup_method(self):
super().setup_method()
initialize_logging()
def test_simple_execution_in_parallel(self):
step_graph = StepGraph(
{
"step1": SleepPrintMaybeFail(string="hello", seconds=5, fail=False),
"step2": SleepPrintMaybeFail(string="hi", seconds=5, fail=False),
}
)
executor = MulticoreExecutor(workspace=LocalWorkspace(self.TEST_DIR), parallelism=2)
start_time = time.time()
executor.execute_step_graph(step_graph)
end_time = time.time()
time_taken = end_time - start_time
assert time_taken < 10 # TODO: will this be flaky?
assert len(executor.workspace.step_cache) == 2
def test_more_processes_ready_than_parallelism(self):
step_graph = StepGraph(
{
"step1": SleepPrintMaybeFail(string="hello", seconds=5, fail=False),
"step2": SleepPrintMaybeFail(string="hi", seconds=5, fail=False),
"step3": SleepPrintMaybeFail(string="howdy", seconds=5, fail=False),
}
)
executor = MulticoreExecutor(workspace=LocalWorkspace(self.TEST_DIR), parallelism=2)
start_time = time.time()
executor.execute_step_graph(step_graph)
end_time = time.time()
time_taken = end_time - start_time
assert 10 < time_taken < 20 # TODO: will this be flaky?
assert len(executor.workspace.step_cache) == 3
@pytest.mark.parametrize("parallelism", [1, 2, 3])
def test_failing_step_no_downstream_task(self, parallelism):
step_graph = StepGraph.from_params(
{
"step1": {
"type": "sleep-print-maybe-fail",
"string": "string_to_pass_down",
"seconds": 0,
"fail": False,
},
"step2": {
"type": "sleep-print-maybe-fail",
"string": {"type": "ref", "ref": "step1"},
"seconds": 0,
"fail": False,
},
"step3": {
"type": "sleep-print-maybe-fail",
"string": "This is going to fail!",
"seconds": 0,
"fail": True,
},
}
)
executor = MulticoreExecutor(
workspace=LocalWorkspace(self.TEST_DIR),
parallelism=parallelism,
include_package=["test_fixtures.package.steps"],
)
executor.execute_step_graph(step_graph)
assert len(executor.workspace.step_cache) == 2
@pytest.mark.parametrize("parallelism", [1, 2, 3])
def test_failing_step_with_downstream_task(self, parallelism):
step_graph = StepGraph.from_params(
{
"step1": {
"type": "sleep-print-maybe-fail",
"string": "string_to_pass_down",
"seconds": 0,
"fail": True,
},
"step2": {
"type": "sleep-print-maybe-fail",
"string": {"type": "ref", "ref": "step1"},
"seconds": 0,
"fail": False,
},
"step3": {
"type": "sleep-print-maybe-fail",
"string": "This is going to fail!",
"seconds": 0,
"fail": False,
},
}
)
executor = MulticoreExecutor(
workspace=LocalWorkspace(self.TEST_DIR),
parallelism=parallelism,
include_package=["test_fixtures.package.steps"],
)
executor.execute_step_graph(step_graph)
assert len(executor.workspace.step_cache) == 1
@pytest.mark.parametrize("parallelism", [1, 2, 3])
def test_failing_step_with_further_downstream_task(self, parallelism):
    """A failure must propagate through the whole dependency chain.

    step1 fails; step2 depends on step1 and step3 depends on step2,
    so no step result ends up in the step cache.
    """

    def spec(string, fail):
        # Shorthand for a sleep-print-maybe-fail step that finishes instantly.
        return {
            "type": "sleep-print-maybe-fail",
            "string": string,
            "seconds": 0,
            "fail": fail,
        }

    step_graph = StepGraph.from_params(
        {
            "step1": spec("string_to_pass_down", True),
            "step2": spec({"type": "ref", "ref": "step1"}, False),
            "step3": spec({"type": "ref", "ref": "step2"}, False),
        }
    )
    executor = MulticoreExecutor(
        workspace=LocalWorkspace(self.TEST_DIR),
        parallelism=parallelism,
        include_package=["test_fixtures.package.steps"],
    )
    executor.execute_step_graph(step_graph)
    assert len(executor.workspace.step_cache) == 0
def test_uncacheable_failing_step_no_downstream_task(self):
    """An uncacheable failing leaf step must not affect other steps.

    step3 fails with ``cache_results: False``; step1 and step2 still
    succeed, so exactly two results end up in the step cache.
    """

    def spec(string, fail, cacheable=True):
        # Shorthand for a sleep-print-maybe-fail step that finishes instantly.
        step = {
            "type": "sleep-print-maybe-fail",
            "string": string,
            "seconds": 0,
            "fail": fail,
        }
        if not cacheable:
            step["cache_results"] = False
        return step

    step_graph = StepGraph.from_params(
        {
            "step1": spec("string_to_pass_down", False),
            "step2": spec({"type": "ref", "ref": "step1"}, False),
            "step3": spec("This is going to fail!", True, cacheable=False),
        }
    )
    executor = MulticoreExecutor(
        workspace=LocalWorkspace(self.TEST_DIR),
        parallelism=2,
        include_package=["test_fixtures.package.steps"],
    )
    executor.execute_step_graph(step_graph)
    assert len(executor.workspace.step_cache) == 2
def test_uncacheable_failing_step_with_downstream_task(self):
    """An uncacheable failing step still blocks its dependents.

    step1 fails with ``cache_results: False`` and step2 depends on it,
    so only one step result ends up in the step cache.
    """

    def spec(string, fail, cacheable=True):
        # Shorthand for a sleep-print-maybe-fail step that finishes instantly.
        step = {
            "type": "sleep-print-maybe-fail",
            "string": string,
            "seconds": 0,
            "fail": fail,
        }
        if not cacheable:
            step["cache_results"] = False
        return step

    step_graph = StepGraph.from_params(
        {
            "step1": spec("string_to_pass_down", True, cacheable=False),
            "step2": spec({"type": "ref", "ref": "step1"}, False),
            "step3": spec("This is going to fail!", False),
        }
    )
    executor = MulticoreExecutor(
        workspace=LocalWorkspace(self.TEST_DIR),
        parallelism=2,
        include_package=["test_fixtures.package.steps"],
    )
    executor.execute_step_graph(step_graph)
    assert len(executor.workspace.step_cache) == 1
@pytest.mark.parametrize("parallelism", [1, 2, 3])
def test_steps_with_their_own_multiprocessing(self, parallelism):
    """Steps that spawn their own worker processes run under the executor.

    All three steps complete, so three results end up in the step cache.
    """
    params = {
        name: {"type": "multiprocessing_step", "num_proc": num_proc}
        for name, num_proc in (("step1", 2), ("step2", 3), ("step3", 1))
    }
    step_graph = StepGraph.from_params(params)
    executor = MulticoreExecutor(
        workspace=LocalWorkspace(self.TEST_DIR),
        parallelism=parallelism,
        include_package=["test_fixtures.package.steps"],
    )
    executor.execute_step_graph(step_graph)
    assert len(executor.workspace.step_cache) == 3
| 35.161157
| 92
| 0.497943
| 743
| 8,509
| 5.500673
| 0.133244
| 0.055053
| 0.051382
| 0.069733
| 0.862246
| 0.834108
| 0.826768
| 0.820406
| 0.820406
| 0.801566
| 0
| 0.015673
| 0.385122
| 8,509
| 241
| 93
| 35.307054
| 0.765482
| 0.005994
| 0
| 0.65566
| 0
| 0
| 0.163217
| 0.05819
| 0
| 0
| 0
| 0.004149
| 0.04717
| 1
| 0.042453
| false
| 0.023585
| 0.037736
| 0
| 0.084906
| 0.070755
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cee4ceb0af01d56f99e0400de26b6a95df4511da
| 70
|
py
|
Python
|
torchrl/runners/__init__.py
|
srikarym/torchrl
|
fee98e78ac1657a2c9a4063dd8d63ba207a121e2
|
[
"Apache-2.0"
] | 3
|
2019-02-27T19:00:32.000Z
|
2020-07-19T03:18:28.000Z
|
torchrl/runners/__init__.py
|
srikarym/torchrl
|
fee98e78ac1657a2c9a4063dd8d63ba207a121e2
|
[
"Apache-2.0"
] | null | null | null |
torchrl/runners/__init__.py
|
srikarym/torchrl
|
fee98e78ac1657a2c9a4063dd8d63ba207a121e2
|
[
"Apache-2.0"
] | null | null | null |
from .base_runner import BaseRunner
from .gym_runner import GymRunner
| 23.333333
| 35
| 0.857143
| 10
| 70
| 5.8
| 0.7
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 70
| 2
| 36
| 35
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
30679d7bee3f8456ed467600e053b128e8b6036a
| 49
|
py
|
Python
|
gather_agent/handlers/__init__.py
|
burmanm/gather_agent
|
37d9eb80cf717d12a132ff1c98a0c80eeeaa5a66
|
[
"Apache-2.0"
] | null | null | null |
gather_agent/handlers/__init__.py
|
burmanm/gather_agent
|
37d9eb80cf717d12a132ff1c98a0c80eeeaa5a66
|
[
"Apache-2.0"
] | null | null | null |
gather_agent/handlers/__init__.py
|
burmanm/gather_agent
|
37d9eb80cf717d12a132ff1c98a0c80eeeaa5a66
|
[
"Apache-2.0"
] | null | null | null |
from rhqmetrics_handler import RHQMetricsHandler
| 24.5
| 48
| 0.918367
| 5
| 49
| 8.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.977778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
061872db87a6479a4c69671bb3e56ad3b315b346
| 152
|
py
|
Python
|
pyblnet/__init__.py
|
henfri/pyblnet
|
0a3a59ea39ab569d4b59be5a918736dc238bcf13
|
[
"MIT"
] | null | null | null |
pyblnet/__init__.py
|
henfri/pyblnet
|
0a3a59ea39ab569d4b59be5a918736dc238bcf13
|
[
"MIT"
] | null | null | null |
pyblnet/__init__.py
|
henfri/pyblnet
|
0a3a59ea39ab569d4b59be5a918736dc238bcf13
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .blnet_web import BLNETWeb, test_blnet
from .blnet_conn import BLNETDirect
from .blnet import BLNET
| 21.714286
| 43
| 0.743421
| 23
| 152
| 4.782609
| 0.652174
| 0.245455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 0.138158
| 152
| 6
| 44
| 25.333333
| 0.832061
| 0.276316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
06499d2a878e935b1bbb4ea1ae0081bd6e6ed4b7
| 75
|
py
|
Python
|
.metadata/.plugins/org.python.pydev.shared_interactive_console/history.py
|
fullerene12/VOTA
|
3a5cfc1e210ac7ea274537a8d189b54660416599
|
[
"MIT"
] | null | null | null |
.metadata/.plugins/org.python.pydev.shared_interactive_console/history.py
|
fullerene12/VOTA
|
3a5cfc1e210ac7ea274537a8d189b54660416599
|
[
"MIT"
] | null | null | null |
.metadata/.plugins/org.python.pydev.shared_interactive_console/history.py
|
fullerene12/VOTA
|
3a5cfc1e210ac7ea274537a8d189b54660416599
|
[
"MIT"
] | 1
|
2021-08-01T22:39:18.000Z
|
2021-08-01T22:39:18.000Z
|
import sys

# Report which interpreter is running (falling back to the platform name
# when sys.executable is empty) together with its version string.
interpreter = sys.executable or sys.platform
print('%s %s' % (interpreter, sys.version))
| 37.5
| 74
| 0.693333
| 12
| 75
| 4.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 75
| 1
| 75
| 75
| 0.787879
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
0651ab151a7c92bb5c33655beaba51093024c9dc
| 341
|
py
|
Python
|
opytimizer/spaces/__init__.py
|
anukaal/opytimizer
|
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
|
[
"Apache-2.0"
] | 528
|
2018-10-01T20:00:09.000Z
|
2022-03-27T11:15:31.000Z
|
opytimizer/spaces/__init__.py
|
anukaal/opytimizer
|
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
|
[
"Apache-2.0"
] | 17
|
2019-10-30T00:47:03.000Z
|
2022-03-21T11:39:28.000Z
|
opytimizer/spaces/__init__.py
|
anukaal/opytimizer
|
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
|
[
"Apache-2.0"
] | 35
|
2018-10-01T20:03:23.000Z
|
2022-03-20T03:54:15.000Z
|
"""Customizable space module that provides different search spaces
implementations.
"""
from opytimizer.spaces.boolean import BooleanSpace
from opytimizer.spaces.grid import GridSpace
from opytimizer.spaces.hyper_complex import HyperComplexSpace
from opytimizer.spaces.search import SearchSpace
from opytimizer.spaces.tree import TreeSpace
| 34.1
| 66
| 0.859238
| 40
| 341
| 7.3
| 0.55
| 0.239726
| 0.342466
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 341
| 9
| 67
| 37.888889
| 0.941935
| 0.234604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
06b0e4b7f2071c5642bd956f75e4b9df9624fc3e
| 9,079
|
py
|
Python
|
tests/location/test_location_utility.py
|
questionlp/wwdtm
|
f3cf3399c22bf19e369e6e0250e7c72de0be3a90
|
[
"Apache-2.0"
] | null | null | null |
tests/location/test_location_utility.py
|
questionlp/wwdtm
|
f3cf3399c22bf19e369e6e0250e7c72de0be3a90
|
[
"Apache-2.0"
] | 1
|
2022-01-17T04:25:49.000Z
|
2022-01-17T04:25:49.000Z
|
tests/location/test_location_utility.py
|
questionlp/wwdtm
|
f3cf3399c22bf19e369e6e0250e7c72de0be3a90
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# vim: set noai syntax=python ts=4 sw=4:
#
# Copyright (c) 2018-2021 Linh Pham
# wwdtm is released under the terms of the Apache License 2.0
"""Testing for object: :py:class:`wwdtm.location.LocationUtility`
"""
import json
from typing import Any, Dict
import pytest
from wwdtm.location import LocationUtility
@pytest.mark.skip
def get_connect_dict() -> Dict[str, Any]:
    """Load database connection settings from ``config.json``.

    :return: The ``database`` section of the configuration file for use by
        mysql.connector (``None`` when the section is absent)
    """
    with open("config.json", "r") as config_file:
        settings = json.load(config_file)
    # dict.get mirrors the original behavior: None when "database" is missing.
    return settings.get("database")
@pytest.mark.parametrize("location_id", [95])
def test_location_utility_convert_id_to_slug(location_id: int):
    """Positive test for :py:meth:`wwdtm.location.LocationUtility.convert_id_to_slug`.

    :param location_id: Location ID expected to resolve to a slug string
    """
    util = LocationUtility(connect_dict=get_connect_dict())
    result = util.convert_id_to_slug(location_id)
    assert result, f"Location slug for ID {location_id} was not found"
@pytest.mark.parametrize("location_id", [-1])
def test_location_utility_convert_invalid_id_to_slug(location_id: int):
    """Negative test for :py:meth:`wwdtm.location.LocationUtility.convert_id_to_slug`.

    :param location_id: Location ID expected NOT to resolve to a slug string
    """
    util = LocationUtility(connect_dict=get_connect_dict())
    result = util.convert_id_to_slug(location_id)
    assert not result, f"Location slug for ID {location_id} was found"
@pytest.mark.parametrize("location_slug", ["the-chicago-theatre-chicago-il"])
def test_location_utility_convert_slug_to_id(location_slug: str):
    """Positive test for :py:meth:`wwdtm.location.LocationUtility.convert_slug_to_id`.

    :param location_slug: Location slug string expected to resolve to a location ID
    """
    util = LocationUtility(connect_dict=get_connect_dict())
    result = util.convert_slug_to_id(location_slug)
    assert result, f"Location ID for slug {location_slug} was not found"
@pytest.mark.parametrize("location_slug", ["the-chicago-theatre-chicago-li"])
def test_location_utility_convert_invalid_slug_to_id(location_slug: str):
    """Negative test for :py:meth:`wwdtm.location.LocationUtility.convert_slug_to_id`.

    :param location_slug: Location slug string expected NOT to resolve to a location ID
    """
    util = LocationUtility(connect_dict=get_connect_dict())
    result = util.convert_slug_to_id(location_slug)
    assert not result, f"Location ID for slug {location_slug} was found"
@pytest.mark.parametrize("location_id", [95])
def test_location_utility_id_exists(location_id: int):
    """Positive test for :py:meth:`wwdtm.location.LocationUtility.id_exists`.

    :param location_id: Location ID expected to exist
    """
    util = LocationUtility(connect_dict=get_connect_dict())
    exists = util.id_exists(location_id)
    assert exists, f"Location ID {location_id} does not exist"
@pytest.mark.parametrize("location_id", [-1])
def test_location_utility_id_not_exists(location_id: int):
    """Negative test for :py:meth:`wwdtm.location.LocationUtility.id_exists`.

    :param location_id: Location ID expected NOT to exist
    """
    util = LocationUtility(connect_dict=get_connect_dict())
    exists = util.id_exists(location_id)
    assert not exists, f"Location ID {location_id} exists"
@pytest.mark.parametrize("location_slug", ["the-chicago-theatre-chicago-il"])
def test_location_utility_slug_exists(location_slug: str):
    """Positive test for :py:meth:`wwdtm.location.LocationUtility.slug_exists`.

    :param location_slug: Location slug string expected to exist
    """
    util = LocationUtility(connect_dict=get_connect_dict())
    exists = util.slug_exists(location_slug)
    assert exists, f"Location slug {location_slug} does not exist"
@pytest.mark.parametrize("location_slug", ["the-chicago-theatre-chicago-li"])
def test_location_utility_slug_not_exists(location_slug: str):
    """Negative test for :py:meth:`wwdtm.location.LocationUtility.slug_exists`.

    :param location_slug: Location slug string expected NOT to exist
    """
    util = LocationUtility(connect_dict=get_connect_dict())
    exists = util.slug_exists(location_slug)
    assert not exists, f"Location slug {location_slug} exists"
@pytest.mark.parametrize("city", ["Chicago"])
def test_location_utility_slugify_location_city(city: str):
    """Negative test for :py:meth:`wwdtm.location.LocationUtility.slugify_location`
    when given only a city name (expected to raise :class:`ValueError`).

    :param city: City to include in the slug string
    """
    with pytest.raises(ValueError):
        util = LocationUtility(connect_dict=get_connect_dict())
        result = util.slugify_location(city=city)
        assert result, "Unable to convert into a slug string"
        assert isinstance(result, str), "Value returned is not a string"
@pytest.mark.parametrize("city, state", [("Chicago", "IL")])
def test_location_utility_slugify_location_city_state(city: str, state: str):
    """Negative test for :py:meth:`wwdtm.location.LocationUtility.slugify_location`
    when given only city and state names (expected to raise :class:`ValueError`).

    :param city: City to include in the slug string
    :param state: State to include in the slug string
    """
    with pytest.raises(ValueError):
        util = LocationUtility(connect_dict=get_connect_dict())
        result = util.slugify_location(city=city, state=state)
        assert result, "Unable to convert into a slug string"
        assert isinstance(result, str), "Value returned is not a string"
@pytest.mark.parametrize(
    "location_id, venue, city, state",
    [(2, "Chase Auditorium", "Chicago", "IL")],
)
def test_location_utility_slugify_location_full(
    location_id: int, venue: str, city: str, state: str
):
    """Test :py:meth:`wwdtm.location.LocationUtility.slugify_location`
    with location ID, venue, city and state names.

    :param location_id: Location ID to include in the slug string
    :param venue: Venue name to include in the slug string
    :param city: City to include in the slug string
    :param state: State to include in the slug string
    """
    util = LocationUtility(connect_dict=get_connect_dict())
    result = util.slugify_location(
        location_id=location_id, venue=venue, city=city, state=state
    )
    assert result, "Unable to convert into a slug string"
    assert isinstance(result, str), "Value returned is not a string"
@pytest.mark.parametrize("location_id, venue", [(2, "Chase Auditorium")])
def test_location_utility_slugify_location_venue(location_id: int, venue: str):
    """Test :py:meth:`wwdtm.location.LocationUtility.slugify_location`
    with a location ID and venue name.

    :param location_id: Location ID to include in the slug string
    :param venue: Venue name to include in the slug string
    """
    util = LocationUtility(connect_dict=get_connect_dict())
    result = util.slugify_location(location_id=location_id, venue=venue)
    assert result, "Unable to convert into a slug string"
    assert isinstance(result, str), "Value returned is not a string"
@pytest.mark.parametrize(
    "venue, city, state", [("Chase Auditorium", "Chicago", "IL")]
)
def test_location_utility_slugify_location_venue_city_state(
    venue: str, city: str, state: str
):
    """Test :py:meth:`wwdtm.location.LocationUtility.slugify_location`
    with venue, city and state names.

    :param venue: Venue name to include in the slug string
    :param city: City to include in the slug string
    :param state: State to include in the slug string
    """
    util = LocationUtility(connect_dict=get_connect_dict())
    result = util.slugify_location(venue=venue, city=city, state=state)
    assert result, "Unable to convert into a slug string"
    assert isinstance(result, str), "Value returned is not a string"
@pytest.mark.parametrize("location_id", [2])
def test_location_utility_slugify_location_id(location_id: int):
    """Test :py:meth:`wwdtm.location.LocationUtility.slugify_location`
    with only a location ID.

    :param location_id: Location ID to include in the slug string
    """
    util = LocationUtility(connect_dict=get_connect_dict())
    result = util.slugify_location(location_id=location_id)
    assert result, "Unable to convert into a slug string"
    assert isinstance(result, str), "Value returned is not a string"
| 38.965665
| 88
| 0.702941
| 1,193
| 9,079
| 5.157586
| 0.09891
| 0.081261
| 0.068259
| 0.050057
| 0.88786
| 0.870145
| 0.835365
| 0.802373
| 0.794897
| 0.768243
| 0
| 0.003038
| 0.202335
| 9,079
| 232
| 89
| 39.133621
| 0.846589
| 0.319529
| 0
| 0.484848
| 0
| 0
| 0.196663
| 0.020432
| 0
| 0
| 0
| 0
| 0.20202
| 1
| 0.151515
| false
| 0
| 0.040404
| 0
| 0.20202
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
06b29440122743c4d662f5e0b42777454bfb53b1
| 2,600
|
py
|
Python
|
tfcli/resources/asg.py
|
leowa/tfcli
|
21314feabcb56fe802298a98a66eb4e2a9de8cc7
|
[
"MIT"
] | null | null | null |
tfcli/resources/asg.py
|
leowa/tfcli
|
21314feabcb56fe802298a98a66eb4e2a9de8cc7
|
[
"MIT"
] | null | null | null |
tfcli/resources/asg.py
|
leowa/tfcli
|
21314feabcb56fe802298a98a66eb4e2a9de8cc7
|
[
"MIT"
] | null | null | null |
from .base import BaseResource
class Asg(BaseResource):
    """Autoscaling-group resource generated from the current region."""

    def __init__(self, logger=None):
        super().__init__(logger)

    def amend_attributes(self, _type, _name, attributes: dict):
        # When the launch template carries both an id and a name, drop the
        # id so the generated config references the template by name only.
        templates = attributes.get("launch_template")
        if templates:
            first = templates[0]
            if "id" in first and "name" in first:
                del first["id"]
        return attributes

    @classmethod
    def ignore_attrbute(cls, key, value):
        # NOTE(review): method name typo is part of the public interface;
        # renaming would break callers.
        return key in ("id", "owner_id", "arn")

    @classmethod
    def included_resource_types(cls):
        """Resource types for this resource and its derived resources."""
        return [
            "aws_autoscaling_group",
        ]

    def list_all(self):
        """List all such kind of resources from AWS.

        :return: generator of (type, name, id) tuples, one per resource
        """
        client = self.session.client("autoscaling")
        for group in client.describe_auto_scaling_groups()["AutoScalingGroups"]:
            group_name = group["AutoScalingGroupName"]
            yield "aws_autoscaling_group", group_name, group_name
class LaunchTemplate(BaseResource):
    """Launch-template resource generated from the current region."""

    def __init__(self, logger=None):
        super().__init__(logger)

    def amend_attributes(self, _type, _name, attributes: dict):
        # When the launch template carries both an id and a name, drop the
        # id so the generated config references the template by name only.
        templates = attributes.get("launch_template")
        if templates:
            first = templates[0]
            if "id" in first and "name" in first:
                del first["id"]
        return attributes

    @classmethod
    def ignore_attrbute(cls, key, value):
        # NOTE(review): method name typo is part of the public interface;
        # renaming would break callers.
        return key in ("id", "owner_id", "arn", "default_version", "latest_version")

    @classmethod
    def included_resource_types(cls):
        """Resource types for this resource and its derived resources."""
        return [
            "aws_launch_template",
        ]

    def list_all(self):
        """List all such kind of resources from AWS.

        :return: generator of (type, name, id) tuples, one per resource
        """
        client = self.session.client("ec2")
        for template in client.describe_launch_templates()["LaunchTemplates"]:
            template_id = template["LaunchTemplateId"]
            yield "aws_launch_template", template_id, template_id
| 31.707317
| 87
| 0.611154
| 299
| 2,600
| 5.110368
| 0.267559
| 0.082461
| 0.062827
| 0.028796
| 0.732984
| 0.732984
| 0.732984
| 0.701571
| 0.701571
| 0.701571
| 0
| 0.002714
| 0.291538
| 2,600
| 81
| 88
| 32.098765
| 0.826819
| 0.204615
| 0
| 0.666667
| 1
| 0
| 0.1615
| 0.021
| 0
| 0
| 0
| 0
| 0
| 1
| 0.196078
| false
| 0
| 0.019608
| 0
| 0.411765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
230db22fc190c68752be940d1363fe5ecdb2a558
| 169
|
py
|
Python
|
backend/api/models.py
|
tuguldurio/fullstack-ecommerce
|
06257e704c657b008587aabb4075750899149b1d
|
[
"MIT"
] | null | null | null |
backend/api/models.py
|
tuguldurio/fullstack-ecommerce
|
06257e704c657b008587aabb4075750899149b1d
|
[
"MIT"
] | null | null | null |
backend/api/models.py
|
tuguldurio/fullstack-ecommerce
|
06257e704c657b008587aabb4075750899149b1d
|
[
"MIT"
] | null | null | null |
from api.user.models import User
from api.cart.models import Cart, CartProduct
from api.order.models import Order, OrderProduct
from api.product.models import Product
| 42.25
| 49
| 0.822485
| 26
| 169
| 5.346154
| 0.384615
| 0.201439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118343
| 169
| 4
| 50
| 42.25
| 0.932886
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
23240f288abf89b78f596d8ce66de1c2719d6da7
| 43
|
py
|
Python
|
app/data/__init__.py
|
codenio/cvcam
|
4bfb16ae20375abee9dfdf0383c0df0bb5b31db7
|
[
"MIT"
] | 2
|
2021-02-12T10:10:41.000Z
|
2022-02-01T12:29:34.000Z
|
app/data/__init__.py
|
codenio/cvcam
|
4bfb16ae20375abee9dfdf0383c0df0bb5b31db7
|
[
"MIT"
] | null | null | null |
app/data/__init__.py
|
codenio/cvcam
|
4bfb16ae20375abee9dfdf0383c0df0bb5b31db7
|
[
"MIT"
] | 1
|
2020-08-08T17:19:05.000Z
|
2020-08-08T17:19:05.000Z
|
from .lite_data_store import LiteDataStore
| 21.5
| 42
| 0.883721
| 6
| 43
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
232aa8e2e7ba295ede12f5cba7bf5a933e010de8
| 31,253
|
py
|
Python
|
pytest_docker_registry_fixtures/fixtures.py
|
crashvb/pytest-docker-registry-fixtures
|
aab57393f8478982751da140e259eb4bf81869a7
|
[
"Apache-2.0"
] | null | null | null |
pytest_docker_registry_fixtures/fixtures.py
|
crashvb/pytest-docker-registry-fixtures
|
aab57393f8478982751da140e259eb4bf81869a7
|
[
"Apache-2.0"
] | 1
|
2021-02-17T04:23:09.000Z
|
2021-02-17T04:29:22.000Z
|
pytest_docker_registry_fixtures/fixtures.py
|
crashvb/pytest-docker-registry-fixtures
|
aab57393f8478982751da140e259eb4bf81869a7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# pylint: disable=redefined-outer-name,too-many-arguments,too-many-locals
"""The actual fixtures, you found them ;)."""
import logging
import itertools
from base64 import b64encode
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from ssl import create_default_context, SSLContext
from string import Template
from time import sleep, time
from typing import Dict, Generator, List, NamedTuple
import pytest
from docker import DockerClient, from_env
from lovely.pytest.docker.compose import Services
from _pytest.tmpdir import TempPathFactory
from .imagename import ImageName
from .utils import (
check_url_secure,
DOCKER_REGISTRY_SERVICE,
DOCKER_REGISTRY_SERVICE_PATTERN,
generate_cacerts,
generate_htpasswd,
generate_keypair,
get_docker_compose_user_defined,
get_embedded_file,
get_user_defined_file,
replicate_image,
start_service,
)
# Caching is needed, as singular-fixtures and list-fixtures will conflict at scale_factor=1
# This appears to only matter when attempting to start the docker secure registry service
# for the second time.
# Maps a private helper's __name__ to the list of results it has already
# built, so the singular and list fixtures below share one set of instances.
CACHE = {}
LOGGER = logging.getLogger(__name__)
class DockerRegistryCerts(NamedTuple):
    # pylint: disable=missing-class-docstring
    # Paths to the PKI files generated for one secure registry instance.
    ca_certificate: Path
    ca_private_key: Path
    certificate: Path
    private_key: Path
class DockerRegistryInsecure(NamedTuple):
    # pylint: disable=missing-class-docstring
    # Connection details for one running insecure (plain HTTP) registry service.
    docker_client: DockerClient
    docker_compose: Path
    endpoint: str
    images: List[ImageName]
    service_name: str
# Note: NamedTuple does not support inheritance :(
class DockerRegistrySecure(NamedTuple):
    # pylint: disable=missing-class-docstring
    # Connection details for one running secured registry service
    # (duplicates DockerRegistryInsecure's fields — see note above).
    auth_header: Dict[str, str]
    cacerts: Path
    certs: DockerRegistryCerts
    docker_client: DockerClient
    docker_compose: Path
    endpoint: str
    htpasswd: Path
    images: List[ImageName]
    password: str
    service_name: str
    ssl_context: SSLContext
    username: str
@pytest.fixture(scope="session")
def docker_client() -> DockerClient:
    """Session-scoped, insecure Docker API client built from the environment."""
    client = from_env()
    return client
def _docker_compose_insecure(
    *,
    docker_compose_files: List[str],
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the location of the docker-compose configuration file containing the insecure docker registry service.
    """
    # Results are memoized in the module-level CACHE keyed by this function's
    # name, so repeated calls (singular and list fixtures) share instances.
    cache_key = _docker_compose_insecure.__name__
    result = CACHE.get(cache_key, [])
    for i in range(scale_factor):
        if i < len(result):
            # Instance i was already resolved by an earlier call.
            continue
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("insecure", i)
        # User-defined compose files take precedence over the embedded one.
        chain = itertools.chain(
            get_docker_compose_user_defined(docker_compose_files, service_name),
            # TODO: lovely-docker-compose uses the file for teardown ...
            get_embedded_file(
                tmp_path_factory, delete_after=False, name="docker-compose.yml"
            ),
        )
        for path in chain:
            # First candidate wins.
            result.append(path)
            break
        else:
            # No candidate at all: record a placeholder so indices stay aligned.
            LOGGER.warning("Unable to find docker compose for: %s", service_name)
            result.append("-unknown-")
    CACHE[cache_key] = result
    yield result
@pytest.fixture(scope="session")
def docker_compose_insecure(
    docker_compose_files: List[str], tmp_path_factory: TempPathFactory
) -> Generator[Path, None, None]:
    """
    Provides the location of the docker-compose configuration file containing the insecure docker registry service.
    """
    # Delegate to the shared helper at scale 1 and expose the single path.
    singleton = _docker_compose_insecure(
        docker_compose_files=docker_compose_files,
        scale_factor=1,
        tmp_path_factory=tmp_path_factory,
    )
    for paths in singleton:
        yield paths[0]
@pytest.fixture(scope="session")
def docker_compose_insecure_list(
    docker_compose_files: List[str],
    pdrf_scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the locations of the docker-compose configuration files containing the insecure docker registry services.
    """
    # Delegate to the shared helper at the configured scale factor.
    for paths in _docker_compose_insecure(
        docker_compose_files=docker_compose_files,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    ):
        yield paths
def _docker_compose_secure(
    *,
    docker_compose_files: List[str],
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the location of the templated docker-compose configuration file containing the secure docker registry
    service.
    """
    # Results are memoized in the module-level CACHE keyed by this function's
    # name, so repeated calls (singular and list fixtures) share instances.
    cache_key = _docker_compose_secure.__name__
    result = CACHE.get(cache_key, [])
    for i in range(scale_factor):
        if i < len(result):
            # Instance i was already resolved by an earlier call.
            continue
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("secure", i)
        # User-defined compose files take precedence over the embedded one.
        chain = itertools.chain(
            get_docker_compose_user_defined(docker_compose_files, service_name),
            get_embedded_file(
                tmp_path_factory, delete_after=False, name="docker-compose.yml"
            ),
        )
        for path in chain:
            # First candidate wins.
            result.append(path)
            break
        else:
            # No candidate at all: record a placeholder so indices stay aligned.
            LOGGER.warning("Unable to find docker compose for: %s", service_name)
            result.append("-unknown-")
    CACHE[cache_key] = result
    yield result
@pytest.fixture(scope="session")
def docker_compose_secure(
    docker_compose_files: List[str], tmp_path_factory: TempPathFactory
) -> Generator[Path, None, None]:
    """
    Provides the location of the templated docker-compose configuration file containing the secure docker registry
    service.
    """
    # Delegate to the shared helper at scale 1 and expose the single path.
    singleton = _docker_compose_secure(
        docker_compose_files=docker_compose_files,
        scale_factor=1,
        tmp_path_factory=tmp_path_factory,
    )
    for paths in singleton:
        yield paths[0]
@pytest.fixture(scope="session")
def docker_compose_secure_list(
    docker_compose_files: List[str],
    pdrf_scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the locations of the templated docker-compose configuration files containing the secure docker registry
    services.
    """
    # Delegate to the shared helper at the configured scale factor.
    for paths in _docker_compose_secure(
        docker_compose_files=docker_compose_files,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    ):
        yield paths
def _docker_registry_auth_header(
    *,
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    scale_factor: int,
) -> List[Dict[str, str]]:
    """Provides an HTTP basic authentication header containing credentials for the secure docker registry service."""
    # Results are memoized in the module-level CACHE keyed by this function's
    # name, so repeated calls (singular and list fixtures) share instances.
    cache_key = _docker_registry_auth_header.__name__
    result = CACHE.get(cache_key, [])
    for i in range(scale_factor):
        if i < len(result):
            # Header for instance i was already built by an earlier call.
            continue
        # HTTP basic auth (RFC 7617): base64 of "username:password".
        auth = b64encode(
            f"{docker_registry_username_list[i]}:{docker_registry_password_list[i]}".encode(
                "utf-8"
            )
        ).decode("utf-8")
        result.append({"Authorization": f"Basic {auth}"})
    CACHE[cache_key] = result
    return result
@pytest.fixture(scope="session")
def docker_registry_auth_header(
    docker_registry_password: str, docker_registry_username: str
) -> Dict[str, str]:
    """Provides an HTTP basic authentication header containing credentials for the secure docker registry service."""
    # Delegate to the shared helper at scale 1 and expose the single header.
    headers = _docker_registry_auth_header(
        docker_registry_password_list=[docker_registry_password],
        docker_registry_username_list=[docker_registry_username],
        scale_factor=1,
    )
    return headers[0]
@pytest.fixture(scope="session")
def docker_registry_auth_header_list(
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    pdrf_scale_factor: int,
) -> List[Dict[str, str]]:
    """Provides HTTP basic authentication headers containing credentials for the secure docker registry services."""
    # Delegate to the shared helper at the configured scale factor.
    headers = _docker_registry_auth_header(
        docker_registry_password_list=docker_registry_password_list,
        docker_registry_username_list=docker_registry_username_list,
        scale_factor=pdrf_scale_factor,
    )
    return headers
def _docker_registry_cacerts(
    *,
    docker_registry_certs_list: List[DockerRegistryCerts],
    pytestconfig: "_pytest.config.Config",
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the location of a temporary CA certificate trust store that contains the certificate of the secure docker
    registry service.
    """
    # Results are memoized in the module-level CACHE keyed by this function's
    # name, so repeated calls (singular and list fixtures) share instances.
    cache_key = _docker_registry_cacerts.__name__
    result = CACHE.get(cache_key, [])
    for i in range(scale_factor):
        if i < len(result):
            # Trust store for instance i was already built by an earlier call.
            continue
        # A user-supplied cacerts file takes precedence over generating one.
        chain = itertools.chain(
            get_user_defined_file(pytestconfig, "cacerts"),
            generate_cacerts(
                tmp_path_factory,
                certificate=docker_registry_certs_list[i].ca_certificate,
            ),
        )
        for path in chain:
            # First candidate wins.
            result.append(path)
            break
        else:
            # No candidate at all: record a placeholder so indices stay aligned.
            LOGGER.warning("Unable to find or generate cacerts!")
            result.append("-unknown-")
    CACHE[cache_key] = result
    yield result
@pytest.fixture(scope="session")
def docker_registry_cacerts(
    docker_registry_certs: DockerRegistryCerts,
    pytestconfig: "_pytest.config.Config",
    tmp_path_factory: TempPathFactory,
) -> Generator[Path, None, None]:
    """
    Provides the location of a temporary CA certificate trust store that contains the certificate of the secure docker
    registry service.
    """
    # Delegate to the shared helper at scale 1 and expose the single path.
    singleton = _docker_registry_cacerts(
        docker_registry_certs_list=[docker_registry_certs],
        pytestconfig=pytestconfig,
        scale_factor=1,
        tmp_path_factory=tmp_path_factory,
    )
    for paths in singleton:
        yield paths[0]
@pytest.fixture(scope="session")
def docker_registry_cacerts_list(
    docker_registry_certs_list: List[DockerRegistryCerts],
    pdrf_scale_factor: int,
    pytestconfig: "_pytest.config.Config",
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the locations of temporary CA certificate trust stores that contain the certificates of the secure docker
    registry services.
    """
    # Delegate to the shared helper at the configured scale factor.
    for paths in _docker_registry_cacerts(
        docker_registry_certs_list=docker_registry_certs_list,
        pytestconfig=pytestconfig,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    ):
        yield paths
def _docker_registry_certs(
    *, scale_factor: int, tmp_path_factory: TempPathFactory
) -> Generator[List[DockerRegistryCerts], None, None]:
    """Provides the location of temporary certificate and private key files for the secure docker registry service."""
    # TODO: Augment to allow for reading certificates from /test ...
    cache_key = _docker_registry_certs.__name__
    certs_list = CACHE.get(cache_key, [])
    # Only provision instances beyond those already cached.
    for index in range(len(certs_list), scale_factor):
        tmp_path = tmp_path_factory.mktemp(__name__)
        keypair = generate_keypair()
        certs = DockerRegistryCerts(
            ca_certificate=tmp_path.joinpath(f"{DOCKER_REGISTRY_SERVICE}-ca-{index}.crt"),
            ca_private_key=tmp_path.joinpath(f"{DOCKER_REGISTRY_SERVICE}-ca-{index}.key"),
            certificate=tmp_path.joinpath(f"{DOCKER_REGISTRY_SERVICE}-{index}.crt"),
            private_key=tmp_path.joinpath(f"{DOCKER_REGISTRY_SERVICE}-{index}.key"),
        )
        # Persist the generated keypair material to the temporary paths.
        certs.ca_certificate.write_bytes(keypair.ca_certificate)
        certs.ca_private_key.write_bytes(keypair.ca_private_key)
        certs.certificate.write_bytes(keypair.certificate)
        certs.private_key.write_bytes(keypair.private_key)
        certs_list.append(certs)
    CACHE[cache_key] = certs_list
    yield certs_list
    # Teardown: remove all certificate / key material from disk.
    for certs in certs_list:
        for path in (
            certs.ca_certificate,
            certs.ca_private_key,
            certs.certificate,
            certs.private_key,
        ):
            path.unlink(missing_ok=True)
@pytest.fixture(scope="session")
def docker_registry_certs(
    tmp_path_factory: TempPathFactory,
) -> Generator[DockerRegistryCerts, None, None]:
    """Provides the location of temporary certificate and private key files for the secure docker registry service."""
    # Single-instance convenience wrapper around the scalable implementation.
    generator = _docker_registry_certs(scale_factor=1, tmp_path_factory=tmp_path_factory)
    for certs_list in generator:
        yield certs_list[0]
@pytest.fixture(scope="session")
def docker_registry_certs_list(
    pdrf_scale_factor: int, tmp_path_factory: TempPathFactory
) -> Generator[List[DockerRegistryCerts], None, None]:
    """Provides the location of temporary certificate and private key files for the secure docker registry service."""
    # Forward to the scalable implementation, one entry per enumerated instance.
    kwargs = dict(scale_factor=pdrf_scale_factor, tmp_path_factory=tmp_path_factory)
    yield from _docker_registry_certs(**kwargs)
def _docker_registry_htpasswd(
    *,
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    pytestconfig: "_pytest.config.Config",
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """Provides the location of the htpasswd file for the secure registry service."""
    cache_key = _docker_registry_htpasswd.__name__
    htpasswd_list = CACHE.get(cache_key, [])
    # Only provision entries beyond those already cached by a previous invocation.
    for index in range(len(htpasswd_list), scale_factor):
        # Prefer a user-provided htpasswd file; fall back to generating one on the fly.
        candidates = itertools.chain(
            get_user_defined_file(pytestconfig, "htpasswd"),
            generate_htpasswd(
                tmp_path_factory,
                username=docker_registry_username_list[index],
                password=docker_registry_password_list[index],
            ),
        )
        path = next(candidates, None)
        if path is None:
            LOGGER.warning("Unable to find or generate htpasswd!")
            path = "-unknown-"
        htpasswd_list.append(path)
    CACHE[cache_key] = htpasswd_list
    yield htpasswd_list
@pytest.fixture(scope="session")
def docker_registry_htpasswd(
    docker_registry_password: str,
    docker_registry_username: str,
    pytestconfig: "_pytest.config.Config",
    tmp_path_factory: TempPathFactory,
) -> Generator[Path, None, None]:
    """Provides the location of the htpasswd file for the secure registry service."""
    # Single-instance convenience wrapper around the scalable implementation.
    generator = _docker_registry_htpasswd(
        docker_registry_password_list=[docker_registry_password],
        docker_registry_username_list=[docker_registry_username],
        pytestconfig=pytestconfig,
        scale_factor=1,
        tmp_path_factory=tmp_path_factory,
    )
    for htpasswd_list in generator:
        yield htpasswd_list[0]
@pytest.fixture(scope="session")
def docker_registry_htpasswd_list(
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    pdrf_scale_factor: int,
    pytestconfig: "_pytest.config.Config",
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """Provides the location of the htpasswd file for the secure registry service."""
    # Forward all arguments to the scalable implementation unchanged.
    kwargs = dict(
        docker_registry_password_list=docker_registry_password_list,
        docker_registry_username_list=docker_registry_username_list,
        pytestconfig=pytestconfig,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    )
    yield from _docker_registry_htpasswd(**kwargs)
def _docker_registry_insecure(
    *,
    docker_client: DockerClient,
    docker_compose_insecure_list: List[Path],
    docker_services: Services,
    request,
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[DockerRegistryInsecure], None, None]:
    """
    Provides the endpoint of a local, mutable, insecure, docker registry.

    Args:
        docker_client: Docker client used to replicate marked images into the first registry instance.
        docker_compose_insecure_list: Per-instance docker-compose templates for the insecure registry service.
        docker_services: Services manager used to start the registry containers.
        request: The pytest request object from which image-replication options are retrieved.
        scale_factor: Number of registry instances to provision.
        tmp_path_factory: Factory for the temporary directories holding the rendered compose files.

    Yields:
        One DockerRegistryInsecure descriptor per enumerated instance.
    """
    cache_key = _docker_registry_insecure.__name__
    result = CACHE.get(cache_key, [])
    for i in range(scale_factor):
        # Instances already provisioned by a previous invocation are reused from the cache.
        if i < len(result):
            continue
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("insecure", i)
        tmp_path = tmp_path_factory.mktemp(__name__)
        # Create an insecure registry service from the docker compose template ...
        path_docker_compose = tmp_path.joinpath(f"docker-compose-{i}.yml")
        template = Template(docker_compose_insecure_list[i].read_text("utf-8"))
        path_docker_compose.write_text(
            template.substitute(
                {
                    "CONTAINER_NAME": service_name,
                    # Note: Needed to correctly populate the embedded, consolidated, service template ...
                    "PATH_CERTIFICATE": "/dev/null",
                    "PATH_HTPASSWD": "/dev/null",
                    "PATH_KEY": "/dev/null",
                }
            ),
            "utf-8",
        )
        LOGGER.debug("Starting insecure docker registry service [%d] ...", i)
        LOGGER.debug(" docker-compose : %s", path_docker_compose)
        LOGGER.debug(" service name : %s", service_name)
        endpoint = start_service(
            docker_services,
            docker_compose=path_docker_compose,
            service_name=service_name,
        )
        LOGGER.debug("Insecure docker registry endpoint [%d]: %s", i, endpoint)
        images = []
        # Only the first instance receives the replicated images.
        if i == 0:
            LOGGER.debug("Replicating images into %s [%d] ...", service_name, i)
            images = _replicate_images(docker_client, endpoint, request)
        result.append(
            DockerRegistryInsecure(
                docker_client=docker_client,
                docker_compose=path_docker_compose,
                endpoint=endpoint,
                images=images,
                service_name=service_name,
            )
        )
    CACHE[cache_key] = result
    yield result
@pytest.fixture(scope="session")
def docker_registry_insecure(
    docker_client: DockerClient,
    docker_compose_insecure: Path,
    docker_services: Services,
    request,
    tmp_path_factory: TempPathFactory,
) -> Generator[DockerRegistryInsecure, None, None]:
    """Provides the endpoint of a local, mutable, insecure, docker registry."""
    # Single-instance convenience wrapper around the scalable implementation.
    generator = _docker_registry_insecure(
        docker_client=docker_client,
        docker_compose_insecure_list=[docker_compose_insecure],
        docker_services=docker_services,
        request=request,
        scale_factor=1,
        tmp_path_factory=tmp_path_factory,
    )
    for registries in generator:
        yield registries[0]
@pytest.fixture(scope="session")
def docker_registry_insecure_list(
    docker_client: DockerClient,
    docker_compose_insecure_list: List[Path],
    docker_services: Services,
    pdrf_scale_factor: int,
    request,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[DockerRegistryInsecure], None, None]:
    """Provides the endpoint of a local, mutable, insecure, docker registry."""
    # Forward all arguments to the scalable implementation unchanged.
    kwargs = dict(
        docker_client=docker_client,
        docker_compose_insecure_list=docker_compose_insecure_list,
        docker_services=docker_services,
        request=request,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    )
    yield from _docker_registry_insecure(**kwargs)
def _docker_registry_password(*, scale_factor: int) -> List[str]:
    """Provides the password to use for authentication to the secure registry service."""
    cache_key = _docker_registry_password.__name__
    passwords = CACHE.get(cache_key, [])
    # Top up the cached list until one password exists per enumerated instance.
    while len(passwords) < scale_factor:
        # The timestamp suffix keeps each generated credential unique; the sleep
        # guarantees consecutive timestamps differ.
        passwords.append(f"pytest.password.{time()}")
        sleep(0.05)
    CACHE[cache_key] = passwords
    return passwords
@pytest.fixture(scope="session")
def docker_registry_password() -> str:
    """Provides the password to use for authentication to the secure registry service."""
    # Single-instance convenience wrapper around the scalable implementation.
    passwords = _docker_registry_password(scale_factor=1)
    return passwords[0]
@pytest.fixture(scope="session")
def docker_registry_password_list(pdrf_scale_factor: int) -> List[str]:
    """Provides the password to use for authentication to the secure registry service."""
    # One password per enumerated instance.
    return _docker_registry_password(scale_factor=pdrf_scale_factor)
def _docker_registry_secure(
    *,
    docker_client: DockerClient,
    docker_compose_secure_list: List[Path],
    docker_registry_auth_header_list: List[Dict[str, str]],
    docker_registry_cacerts_list: List[Path],
    docker_registry_certs_list: List[DockerRegistryCerts],
    docker_registry_htpasswd_list: List[Path],
    docker_registry_password_list: List[str],
    docker_registry_ssl_context_list: List[SSLContext],
    docker_registry_username_list: List[str],
    docker_services: Services,
    request,
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[DockerRegistrySecure], None, None]:
    """
    Provides the endpoint of a local, mutable, secure, docker registry.

    Args:
        docker_client: Docker client used to replicate marked images into the first registry instance.
        docker_compose_secure_list: Per-instance docker-compose templates for the secure registry service.
        docker_registry_auth_header_list: Per-instance HTTP basic authentication headers.
        docker_registry_cacerts_list: Per-instance CA certificate trust stores.
        docker_registry_certs_list: Per-instance certificate / private key bundles.
        docker_registry_htpasswd_list: Per-instance htpasswd file locations.
        docker_registry_password_list: Per-instance passwords.
        docker_registry_ssl_context_list: Per-instance SSL contexts for endpoint health checks.
        docker_registry_username_list: Per-instance usernames.
        docker_services: Services manager used to start the registry containers.
        request: The pytest request object from which image-replication options are retrieved.
        scale_factor: Number of registry instances to provision.
        tmp_path_factory: Factory for the temporary directories holding the rendered compose files.

    Yields:
        One DockerRegistrySecure descriptor per enumerated instance.
    """
    cache_key = _docker_registry_secure.__name__
    result = CACHE.get(cache_key, [])
    for i in range(scale_factor):
        # Instances already provisioned by a previous invocation are reused from the cache.
        if i < len(result):
            continue
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("secure", i)
        tmp_path = tmp_path_factory.mktemp(__name__)
        # Create a secure registry service from the docker compose template ...
        path_docker_compose = tmp_path.joinpath(f"docker-compose-{i}.yml")
        template = Template(docker_compose_secure_list[i].read_text("utf-8"))
        path_docker_compose.write_text(
            template.substitute(
                {
                    "CONTAINER_NAME": service_name,
                    "PATH_CERTIFICATE": docker_registry_certs_list[i].certificate,
                    "PATH_HTPASSWD": docker_registry_htpasswd_list[i],
                    "PATH_KEY": docker_registry_certs_list[i].private_key,
                }
            ),
            "utf-8",
        )
        LOGGER.debug("Starting secure docker registry service [%d] ...", i)
        LOGGER.debug(" docker-compose : %s", path_docker_compose)
        LOGGER.debug(
            " ca certificate : %s", docker_registry_certs_list[i].ca_certificate
        )
        LOGGER.debug(" certificate : %s", docker_registry_certs_list[i].certificate)
        LOGGER.debug(" htpasswd : %s", docker_registry_htpasswd_list[i])
        LOGGER.debug(" private key : %s", docker_registry_certs_list[i].private_key)
        LOGGER.debug(" password : %s", docker_registry_password_list[i])
        LOGGER.debug(" service name : %s", service_name)
        LOGGER.debug(" username : %s", docker_registry_username_list[i])
        # Health check must authenticate and trust the generated CA certificate.
        check_server = partial(
            check_url_secure,
            auth_header=docker_registry_auth_header_list[i],
            ssl_context=docker_registry_ssl_context_list[i],
        )
        endpoint = start_service(
            docker_services,
            check_server=check_server,
            docker_compose=path_docker_compose,
            service_name=service_name,
        )
        LOGGER.debug("Secure docker registry endpoint [%d]: %s", i, endpoint)
        # DUCK PUNCH: Inject the secure docker registry credentials into the docker client ...
        docker_client.api._auth_configs.add_auth(  # pylint: disable=protected-access
            endpoint,
            {
                "password": docker_registry_password_list[i],
                "username": docker_registry_username_list[i],
            },
        )
        images = []
        # Only the first instance receives the replicated images.
        if i == 0:
            LOGGER.debug("Replicating images into %s [%d] ...", service_name, i)
            images = _replicate_images(docker_client, endpoint, request)
        result.append(
            DockerRegistrySecure(
                auth_header=docker_registry_auth_header_list[i],
                cacerts=docker_registry_cacerts_list[i],
                certs=docker_registry_certs_list[i],
                docker_client=docker_client,
                docker_compose=path_docker_compose,
                endpoint=endpoint,
                htpasswd=docker_registry_htpasswd_list[i],
                password=docker_registry_password_list[i],
                images=images,
                service_name=service_name,
                ssl_context=docker_registry_ssl_context_list[i],
                username=docker_registry_username_list[i],
            )
        )
    CACHE[cache_key] = result
    yield result
@pytest.fixture(scope="session")
def docker_registry_secure(
    docker_client: DockerClient,
    docker_compose_secure: Path,
    docker_registry_auth_header: Dict[str, str],
    docker_registry_cacerts: Path,
    docker_registry_certs: DockerRegistryCerts,
    docker_registry_htpasswd: Path,
    docker_registry_password: str,
    docker_registry_ssl_context: SSLContext,
    docker_registry_username: str,
    docker_services: Services,
    request,
    tmp_path_factory: TempPathFactory,
) -> Generator[DockerRegistrySecure, None, None]:
    """Provides the endpoint of a local, mutable, secure, docker registry."""
    # Single-instance convenience wrapper: wrap each dependency in a one-element list.
    generator = _docker_registry_secure(
        docker_client=docker_client,
        docker_compose_secure_list=[docker_compose_secure],
        docker_registry_auth_header_list=[docker_registry_auth_header],
        docker_registry_cacerts_list=[docker_registry_cacerts],
        docker_registry_certs_list=[docker_registry_certs],
        docker_registry_htpasswd_list=[docker_registry_htpasswd],
        docker_registry_password_list=[docker_registry_password],
        docker_registry_ssl_context_list=[docker_registry_ssl_context],
        docker_registry_username_list=[docker_registry_username],
        docker_services=docker_services,
        request=request,
        scale_factor=1,
        tmp_path_factory=tmp_path_factory,
    )
    for registries in generator:
        yield registries[0]
@pytest.fixture(scope="session")
def docker_registry_secure_list(
    docker_client: DockerClient,
    docker_compose_secure_list: List[Path],
    docker_registry_auth_header_list: List[Dict[str, str]],
    docker_registry_cacerts_list: List[Path],
    docker_registry_certs_list: List[DockerRegistryCerts],
    docker_registry_htpasswd_list: List[Path],
    docker_registry_password_list: List[str],
    docker_registry_ssl_context_list: List[SSLContext],
    docker_registry_username_list: List[str],
    docker_services: Services,
    pdrf_scale_factor: int,
    request,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[DockerRegistrySecure], None, None]:
    """Provides the endpoint of a local, mutable, secure, docker registry."""
    # Forward all arguments to the scalable implementation unchanged.
    kwargs = dict(
        docker_client=docker_client,
        docker_compose_secure_list=docker_compose_secure_list,
        docker_registry_auth_header_list=docker_registry_auth_header_list,
        docker_registry_cacerts_list=docker_registry_cacerts_list,
        docker_registry_certs_list=docker_registry_certs_list,
        docker_registry_htpasswd_list=docker_registry_htpasswd_list,
        docker_registry_password_list=docker_registry_password_list,
        docker_registry_ssl_context_list=docker_registry_ssl_context_list,
        docker_registry_username_list=docker_registry_username_list,
        docker_services=docker_services,
        request=request,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    )
    yield from _docker_registry_secure(**kwargs)
def _docker_registry_ssl_context(
    *, docker_registry_cacerts_list: List[Path], scale_factor: int
) -> List[SSLContext]:
    """
    Provides an SSLContext referencing the temporary CA certificate trust store that contains the certificate of the
    secure docker registry service.
    """
    cache_key = _docker_registry_ssl_context.__name__
    contexts = CACHE.get(cache_key, [])
    # Top up the cached list until one context exists per enumerated instance.
    while len(contexts) < scale_factor:
        cafile = str(docker_registry_cacerts_list[len(contexts)])
        contexts.append(create_default_context(cafile=cafile))
    CACHE[cache_key] = contexts
    return contexts
@pytest.fixture(scope="session")
def docker_registry_ssl_context(docker_registry_cacerts: Path) -> SSLContext:
    """
    Provides an SSLContext referencing the temporary CA certificate trust store that contains the certificate of the
    secure docker registry service.
    """
    # Single-instance convenience wrapper around the scalable implementation.
    contexts = _docker_registry_ssl_context(
        docker_registry_cacerts_list=[docker_registry_cacerts], scale_factor=1
    )
    return contexts[0]
@pytest.fixture(scope="session")
def docker_registry_ssl_context_list(
    docker_registry_cacerts_list: List[Path],
    pdrf_scale_factor: int,
) -> List[SSLContext]:
    """
    Provides an SSLContext referencing the temporary CA certificate trust store that contains the certificate of the
    secure docker registry service.
    """
    # One SSL context per enumerated instance.
    contexts = _docker_registry_ssl_context(
        docker_registry_cacerts_list=docker_registry_cacerts_list,
        scale_factor=pdrf_scale_factor,
    )
    return contexts
def _docker_registry_username(*, scale_factor: int) -> List[str]:
    """Retrieve the name of the user to use for authentication to the secure registry service."""
    cache_key = _docker_registry_username.__name__
    usernames = CACHE.get(cache_key, [])
    # Top up the cached list until one username exists per enumerated instance.
    while len(usernames) < scale_factor:
        # The timestamp suffix keeps each generated credential unique; the sleep
        # guarantees consecutive timestamps differ.
        usernames.append(f"pytest.username.{time()}")
        sleep(0.05)
    CACHE[cache_key] = usernames
    return usernames
@pytest.fixture(scope="session")
def docker_registry_username() -> str:
    """Retrieve the name of the user to use for authentication to the secure registry service."""
    # Single-instance convenience wrapper around the scalable implementation.
    usernames = _docker_registry_username(scale_factor=1)
    return usernames[0]
@pytest.fixture(scope="session")
def docker_registry_username_list(
    pdrf_scale_factor: int,
) -> List[str]:
    """Retrieve the name of the user to use for authentication to the secure registry service."""
    # One username per enumerated instance.
    return _docker_registry_username(scale_factor=pdrf_scale_factor)
@pytest.fixture(scope="session")
def pdrf_scale_factor() -> int:
    """Provides the number of enumerated instances to be instantiated (override to scale up)."""
    return 1
def _replicate_images(
    docker_client: DockerClient, endpoint: str, request
) -> List[ImageName]:
    """
    Replicates all marked images to a docker registry service at a given endpoint.

    Args:
        docker_client: Docker client with which to replicate the marked images.
        endpoint: The endpoint of the docker registry service.
        request: The pytest requests object from which to retrieve the marks.

    Returns: The list of images that were replicated.
    """
    always_pull = strtobool(str(request.config.getoption("--always-pull", True)))
    raw_names = request.config.getoption("--push-image", [])
    # images.extend(request.node.get_closest_marker("push_image", []))
    # * Split ',' separated lists
    # * Remove duplicates - see conftest.py::pytest_collection_modifyitems()
    expanded = itertools.chain.from_iterable(name.split(",") for name in raw_names)
    images = [ImageName.parse(name) for name in set(expanded)]
    for image in images:
        LOGGER.debug("- %s", image)
        # Replication is best-effort: log failures rather than aborting the run.
        try:
            replicate_image(docker_client, image, endpoint, always_pull=always_pull)
        except Exception as exception:  # pylint: disable=broad-except
            LOGGER.warning(
                "Unable to replicate image '%s': %s", image, exception, exc_info=True
            )
    return images
| 35.964327
| 118
| 0.696381
| 3,662
| 31,253
| 5.617968
| 0.075369
| 0.151752
| 0.038108
| 0.029164
| 0.830943
| 0.798133
| 0.760706
| 0.728722
| 0.675205
| 0.632042
| 0
| 0.001801
| 0.218155
| 31,253
| 868
| 119
| 36.00576
| 0.840147
| 0.157777
| 0
| 0.625378
| 0
| 0
| 0.060815
| 0.01639
| 0
| 0
| 0
| 0.002304
| 0
| 1
| 0.054381
| false
| 0.072508
| 0.024169
| 0
| 0.137462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
23300efdd697b2575e312f7edd92461f467cdc9c
| 161
|
py
|
Python
|
src/onegov/gis/forms/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/gis/forms/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/gis/forms/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.gis.forms.fields import CoordinatesField
from onegov.gis.forms.widgets import CoordinatesWidget
__all__ = ['CoordinatesField', 'CoordinatesWidget']
| 32.2
| 54
| 0.832298
| 17
| 161
| 7.647059
| 0.588235
| 0.153846
| 0.2
| 0.276923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080745
| 161
| 4
| 55
| 40.25
| 0.878378
| 0
| 0
| 0
| 0
| 0
| 0.204969
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2354fdf8dad70153d9baf4c5be2ae3e5d8f5ea68
| 47
|
py
|
Python
|
lotoes/secciones/sorteosLnac/__init__.py
|
vidddd/lotoes
|
caf5fe71006e00e590549f921052f110c4bbb75f
|
[
"MIT"
] | null | null | null |
lotoes/secciones/sorteosLnac/__init__.py
|
vidddd/lotoes
|
caf5fe71006e00e590549f921052f110c4bbb75f
|
[
"MIT"
] | null | null | null |
lotoes/secciones/sorteosLnac/__init__.py
|
vidddd/lotoes
|
caf5fe71006e00e590549f921052f110c4bbb75f
|
[
"MIT"
] | null | null | null |
from .controller_sorteosLnac import sorteosLnac
| 47
| 47
| 0.914894
| 5
| 47
| 8.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 47
| 1
| 47
| 47
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
235b2d901b1bea2fa217606a67dfa81205191041
| 23
|
py
|
Python
|
sensu_plugins_aws_subnet/__init__.py
|
supernova106/sensu_plugins_aws_subnet
|
07edd3b414def15809c331b7269ecdafd3faf762
|
[
"MIT"
] | 12
|
2021-08-15T04:38:25.000Z
|
2021-08-16T18:17:25.000Z
|
sensu_plugins_aws_subnet/__init__.py
|
supernova106/sensu_plugins_aws_subnet
|
07edd3b414def15809c331b7269ecdafd3faf762
|
[
"MIT"
] | 1
|
2020-12-05T18:35:55.000Z
|
2020-12-05T18:35:55.000Z
|
sensu_plugins_aws_subnet/__init__.py
|
supernova106/sensu_plugins_aws_subnet
|
07edd3b414def15809c331b7269ecdafd3faf762
|
[
"MIT"
] | 2
|
2021-08-15T09:29:43.000Z
|
2021-11-17T05:41:41.000Z
|
from __main__ import *
| 11.5
| 22
| 0.782609
| 3
| 23
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
23658b032c06956a00496d7055711bc9d8118a63
| 26
|
py
|
Python
|
hello_world.py
|
fordjango/new_profiles_rest_api
|
b4086ad4211e5e278b2a8bcf3624f48925ea6040
|
[
"MIT"
] | null | null | null |
hello_world.py
|
fordjango/new_profiles_rest_api
|
b4086ad4211e5e278b2a8bcf3624f48925ea6040
|
[
"MIT"
] | null | null | null |
hello_world.py
|
fordjango/new_profiles_rest_api
|
b4086ad4211e5e278b2a8bcf3624f48925ea6040
|
[
"MIT"
] | null | null | null |
print("hello from santa")
| 13
| 25
| 0.730769
| 4
| 26
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
88cd7b4748dfc9d48b07e74cd1faaed730733d74
| 55
|
py
|
Python
|
python.py
|
Ayesha-Anjum-639/assignment
|
5a57fdfd360467d540cf12fe0f842ddd458371b8
|
[
"MIT"
] | 1
|
2019-10-12T17:28:12.000Z
|
2019-10-12T17:28:12.000Z
|
python.py
|
Ayesha-Anjum-639/assignment
|
5a57fdfd360467d540cf12fe0f842ddd458371b8
|
[
"MIT"
] | null | null | null |
python.py
|
Ayesha-Anjum-639/assignment
|
5a57fdfd360467d540cf12fe0f842ddd458371b8
|
[
"MIT"
] | null | null | null |
print("Hello World")
print(5+4)
print(5,"+",4,"=",5+4)
| 13.75
| 22
| 0.563636
| 11
| 55
| 2.818182
| 0.454545
| 0.193548
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 0.072727
| 55
| 3
| 23
| 18.333333
| 0.490196
| 0
| 0
| 0
| 0
| 0
| 0.236364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
88f03654581e59a140ad7f0b316b54846b6a53fc
| 99
|
py
|
Python
|
openfecli/commands/__init__.py
|
mikemhenry/openfe
|
d4c78af62a7ae05b99eb95d173661ac134b7e7b9
|
[
"MIT"
] | 14
|
2022-01-24T22:01:19.000Z
|
2022-03-31T04:58:35.000Z
|
openfecli/commands/__init__.py
|
mikemhenry/openfe
|
d4c78af62a7ae05b99eb95d173661ac134b7e7b9
|
[
"MIT"
] | 109
|
2022-01-24T18:57:05.000Z
|
2022-03-31T20:13:07.000Z
|
openfecli/commands/__init__.py
|
mikemhenry/openfe
|
d4c78af62a7ae05b99eb95d173661ac134b7e7b9
|
[
"MIT"
] | 4
|
2022-01-24T18:45:54.000Z
|
2022-02-21T06:28:24.000Z
|
# shouldn't apparently need this file, but here we are
from . import atommapping
from . import echo
| 33
| 54
| 0.777778
| 16
| 99
| 4.8125
| 0.875
| 0.25974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171717
| 99
| 3
| 55
| 33
| 0.939024
| 0.525253
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00366b8cd6a29a13103561a0ee4650cafb902f88
| 124
|
py
|
Python
|
pangram/pangram.py
|
oscantillomen/Exercism-Python
|
1a598769aff0e4dd58294fcd692ca0402061717e
|
[
"MIT"
] | null | null | null |
pangram/pangram.py
|
oscantillomen/Exercism-Python
|
1a598769aff0e4dd58294fcd692ca0402061717e
|
[
"MIT"
] | null | null | null |
pangram/pangram.py
|
oscantillomen/Exercism-Python
|
1a598769aff0e4dd58294fcd692ca0402061717e
|
[
"MIT"
] | null | null | null |
import string
ALPHABET = set(string.ascii_lowercase)
def is_pangram(sentence):
return ALPHABET <= set(sentence.lower())
| 24.8
| 44
| 0.766129
| 16
| 124
| 5.8125
| 0.75
| 0.236559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120968
| 124
| 5
| 44
| 24.8
| 0.853211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
cc7588de324a87e070270762efbca68576fe8829
| 85
|
py
|
Python
|
ndarray/same.py
|
Hupengyu/Paddle_learning
|
0ac1e2ad32e41ac87bbb19e4535a4bc253ca9b0f
|
[
"Apache-2.0"
] | 1
|
2021-08-02T01:51:35.000Z
|
2021-08-02T01:51:35.000Z
|
ndarray/same.py
|
Hupengyu/Paddle_learning
|
0ac1e2ad32e41ac87bbb19e4535a4bc253ca9b0f
|
[
"Apache-2.0"
] | 1
|
2021-11-03T08:58:30.000Z
|
2021-11-03T08:58:30.000Z
|
ndarray/same.py
|
Hupengyu/Paddle_learning
|
0ac1e2ad32e41ac87bbb19e4535a4bc253ca9b0f
|
[
"Apache-2.0"
] | null | null | null |
mask = 255
print(mask == 255)
blue_mask = mask == 255
print(mask)
print(blue_mask)
| 10.625
| 23
| 0.682353
| 14
| 85
| 4
| 0.285714
| 0.375
| 0.428571
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128571
| 0.176471
| 85
| 8
| 24
| 10.625
| 0.671429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
aeb56753078f68e7ebf914dfe3362d2ce395b9ab
| 44
|
py
|
Python
|
holoprot/models/__init__.py
|
vsomnath/holoprot
|
9bd6c58491eec701db94ce12f8e15e2143e202b9
|
[
"MIT"
] | 10
|
2022-01-19T19:01:35.000Z
|
2022-03-21T13:04:59.000Z
|
holoprot/models/__init__.py
|
vsomnath/holoprot
|
9bd6c58491eec701db94ce12f8e15e2143e202b9
|
[
"MIT"
] | null | null | null |
holoprot/models/__init__.py
|
vsomnath/holoprot
|
9bd6c58491eec701db94ce12f8e15e2143e202b9
|
[
"MIT"
] | 3
|
2022-01-11T16:21:32.000Z
|
2022-03-11T15:33:57.000Z
|
from holoprot.models.trainer import Trainer
| 22
| 43
| 0.863636
| 6
| 44
| 6.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
aef1f46a6f4fb6e4545b68a9cb41e8f97c07f8ea
| 92
|
py
|
Python
|
custom/plugins/setup_oer_reports_pre.py
|
M-Spencer-94/configNOW
|
56828587253202089e77cfdfcf5329f2a7f09b3f
|
[
"PSF-2.0",
"Apache-2.0",
"MIT"
] | 3
|
2019-07-09T20:02:48.000Z
|
2021-11-21T20:00:37.000Z
|
custom/plugins/setup_oer_reports_pre.py
|
M-Spencer-94/configNOW
|
56828587253202089e77cfdfcf5329f2a7f09b3f
|
[
"PSF-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
custom/plugins/setup_oer_reports_pre.py
|
M-Spencer-94/configNOW
|
56828587253202089e77cfdfcf5329f2a7f09b3f
|
[
"PSF-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
import common.assertions as assertions
def run(cfg):
assertions.validateAdminPassword(cfg)
| 23
| 38
| 0.836957
| 11
| 92
| 7
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 92
| 4
| 39
| 23
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0.333333
| false
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
4e02feb6bd33bf7b2f8ebc85d438cb20d237fd9e
| 30
|
py
|
Python
|
blind_blizzards/data/game.py
|
Starwort/code-jam-5
|
c11ab7508ca8c68fe64f33118a3a44956c0a8292
|
[
"MIT"
] | null | null | null |
blind_blizzards/data/game.py
|
Starwort/code-jam-5
|
c11ab7508ca8c68fe64f33118a3a44956c0a8292
|
[
"MIT"
] | null | null | null |
blind_blizzards/data/game.py
|
Starwort/code-jam-5
|
c11ab7508ca8c68fe64f33118a3a44956c0a8292
|
[
"MIT"
] | 1
|
2019-06-28T21:59:41.000Z
|
2019-06-28T21:59:41.000Z
|
from .structs import GameNode
| 15
| 29
| 0.833333
| 4
| 30
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4e28e3321377547a62600b472fa76b37318df52d
| 37,697
|
py
|
Python
|
instances/passenger_demand/pas-20210421-2109-int1/68.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int1/68.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int1/68.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 2290
passenger_arriving = (
(0, 5, 9, 3, 0, 0, 3, 7, 5, 2, 1, 0), # 0
(2, 4, 10, 6, 0, 0, 2, 3, 4, 2, 4, 0), # 1
(4, 9, 7, 4, 2, 0, 4, 5, 7, 4, 6, 0), # 2
(9, 9, 6, 4, 1, 0, 9, 8, 2, 5, 3, 0), # 3
(4, 6, 4, 8, 2, 0, 5, 6, 7, 3, 3, 0), # 4
(4, 3, 5, 3, 1, 0, 5, 8, 2, 2, 0, 0), # 5
(2, 2, 4, 5, 2, 0, 1, 1, 7, 1, 1, 0), # 6
(2, 3, 3, 5, 1, 0, 3, 2, 4, 2, 2, 0), # 7
(2, 7, 5, 5, 0, 0, 6, 3, 4, 8, 1, 0), # 8
(1, 9, 7, 4, 2, 0, 8, 7, 4, 8, 2, 0), # 9
(3, 5, 7, 3, 0, 0, 4, 10, 4, 3, 3, 0), # 10
(1, 4, 6, 2, 1, 0, 2, 3, 6, 8, 0, 0), # 11
(5, 2, 1, 2, 0, 0, 4, 9, 6, 2, 1, 0), # 12
(5, 1, 5, 3, 1, 0, 2, 4, 3, 7, 1, 0), # 13
(3, 6, 6, 2, 1, 0, 5, 4, 0, 4, 0, 0), # 14
(4, 2, 7, 2, 1, 0, 7, 10, 7, 4, 2, 0), # 15
(4, 6, 5, 5, 1, 0, 1, 14, 4, 1, 1, 0), # 16
(3, 5, 4, 2, 3, 0, 3, 5, 2, 6, 1, 0), # 17
(4, 4, 8, 2, 2, 0, 3, 5, 6, 3, 0, 0), # 18
(2, 7, 7, 2, 0, 0, 7, 2, 6, 1, 3, 0), # 19
(3, 7, 7, 2, 0, 0, 8, 9, 3, 1, 2, 0), # 20
(2, 8, 6, 2, 1, 0, 5, 5, 4, 3, 0, 0), # 21
(4, 6, 4, 1, 3, 0, 7, 4, 4, 5, 1, 0), # 22
(1, 5, 4, 3, 1, 0, 1, 5, 3, 5, 3, 0), # 23
(2, 9, 4, 1, 0, 0, 6, 6, 4, 7, 2, 0), # 24
(4, 8, 7, 2, 2, 0, 3, 6, 4, 1, 4, 0), # 25
(4, 6, 5, 2, 4, 0, 2, 0, 2, 4, 0, 0), # 26
(3, 4, 6, 4, 2, 0, 5, 10, 2, 3, 3, 0), # 27
(3, 12, 6, 3, 1, 0, 4, 12, 4, 2, 3, 0), # 28
(7, 8, 3, 3, 1, 0, 3, 3, 3, 4, 2, 0), # 29
(1, 12, 5, 0, 4, 0, 1, 4, 4, 5, 0, 0), # 30
(5, 8, 8, 3, 5, 0, 4, 7, 0, 4, 3, 0), # 31
(1, 14, 4, 4, 0, 0, 7, 7, 2, 3, 1, 0), # 32
(3, 7, 4, 2, 1, 0, 2, 5, 3, 2, 2, 0), # 33
(1, 7, 3, 3, 1, 0, 4, 11, 3, 5, 0, 0), # 34
(2, 5, 5, 4, 0, 0, 7, 6, 4, 5, 0, 0), # 35
(4, 7, 7, 3, 2, 0, 5, 7, 5, 1, 0, 0), # 36
(2, 6, 9, 8, 0, 0, 3, 9, 8, 0, 1, 0), # 37
(3, 4, 6, 2, 4, 0, 4, 5, 2, 0, 1, 0), # 38
(2, 6, 6, 1, 1, 0, 5, 7, 3, 8, 1, 0), # 39
(3, 8, 8, 3, 0, 0, 4, 3, 4, 9, 2, 0), # 40
(2, 3, 2, 2, 1, 0, 4, 9, 3, 6, 3, 0), # 41
(1, 8, 10, 0, 0, 0, 5, 12, 4, 4, 4, 0), # 42
(4, 11, 3, 2, 2, 0, 6, 5, 5, 4, 3, 0), # 43
(2, 7, 12, 2, 1, 0, 1, 4, 4, 1, 1, 0), # 44
(0, 9, 5, 1, 4, 0, 10, 4, 4, 6, 0, 0), # 45
(5, 4, 4, 0, 1, 0, 2, 4, 5, 3, 2, 0), # 46
(2, 5, 4, 0, 0, 0, 5, 9, 5, 5, 0, 0), # 47
(1, 10, 3, 4, 1, 0, 3, 3, 4, 4, 1, 0), # 48
(4, 6, 3, 4, 2, 0, 3, 6, 5, 2, 1, 0), # 49
(3, 6, 4, 5, 0, 0, 5, 9, 7, 3, 1, 0), # 50
(3, 6, 7, 2, 1, 0, 4, 5, 1, 3, 8, 0), # 51
(3, 11, 2, 4, 2, 0, 5, 7, 4, 7, 0, 0), # 52
(3, 8, 7, 3, 2, 0, 6, 9, 4, 3, 2, 0), # 53
(2, 7, 9, 1, 3, 0, 7, 6, 5, 2, 2, 0), # 54
(5, 10, 5, 2, 2, 0, 4, 5, 4, 4, 2, 0), # 55
(2, 6, 6, 1, 5, 0, 3, 3, 2, 3, 2, 0), # 56
(3, 3, 2, 3, 0, 0, 5, 6, 4, 8, 0, 0), # 57
(2, 7, 5, 2, 2, 0, 0, 1, 2, 3, 0, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(2.649651558384548, 6.796460700757575, 7.9942360218509, 6.336277173913043, 7.143028846153846, 4.75679347826087), # 0
(2.6745220100478, 6.872041598712823, 8.037415537524994, 6.371564387077295, 7.196566506410256, 4.7551721391908215), # 1
(2.699108477221734, 6.946501402918069, 8.07957012282205, 6.406074879227053, 7.248974358974359, 4.753501207729468), # 2
(2.72339008999122, 7.019759765625, 8.120668982969152, 6.4397792119565205, 7.300204326923078, 4.7517809103260875), # 3
(2.747345978441128, 7.091736339085298, 8.160681323193373, 6.472647946859904, 7.350208333333334, 4.750011473429951), # 4
(2.7709552726563262, 7.162350775550646, 8.199576348721793, 6.504651645531401, 7.39893830128205, 4.748193123490338), # 5
(2.794197102721686, 7.231522727272727, 8.237323264781493, 6.535760869565218, 7.446346153846154, 4.746326086956522), # 6
(2.817050598722076, 7.299171846503226, 8.273891276599542, 6.565946180555556, 7.492383814102565, 4.744410590277778), # 7
(2.8394948907423667, 7.365217785493826, 8.309249589403029, 6.595178140096618, 7.537003205128205, 4.7424468599033816), # 8
(2.8615091088674274, 7.429580196496212, 8.343367408419024, 6.623427309782609, 7.580156249999999, 4.740435122282609), # 9
(2.8830723831821286, 7.492178731762065, 8.376213938874606, 6.65066425120773, 7.621794871794872, 4.738375603864734), # 10
(2.9041638437713395, 7.55293304354307, 8.407758385996857, 6.676859525966184, 7.661870993589743, 4.736268531099034), # 11
(2.92476262071993, 7.611762784090908, 8.437969955012854, 6.7019836956521734, 7.700336538461538, 4.734114130434782), # 12
(2.944847844112769, 7.668587605657268, 8.46681785114967, 6.726007321859903, 7.737143429487181, 4.731912628321256), # 13
(2.9643986440347283, 7.723327160493828, 8.494271279634388, 6.748900966183574, 7.772243589743589, 4.729664251207729), # 14
(2.9833941505706756, 7.775901100852272, 8.520299445694086, 6.770635190217391, 7.8055889423076925, 4.7273692255434785), # 15
(3.001813493805482, 7.826229078984287, 8.544871554555842, 6.791180555555555, 7.8371314102564105, 4.725027777777778), # 16
(3.019635803824017, 7.874230747141554, 8.567956811446729, 6.810507623792271, 7.866822916666667, 4.722640134359904), # 17
(3.03684021071115, 7.919825757575757, 8.589524421593831, 6.82858695652174, 7.894615384615387, 4.72020652173913), # 18
(3.053405844551751, 7.962933762538579, 8.609543590224222, 6.845389115338164, 7.9204607371794875, 4.717727166364734), # 19
(3.0693118354306894, 8.003474414281705, 8.62798352256498, 6.860884661835749, 7.944310897435898, 4.71520229468599), # 20
(3.084537313432836, 8.041367365056816, 8.644813423843189, 6.875044157608696, 7.9661177884615375, 4.712632133152174), # 21
(3.099061408643059, 8.076532267115601, 8.660002499285918, 6.887838164251208, 7.985833333333332, 4.710016908212561), # 22
(3.1128632511462295, 8.108888772709737, 8.673519954120252, 6.899237243357488, 8.003409455128205, 4.707356846316426), # 23
(3.125921971027217, 8.138356534090908, 8.685334993573264, 6.909211956521739, 8.018798076923076, 4.704652173913043), # 24
(3.1382166983708903, 8.164855203510802, 8.695416822872037, 6.917732865338165, 8.03195112179487, 4.701903117451691), # 25
(3.1497265632621207, 8.188304433221099, 8.703734647243644, 6.9247705314009655, 8.042820512820512, 4.699109903381642), # 26
(3.160430695785777, 8.208623875473483, 8.710257671915166, 6.930295516304349, 8.051358173076924, 4.696272758152174), # 27
(3.1703082260267292, 8.22573318251964, 8.714955102113683, 6.934278381642512, 8.057516025641025, 4.69339190821256), # 28
(3.1793382840698468, 8.239552006611252, 8.717796143066266, 6.936689689009662, 8.061245993589743, 4.690467580012077), # 29
(3.1875, 8.25, 8.71875, 6.9375, 8.0625, 4.6875), # 30
(3.1951370284526854, 8.258678799715907, 8.718034948671496, 6.937353656045752, 8.062043661347518, 4.683376259786773), # 31
(3.202609175191816, 8.267242897727273, 8.715910024154589, 6.93691748366013, 8.06068439716312, 4.677024758454107), # 32
(3.2099197969948845, 8.275691228693182, 8.712405570652175, 6.936195772058824, 8.058436835106383, 4.66850768365817), # 33
(3.217072250639386, 8.284022727272728, 8.70755193236715, 6.935192810457517, 8.05531560283688, 4.657887223055139), # 34
(3.224069892902813, 8.292236328124998, 8.701379453502415, 6.933912888071895, 8.051335328014185, 4.645225564301183), # 35
(3.23091608056266, 8.300330965909092, 8.69391847826087, 6.932360294117648, 8.046510638297873, 4.630584895052474), # 36
(3.2376141703964194, 8.308305575284091, 8.68519935084541, 6.9305393178104575, 8.040856161347516, 4.614027402965184), # 37
(3.2441675191815853, 8.31615909090909, 8.675252415458937, 6.9284542483660125, 8.034386524822695, 4.595615275695485), # 38
(3.250579483695652, 8.323890447443182, 8.664108016304347, 6.926109375, 8.027116356382978, 4.57541070089955), # 39
(3.2568534207161126, 8.331498579545455, 8.651796497584542, 6.923508986928105, 8.019060283687942, 4.5534758662335495), # 40
(3.26299268702046, 8.338982421874999, 8.638348203502416, 6.920657373366013, 8.010232934397163, 4.529872959353657), # 41
(3.269000639386189, 8.34634090909091, 8.62379347826087, 6.917558823529411, 8.000648936170213, 4.504664167916042), # 42
(3.2748806345907933, 8.353572975852272, 8.608162666062801, 6.914217626633987, 7.990322916666666, 4.477911679576878), # 43
(3.2806360294117645, 8.360677556818182, 8.591486111111111, 6.910638071895424, 7.979269503546099, 4.449677681992337), # 44
(3.286270180626598, 8.367653586647727, 8.573794157608697, 6.906824448529411, 7.967503324468085, 4.420024362818591), # 45
(3.291786445012788, 8.374500000000001, 8.555117149758455, 6.902781045751634, 7.955039007092199, 4.389013909711811), # 46
(3.297188179347826, 8.381215731534091, 8.535485431763284, 6.898512152777777, 7.941891179078015, 4.356708510328169), # 47
(3.3024787404092075, 8.387799715909091, 8.514929347826087, 6.894022058823529, 7.928074468085106, 4.323170352323839), # 48
(3.307661484974424, 8.39425088778409, 8.493479242149759, 6.889315053104576, 7.91360350177305, 4.288461623354989), # 49
(3.312739769820972, 8.40056818181818, 8.471165458937199, 6.884395424836602, 7.898492907801418, 4.252644511077794), # 50
(3.317716951726343, 8.406750532670454, 8.448018342391304, 6.879267463235294, 7.882757313829787, 4.215781203148426), # 51
(3.322596387468031, 8.412796875, 8.424068236714975, 6.87393545751634, 7.86641134751773, 4.177933887223055), # 52
(3.3273814338235295, 8.41870614346591, 8.39934548611111, 6.868403696895425, 7.849469636524823, 4.139164750957854), # 53
(3.332075447570333, 8.424477272727271, 8.373880434782608, 6.8626764705882355, 7.831946808510638, 4.099535982008995), # 54
(3.336681785485933, 8.430109197443182, 8.347703426932366, 6.856758067810458, 7.813857491134752, 4.05910976803265), # 55
(3.341203804347826, 8.435600852272726, 8.320844806763285, 6.8506527777777775, 7.795216312056738, 4.017948296684991), # 56
(3.345644860933504, 8.440951171875001, 8.29333491847826, 6.844364889705882, 7.77603789893617, 3.9761137556221886), # 57
(3.3500083120204605, 8.44615909090909, 8.265204106280192, 6.837898692810458, 7.756336879432624, 3.9336683325004165), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_arriving_acc = (
(0, 5, 9, 3, 0, 0, 3, 7, 5, 2, 1, 0), # 0
(2, 9, 19, 9, 0, 0, 5, 10, 9, 4, 5, 0), # 1
(6, 18, 26, 13, 2, 0, 9, 15, 16, 8, 11, 0), # 2
(15, 27, 32, 17, 3, 0, 18, 23, 18, 13, 14, 0), # 3
(19, 33, 36, 25, 5, 0, 23, 29, 25, 16, 17, 0), # 4
(23, 36, 41, 28, 6, 0, 28, 37, 27, 18, 17, 0), # 5
(25, 38, 45, 33, 8, 0, 29, 38, 34, 19, 18, 0), # 6
(27, 41, 48, 38, 9, 0, 32, 40, 38, 21, 20, 0), # 7
(29, 48, 53, 43, 9, 0, 38, 43, 42, 29, 21, 0), # 8
(30, 57, 60, 47, 11, 0, 46, 50, 46, 37, 23, 0), # 9
(33, 62, 67, 50, 11, 0, 50, 60, 50, 40, 26, 0), # 10
(34, 66, 73, 52, 12, 0, 52, 63, 56, 48, 26, 0), # 11
(39, 68, 74, 54, 12, 0, 56, 72, 62, 50, 27, 0), # 12
(44, 69, 79, 57, 13, 0, 58, 76, 65, 57, 28, 0), # 13
(47, 75, 85, 59, 14, 0, 63, 80, 65, 61, 28, 0), # 14
(51, 77, 92, 61, 15, 0, 70, 90, 72, 65, 30, 0), # 15
(55, 83, 97, 66, 16, 0, 71, 104, 76, 66, 31, 0), # 16
(58, 88, 101, 68, 19, 0, 74, 109, 78, 72, 32, 0), # 17
(62, 92, 109, 70, 21, 0, 77, 114, 84, 75, 32, 0), # 18
(64, 99, 116, 72, 21, 0, 84, 116, 90, 76, 35, 0), # 19
(67, 106, 123, 74, 21, 0, 92, 125, 93, 77, 37, 0), # 20
(69, 114, 129, 76, 22, 0, 97, 130, 97, 80, 37, 0), # 21
(73, 120, 133, 77, 25, 0, 104, 134, 101, 85, 38, 0), # 22
(74, 125, 137, 80, 26, 0, 105, 139, 104, 90, 41, 0), # 23
(76, 134, 141, 81, 26, 0, 111, 145, 108, 97, 43, 0), # 24
(80, 142, 148, 83, 28, 0, 114, 151, 112, 98, 47, 0), # 25
(84, 148, 153, 85, 32, 0, 116, 151, 114, 102, 47, 0), # 26
(87, 152, 159, 89, 34, 0, 121, 161, 116, 105, 50, 0), # 27
(90, 164, 165, 92, 35, 0, 125, 173, 120, 107, 53, 0), # 28
(97, 172, 168, 95, 36, 0, 128, 176, 123, 111, 55, 0), # 29
(98, 184, 173, 95, 40, 0, 129, 180, 127, 116, 55, 0), # 30
(103, 192, 181, 98, 45, 0, 133, 187, 127, 120, 58, 0), # 31
(104, 206, 185, 102, 45, 0, 140, 194, 129, 123, 59, 0), # 32
(107, 213, 189, 104, 46, 0, 142, 199, 132, 125, 61, 0), # 33
(108, 220, 192, 107, 47, 0, 146, 210, 135, 130, 61, 0), # 34
(110, 225, 197, 111, 47, 0, 153, 216, 139, 135, 61, 0), # 35
(114, 232, 204, 114, 49, 0, 158, 223, 144, 136, 61, 0), # 36
(116, 238, 213, 122, 49, 0, 161, 232, 152, 136, 62, 0), # 37
(119, 242, 219, 124, 53, 0, 165, 237, 154, 136, 63, 0), # 38
(121, 248, 225, 125, 54, 0, 170, 244, 157, 144, 64, 0), # 39
(124, 256, 233, 128, 54, 0, 174, 247, 161, 153, 66, 0), # 40
(126, 259, 235, 130, 55, 0, 178, 256, 164, 159, 69, 0), # 41
(127, 267, 245, 130, 55, 0, 183, 268, 168, 163, 73, 0), # 42
(131, 278, 248, 132, 57, 0, 189, 273, 173, 167, 76, 0), # 43
(133, 285, 260, 134, 58, 0, 190, 277, 177, 168, 77, 0), # 44
(133, 294, 265, 135, 62, 0, 200, 281, 181, 174, 77, 0), # 45
(138, 298, 269, 135, 63, 0, 202, 285, 186, 177, 79, 0), # 46
(140, 303, 273, 135, 63, 0, 207, 294, 191, 182, 79, 0), # 47
(141, 313, 276, 139, 64, 0, 210, 297, 195, 186, 80, 0), # 48
(145, 319, 279, 143, 66, 0, 213, 303, 200, 188, 81, 0), # 49
(148, 325, 283, 148, 66, 0, 218, 312, 207, 191, 82, 0), # 50
(151, 331, 290, 150, 67, 0, 222, 317, 208, 194, 90, 0), # 51
(154, 342, 292, 154, 69, 0, 227, 324, 212, 201, 90, 0), # 52
(157, 350, 299, 157, 71, 0, 233, 333, 216, 204, 92, 0), # 53
(159, 357, 308, 158, 74, 0, 240, 339, 221, 206, 94, 0), # 54
(164, 367, 313, 160, 76, 0, 244, 344, 225, 210, 96, 0), # 55
(166, 373, 319, 161, 81, 0, 247, 347, 227, 213, 98, 0), # 56
(169, 376, 321, 164, 81, 0, 252, 353, 231, 221, 98, 0), # 57
(171, 383, 326, 166, 83, 0, 252, 354, 233, 224, 98, 0), # 58
(171, 383, 326, 166, 83, 0, 252, 354, 233, 224, 98, 0), # 59
)
passenger_arriving_rate = (
(2.649651558384548, 5.43716856060606, 4.79654161311054, 2.534510869565217, 1.428605769230769, 0.0, 4.75679347826087, 5.714423076923076, 3.801766304347826, 3.1976944087403596, 1.359292140151515, 0.0), # 0
(2.6745220100478, 5.497633278970258, 4.822449322514997, 2.5486257548309177, 1.439313301282051, 0.0, 4.7551721391908215, 5.757253205128204, 3.8229386322463768, 3.2149662150099974, 1.3744083197425645, 0.0), # 1
(2.699108477221734, 5.557201122334455, 4.8477420736932295, 2.562429951690821, 1.4497948717948717, 0.0, 4.753501207729468, 5.799179487179487, 3.8436449275362317, 3.23182804912882, 1.3893002805836137, 0.0), # 2
(2.72339008999122, 5.6158078125, 4.872401389781491, 2.575911684782608, 1.4600408653846155, 0.0, 4.7517809103260875, 5.840163461538462, 3.863867527173912, 3.2482675931876606, 1.403951953125, 0.0), # 3
(2.747345978441128, 5.673389071268238, 4.896408793916024, 2.589059178743961, 1.4700416666666667, 0.0, 4.750011473429951, 5.880166666666667, 3.883588768115942, 3.2642725292773487, 1.4183472678170594, 0.0), # 4
(2.7709552726563262, 5.729880620440516, 4.919745809233076, 2.6018606582125603, 1.47978766025641, 0.0, 4.748193123490338, 5.91915064102564, 3.9027909873188404, 3.279830539488717, 1.432470155110129, 0.0), # 5
(2.794197102721686, 5.785218181818181, 4.942393958868895, 2.614304347826087, 1.4892692307692306, 0.0, 4.746326086956522, 5.957076923076922, 3.9214565217391306, 3.294929305912597, 1.4463045454545453, 0.0), # 6
(2.817050598722076, 5.83933747720258, 4.964334765959725, 2.626378472222222, 1.498476762820513, 0.0, 4.744410590277778, 5.993907051282052, 3.939567708333333, 3.309556510639817, 1.459834369300645, 0.0), # 7
(2.8394948907423667, 5.89217422839506, 4.985549753641817, 2.638071256038647, 1.5074006410256409, 0.0, 4.7424468599033816, 6.0296025641025635, 3.9571068840579704, 3.3236998357612113, 1.473043557098765, 0.0), # 8
(2.8615091088674274, 5.943664157196969, 5.006020445051414, 2.649370923913043, 1.5160312499999997, 0.0, 4.740435122282609, 6.064124999999999, 3.9740563858695652, 3.3373469633676094, 1.4859160392992423, 0.0), # 9
(2.8830723831821286, 5.993742985409652, 5.025728363324764, 2.660265700483092, 1.5243589743589743, 0.0, 4.738375603864734, 6.097435897435897, 3.990398550724638, 3.3504855755498424, 1.498435746352413, 0.0), # 10
(2.9041638437713395, 6.042346434834456, 5.044655031598114, 2.6707438103864733, 1.5323741987179484, 0.0, 4.736268531099034, 6.129496794871794, 4.0061157155797105, 3.3631033543987425, 1.510586608708614, 0.0), # 11
(2.92476262071993, 6.089410227272726, 5.062781973007712, 2.680793478260869, 1.5400673076923075, 0.0, 4.734114130434782, 6.16026923076923, 4.021190217391304, 3.375187982005141, 1.5223525568181815, 0.0), # 12
(2.944847844112769, 6.134870084525814, 5.080090710689802, 2.690402928743961, 1.547428685897436, 0.0, 4.731912628321256, 6.189714743589744, 4.035604393115942, 3.386727140459868, 1.5337175211314535, 0.0), # 13
(2.9643986440347283, 6.1786617283950624, 5.096562767780632, 2.699560386473429, 1.5544487179487176, 0.0, 4.729664251207729, 6.217794871794871, 4.049340579710144, 3.397708511853755, 1.5446654320987656, 0.0), # 14
(2.9833941505706756, 6.220720880681816, 5.112179667416451, 2.708254076086956, 1.5611177884615384, 0.0, 4.7273692255434785, 6.2444711538461535, 4.062381114130434, 3.408119778277634, 1.555180220170454, 0.0), # 15
(3.001813493805482, 6.26098326318743, 5.126922932733505, 2.716472222222222, 1.5674262820512819, 0.0, 4.725027777777778, 6.2697051282051275, 4.074708333333333, 3.4179486218223363, 1.5652458157968574, 0.0), # 16
(3.019635803824017, 6.299384597713242, 5.140774086868038, 2.724203049516908, 1.5733645833333332, 0.0, 4.722640134359904, 6.293458333333333, 4.0863045742753625, 3.4271827245786914, 1.5748461494283106, 0.0), # 17
(3.03684021071115, 6.3358606060606055, 5.153714652956299, 2.7314347826086958, 1.578923076923077, 0.0, 4.72020652173913, 6.315692307692308, 4.097152173913043, 3.435809768637532, 1.5839651515151514, 0.0), # 18
(3.053405844551751, 6.370347010030863, 5.165726154134533, 2.738155646135265, 1.5840921474358973, 0.0, 4.717727166364734, 6.336368589743589, 4.107233469202898, 3.4438174360896885, 1.5925867525077158, 0.0), # 19
(3.0693118354306894, 6.402779531425363, 5.1767901135389875, 2.7443538647342995, 1.5888621794871793, 0.0, 4.71520229468599, 6.355448717948717, 4.11653079710145, 3.4511934090259917, 1.6006948828563408, 0.0), # 20
(3.084537313432836, 6.433093892045452, 5.186888054305913, 2.750017663043478, 1.5932235576923073, 0.0, 4.712632133152174, 6.372894230769229, 4.125026494565217, 3.4579253695372754, 1.608273473011363, 0.0), # 21
(3.099061408643059, 6.46122581369248, 5.19600149957155, 2.7551352657004826, 1.5971666666666662, 0.0, 4.710016908212561, 6.388666666666665, 4.132702898550725, 3.464000999714367, 1.61530645342312, 0.0), # 22
(3.1128632511462295, 6.487111018167789, 5.204111972472151, 2.759694897342995, 1.6006818910256408, 0.0, 4.707356846316426, 6.402727564102563, 4.139542346014493, 3.4694079816481005, 1.6217777545419472, 0.0), # 23
(3.125921971027217, 6.5106852272727265, 5.211200996143958, 2.763684782608695, 1.6037596153846152, 0.0, 4.704652173913043, 6.415038461538461, 4.1455271739130435, 3.474133997429305, 1.6276713068181816, 0.0), # 24
(3.1382166983708903, 6.531884162808641, 5.217250093723222, 2.7670931461352657, 1.606390224358974, 0.0, 4.701903117451691, 6.425560897435896, 4.150639719202899, 3.4781667291488145, 1.6329710407021603, 0.0), # 25
(3.1497265632621207, 6.550643546576878, 5.222240788346187, 2.7699082125603858, 1.6085641025641022, 0.0, 4.699109903381642, 6.434256410256409, 4.154862318840579, 3.4814938588974575, 1.6376608866442195, 0.0), # 26
(3.160430695785777, 6.566899100378786, 5.226154603149099, 2.772118206521739, 1.6102716346153847, 0.0, 4.696272758152174, 6.441086538461539, 4.158177309782609, 3.484103068766066, 1.6417247750946966, 0.0), # 27
(3.1703082260267292, 6.580586546015712, 5.228973061268209, 2.7737113526570045, 1.6115032051282048, 0.0, 4.69339190821256, 6.446012820512819, 4.160567028985507, 3.4859820408454727, 1.645146636503928, 0.0), # 28
(3.1793382840698468, 6.591641605289001, 5.230677685839759, 2.7746758756038647, 1.6122491987179486, 0.0, 4.690467580012077, 6.448996794871794, 4.162013813405797, 3.487118457226506, 1.6479104013222503, 0.0), # 29
(3.1875, 6.6, 5.23125, 2.775, 1.6124999999999998, 0.0, 4.6875, 6.449999999999999, 4.1625, 3.4875, 1.65, 0.0), # 30
(3.1951370284526854, 6.606943039772726, 5.230820969202898, 2.7749414624183006, 1.6124087322695035, 0.0, 4.683376259786773, 6.449634929078014, 4.162412193627451, 3.4872139794685983, 1.6517357599431814, 0.0), # 31
(3.202609175191816, 6.613794318181818, 5.229546014492753, 2.7747669934640515, 1.6121368794326238, 0.0, 4.677024758454107, 6.448547517730495, 4.162150490196078, 3.4863640096618354, 1.6534485795454545, 0.0), # 32
(3.2099197969948845, 6.620552982954545, 5.227443342391305, 2.774478308823529, 1.6116873670212764, 0.0, 4.66850768365817, 6.446749468085105, 4.161717463235294, 3.4849622282608697, 1.6551382457386363, 0.0), # 33
(3.217072250639386, 6.627218181818182, 5.224531159420289, 2.7740771241830067, 1.6110631205673758, 0.0, 4.657887223055139, 6.444252482269503, 4.16111568627451, 3.4830207729468596, 1.6568045454545455, 0.0), # 34
(3.224069892902813, 6.633789062499998, 5.220827672101449, 2.773565155228758, 1.6102670656028368, 0.0, 4.645225564301183, 6.441068262411347, 4.160347732843137, 3.480551781400966, 1.6584472656249996, 0.0), # 35
(3.23091608056266, 6.6402647727272734, 5.2163510869565215, 2.7729441176470586, 1.6093021276595745, 0.0, 4.630584895052474, 6.437208510638298, 4.159416176470589, 3.477567391304347, 1.6600661931818184, 0.0), # 36
(3.2376141703964194, 6.6466444602272725, 5.211119610507246, 2.7722157271241827, 1.6081712322695032, 0.0, 4.614027402965184, 6.432684929078013, 4.158323590686274, 3.474079740338164, 1.6616611150568181, 0.0), # 37
(3.2441675191815853, 6.652927272727272, 5.205151449275362, 2.7713816993464047, 1.6068773049645388, 0.0, 4.595615275695485, 6.427509219858155, 4.157072549019607, 3.4701009661835744, 1.663231818181818, 0.0), # 38
(3.250579483695652, 6.659112357954545, 5.198464809782608, 2.7704437499999996, 1.6054232712765955, 0.0, 4.57541070089955, 6.421693085106382, 4.155665625, 3.4656432065217384, 1.6647780894886361, 0.0), # 39
(3.2568534207161126, 6.6651988636363635, 5.191077898550724, 2.7694035947712417, 1.6038120567375882, 0.0, 4.5534758662335495, 6.415248226950353, 4.154105392156863, 3.4607185990338163, 1.6662997159090909, 0.0), # 40
(3.26299268702046, 6.671185937499998, 5.1830089221014495, 2.768262949346405, 1.6020465868794325, 0.0, 4.529872959353657, 6.40818634751773, 4.152394424019608, 3.455339281400966, 1.6677964843749995, 0.0), # 41
(3.269000639386189, 6.677072727272728, 5.174276086956522, 2.767023529411764, 1.6001297872340425, 0.0, 4.504664167916042, 6.40051914893617, 4.150535294117646, 3.4495173913043478, 1.669268181818182, 0.0), # 42
(3.2748806345907933, 6.682858380681817, 5.164897599637681, 2.7656870506535944, 1.5980645833333331, 0.0, 4.477911679576878, 6.3922583333333325, 4.148530575980392, 3.4432650664251203, 1.6707145951704543, 0.0), # 43
(3.2806360294117645, 6.688542045454545, 5.154891666666667, 2.7642552287581696, 1.5958539007092198, 0.0, 4.449677681992337, 6.383415602836879, 4.146382843137254, 3.4365944444444443, 1.6721355113636363, 0.0), # 44
(3.286270180626598, 6.694122869318181, 5.144276494565218, 2.7627297794117642, 1.593500664893617, 0.0, 4.420024362818591, 6.374002659574468, 4.144094669117647, 3.4295176630434785, 1.6735307173295453, 0.0), # 45
(3.291786445012788, 6.6996, 5.133070289855073, 2.761112418300653, 1.5910078014184397, 0.0, 4.389013909711811, 6.364031205673759, 4.14166862745098, 3.4220468599033818, 1.6749, 0.0), # 46
(3.297188179347826, 6.704972585227273, 5.12129125905797, 2.759404861111111, 1.588378235815603, 0.0, 4.356708510328169, 6.353512943262412, 4.139107291666666, 3.4141941727053133, 1.6762431463068181, 0.0), # 47
(3.3024787404092075, 6.710239772727273, 5.108957608695651, 2.757608823529411, 1.5856148936170211, 0.0, 4.323170352323839, 6.3424595744680845, 4.136413235294117, 3.4059717391304343, 1.6775599431818182, 0.0), # 48
(3.307661484974424, 6.715400710227271, 5.096087545289855, 2.75572602124183, 1.5827207003546098, 0.0, 4.288461623354989, 6.330882801418439, 4.133589031862745, 3.3973916968599034, 1.6788501775568176, 0.0), # 49
(3.312739769820972, 6.720454545454543, 5.082699275362319, 2.7537581699346405, 1.5796985815602835, 0.0, 4.252644511077794, 6.318794326241134, 4.130637254901961, 3.388466183574879, 1.6801136363636358, 0.0), # 50
(3.317716951726343, 6.725400426136363, 5.068811005434783, 2.7517069852941174, 1.5765514627659571, 0.0, 4.215781203148426, 6.306205851063829, 4.127560477941176, 3.3792073369565214, 1.6813501065340908, 0.0), # 51
(3.322596387468031, 6.730237499999999, 5.054440942028985, 2.7495741830065357, 1.573282269503546, 0.0, 4.177933887223055, 6.293129078014184, 4.124361274509804, 3.3696272946859898, 1.6825593749999999, 0.0), # 52
(3.3273814338235295, 6.7349649147727275, 5.039607291666666, 2.7473614787581697, 1.5698939273049646, 0.0, 4.139164750957854, 6.279575709219858, 4.121042218137255, 3.359738194444444, 1.6837412286931819, 0.0), # 53
(3.332075447570333, 6.739581818181817, 5.024328260869565, 2.745070588235294, 1.5663893617021276, 0.0, 4.099535982008995, 6.2655574468085105, 4.117605882352941, 3.3495521739130427, 1.6848954545454542, 0.0), # 54
(3.336681785485933, 6.744087357954545, 5.008622056159419, 2.7427032271241827, 1.5627714982269503, 0.0, 4.05910976803265, 6.251085992907801, 4.114054840686275, 3.3390813707729463, 1.6860218394886362, 0.0), # 55
(3.341203804347826, 6.74848068181818, 4.9925068840579705, 2.740261111111111, 1.5590432624113475, 0.0, 4.017948296684991, 6.23617304964539, 4.110391666666667, 3.328337922705314, 1.687120170454545, 0.0), # 56
(3.345644860933504, 6.752760937500001, 4.976000951086956, 2.7377459558823527, 1.5552075797872338, 0.0, 3.9761137556221886, 6.220830319148935, 4.106618933823529, 3.317333967391304, 1.6881902343750002, 0.0), # 57
(3.3500083120204605, 6.756927272727271, 4.959122463768115, 2.7351594771241827, 1.5512673758865245, 0.0, 3.9336683325004165, 6.205069503546098, 4.102739215686275, 3.3060816425120767, 1.6892318181818178, 0.0), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_allighting_rate = (
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59
)
"""
parameters for reproducibiliy. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
#initial entropy
entropy = 258194110137029475889902652135037600173
#index for seed sequence child
child_seed_index = (
1, # 0
67, # 1
)
| 112.528358
| 215
| 0.727724
| 5,147
| 37,697
| 5.327764
| 0.216825
| 0.315075
| 0.249435
| 0.472613
| 0.332251
| 0.3301
| 0.3301
| 0.3301
| 0.3301
| 0.3301
| 0
| 0.817979
| 0.119744
| 37,697
| 334
| 216
| 112.865269
| 0.008408
| 0.032125
| 0
| 0.202532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.015823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4e3a67bc274883baf27d3e4d3e4ad196d7ddbc63
| 33
|
py
|
Python
|
iturmas/decorators/__init__.py
|
daniel-ufabc/match-classes
|
2783cdf1c7363fcc14023a6cacad697b6af0f011
|
[
"MIT"
] | null | null | null |
iturmas/decorators/__init__.py
|
daniel-ufabc/match-classes
|
2783cdf1c7363fcc14023a6cacad697b6af0f011
|
[
"MIT"
] | null | null | null |
iturmas/decorators/__init__.py
|
daniel-ufabc/match-classes
|
2783cdf1c7363fcc14023a6cacad697b6af0f011
|
[
"MIT"
] | null | null | null |
from .auth import login_required
| 16.5
| 32
| 0.848485
| 5
| 33
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9da470ea36af0b767f746d020e41a7f0c5dba94a
| 153
|
py
|
Python
|
python/niveau1/2-Repetitions/6.py
|
ThomasProg/France-IOI
|
03ea502e03f686d74ecf31a17273aded7b8e8a1f
|
[
"MIT"
] | 2
|
2022-02-13T13:35:13.000Z
|
2022-03-31T21:02:11.000Z
|
python/niveau1/2-Repetitions/6.py
|
ThomasProg/France-IOI
|
03ea502e03f686d74ecf31a17273aded7b8e8a1f
|
[
"MIT"
] | null | null | null |
python/niveau1/2-Repetitions/6.py
|
ThomasProg/France-IOI
|
03ea502e03f686d74ecf31a17273aded7b8e8a1f
|
[
"MIT"
] | 1
|
2020-11-15T15:21:24.000Z
|
2020-11-15T15:21:24.000Z
|
for i in range(30):
print("a_", end="")
print()
for i in range(30):
print("b_", end="")
print()
for i in range(30):
print("c_", end="")
| 15.3
| 23
| 0.51634
| 26
| 153
| 2.923077
| 0.384615
| 0.157895
| 0.236842
| 0.434211
| 0.921053
| 0.921053
| 0.684211
| 0.684211
| 0
| 0
| 0
| 0.051724
| 0.24183
| 153
| 10
| 24
| 15.3
| 0.603448
| 0
| 0
| 0.625
| 0
| 0
| 0.038961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.625
| 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
9dabcfa6524e1e4a0e2b51dbe24a327024815ea3
| 24
|
py
|
Python
|
emailutil/__init__.py
|
cityofaustin/atd-utils-email
|
bcf2c55fe770745a2ed6da22e44971ef6ceaae37
|
[
"CC0-1.0"
] | null | null | null |
emailutil/__init__.py
|
cityofaustin/atd-utils-email
|
bcf2c55fe770745a2ed6da22e44971ef6ceaae37
|
[
"CC0-1.0"
] | null | null | null |
emailutil/__init__.py
|
cityofaustin/atd-utils-email
|
bcf2c55fe770745a2ed6da22e44971ef6ceaae37
|
[
"CC0-1.0"
] | null | null | null |
from .emailutil import *
| 24
| 24
| 0.791667
| 3
| 24
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9db042c12b1460a61eed0c0cb77f85501b0f72a1
| 215
|
py
|
Python
|
plugins/dbnd-snowflake/src/dbnd_snowflake/__init__.py
|
FHoffmannCode/dbnd
|
82beee1a8c752235bf21b4b0ceace5ab25410e52
|
[
"Apache-2.0"
] | null | null | null |
plugins/dbnd-snowflake/src/dbnd_snowflake/__init__.py
|
FHoffmannCode/dbnd
|
82beee1a8c752235bf21b4b0ceace5ab25410e52
|
[
"Apache-2.0"
] | null | null | null |
plugins/dbnd-snowflake/src/dbnd_snowflake/__init__.py
|
FHoffmannCode/dbnd
|
82beee1a8c752235bf21b4b0ceace5ab25410e52
|
[
"Apache-2.0"
] | null | null | null |
from dbnd._core.commands.metrics import log_snowflake_table
from dbnd_snowflake.snowflake_resources import log_snowflake_resource_usage
__all__ = [
"log_snowflake_resource_usage",
"log_snowflake_table",
]
| 23.888889
| 75
| 0.827907
| 27
| 215
| 5.962963
| 0.481481
| 0.298137
| 0.223602
| 0.310559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111628
| 215
| 8
| 76
| 26.875
| 0.842932
| 0
| 0
| 0
| 0
| 0
| 0.218605
| 0.130233
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
9dfff168d101cb9f78868b0ee56c24261cd170c9
| 73
|
py
|
Python
|
01-sample-instance/settings.py
|
diodonfrost/pulumi-aws-examples
|
2fa07f3219dc01d00051559eb207c547d3554232
|
[
"Apache-2.0"
] | null | null | null |
01-sample-instance/settings.py
|
diodonfrost/pulumi-aws-examples
|
2fa07f3219dc01d00051559eb207c547d3554232
|
[
"Apache-2.0"
] | null | null | null |
01-sample-instance/settings.py
|
diodonfrost/pulumi-aws-examples
|
2fa07f3219dc01d00051559eb207c547d3554232
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf8
vpc_cidr = "192.168.0.0/16"
http_cidr = "192.168.1.0/24"
| 14.6
| 28
| 0.643836
| 16
| 73
| 2.8125
| 0.6875
| 0.311111
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.136986
| 73
| 4
| 29
| 18.25
| 0.380952
| 0.164384
| 0
| 0
| 0
| 0
| 0.474576
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ae1ad8c506c36a888f234786efecf582422e3003
| 35
|
py
|
Python
|
src/artifice/scraper/supervisor/__init__.py
|
artifice-project/artifice-scraper
|
f224a0da22162fd479d6b9f9095ff5cae4723716
|
[
"MIT"
] | null | null | null |
src/artifice/scraper/supervisor/__init__.py
|
artifice-project/artifice-scraper
|
f224a0da22162fd479d6b9f9095ff5cae4723716
|
[
"MIT"
] | 5
|
2019-09-18T19:17:14.000Z
|
2021-03-20T01:46:06.000Z
|
src/artifice/scraper/supervisor/__init__.py
|
artifice-project/artifice-scraper
|
f224a0da22162fd479d6b9f9095ff5cae4723716
|
[
"MIT"
] | null | null | null |
from .supervisor import Supervisor
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ae867f0e402cb89db3cccc626cd6f645b33f32f2
| 40
|
py
|
Python
|
condensate/core/__init__.py
|
Zwierlein/condensate
|
34908b7e99785e9a4a9c5c743fe1a8e6f4cbf4ad
|
[
"MIT"
] | 4
|
2021-07-24T10:57:06.000Z
|
2021-12-11T01:24:54.000Z
|
condensate/core/__init__.py
|
Zwierlein/condensate
|
34908b7e99785e9a4a9c5c743fe1a8e6f4cbf4ad
|
[
"MIT"
] | 9
|
2021-07-15T04:13:23.000Z
|
2021-08-02T21:57:00.000Z
|
condensate/core/__init__.py
|
Zwierlein/condensate
|
34908b7e99785e9a4a9c5c743fe1a8e6f4cbf4ad
|
[
"MIT"
] | 2
|
2021-07-21T10:39:30.000Z
|
2021-08-01T03:05:14.000Z
|
from condensate.core.build import gpcore
| 40
| 40
| 0.875
| 6
| 40
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
88521be531a73b3f205941d7145e1d213b76932c
| 117
|
py
|
Python
|
tests/test_controllers/test_demo.py
|
wikimedia/analytics-wikimetrics
|
1d2036657b06ccd16ecfc76edd3f9a6119ff75f4
|
[
"MIT"
] | 6
|
2015-01-28T05:59:08.000Z
|
2018-01-09T07:48:57.000Z
|
tests/test_controllers/test_demo.py
|
wikimedia/analytics-wikimetrics
|
1d2036657b06ccd16ecfc76edd3f9a6119ff75f4
|
[
"MIT"
] | 2
|
2020-05-09T16:36:43.000Z
|
2020-05-09T16:52:35.000Z
|
tests/test_controllers/test_demo.py
|
wikimedia/analytics-wikimetrics
|
1d2036657b06ccd16ecfc76edd3f9a6119ff75f4
|
[
"MIT"
] | 1
|
2016-01-13T07:19:44.000Z
|
2016-01-13T07:19:44.000Z
|
from nose.tools import assert_equal
from tests.fixtures import WebTest
class TestDemoController(WebTest):
pass
| 16.714286
| 35
| 0.811966
| 15
| 117
| 6.266667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145299
| 117
| 6
| 36
| 19.5
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
8887cdf2cc8ae9604a5a9ce44664b255c6cabd67
| 64
|
py
|
Python
|
hanlp/datasets/ner/__init__.py
|
v-smwang/HanLP
|
98db7a649110fca4307acbd6a26f2b5bb1159efc
|
[
"Apache-2.0"
] | 27,208
|
2015-03-27T10:25:45.000Z
|
2022-03-31T13:26:32.000Z
|
hanlp/datasets/ner/__init__.py
|
hushaoyun/HanLP
|
967b52404c9d0adbc0cff2699690c127ecfca36e
|
[
"Apache-2.0"
] | 1,674
|
2015-03-30T06:36:44.000Z
|
2022-03-16T01:52:56.000Z
|
hanlp/datasets/ner/__init__.py
|
hushaoyun/HanLP
|
967b52404c9d0adbc0cff2699690c127ecfca36e
|
[
"Apache-2.0"
] | 7,710
|
2015-03-27T08:07:57.000Z
|
2022-03-31T14:57:23.000Z
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-06 15:32
| 21.333333
| 24
| 0.59375
| 11
| 64
| 3.454545
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.240741
| 0.15625
| 64
| 3
| 24
| 21.333333
| 0.462963
| 0.90625
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
31f5d73f045c9db55e784a4166f4f9708822341f
| 5,331
|
py
|
Python
|
great_international/migrations/0023_internationaleuexitformpage_internationaleuexitformsuccesspage.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2018-03-20T11:19:07.000Z
|
2021-10-05T07:53:11.000Z
|
great_international/migrations/0023_internationaleuexitformpage_internationaleuexitformsuccesspage.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 802
|
2018-02-05T14:16:13.000Z
|
2022-02-10T10:59:21.000Z
|
great_international/migrations/0023_internationaleuexitformpage_internationaleuexitformsuccesspage.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2019-01-22T13:19:37.000Z
|
2019-07-01T10:35:26.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-09 12:19
from __future__ import unicode_literals
import core.model_fields
import core.models
import core.validators
import core.wagtail_fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('great_international', '0022_auto_20190508_1300'),
]
operations = [
migrations.CreateModel(
name='InternationalEUExitFormPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('service_name', models.CharField(choices=[('FIND_A_SUPPLIER', 'Find a Supplier'), ('EXPORT_READINESS', 'Export Readiness'), ('INVEST', 'Invest'), ('COMPONENTS', 'Components'), ('GREAT_INTERNATIONAL', 'Great International')], db_index=True, max_length=100, null=True)),
('uses_tree_based_routing', models.BooleanField(default=False, help_text="Allow this page's URL to be determined by its slug, and the slugs of its ancestors in the page tree.", verbose_name='tree-based routing enabled')),
('breadcrumbs_label', models.CharField(max_length=50)),
('heading', models.CharField(max_length=255)),
('body_text', core.model_fields.MarkdownField(validators=[core.validators.slug_hyperlinks])),
('submit_button_text', models.CharField(max_length=50)),
('disclaimer', models.TextField(max_length=500)),
('first_name_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
('first_name_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
('last_name_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
('last_name_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
('email_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
('email_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
('organisation_type_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
('organisation_type_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
('company_name_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
('company_name_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
('country_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
('country_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
('city_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
('city_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
('comment_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
('comment_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
],
options={
'abstract': False,
},
bases=(core.models.ExclusivePageMixin, 'wagtailcore.page'),
),
migrations.CreateModel(
name='InternationalEUExitFormSuccessPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('service_name', models.CharField(choices=[('FIND_A_SUPPLIER', 'Find a Supplier'), ('EXPORT_READINESS', 'Export Readiness'), ('INVEST', 'Invest'), ('COMPONENTS', 'Components'), ('GREAT_INTERNATIONAL', 'Great International')], db_index=True, max_length=100, null=True)),
('uses_tree_based_routing', models.BooleanField(default=False, help_text="Allow this page's URL to be determined by its slug, and the slugs of its ancestors in the page tree.", verbose_name='tree-based routing enabled')),
('breadcrumbs_label', models.CharField(max_length=50)),
('heading', models.CharField(max_length=255, verbose_name='Title')),
('body_text', models.CharField(max_length=255, verbose_name='Body text')),
('next_title', models.CharField(max_length=255, verbose_name='Title')),
('next_body_text', models.CharField(max_length=255, verbose_name='Body text')),
],
options={
'abstract': False,
},
bases=(core.models.ExclusivePageMixin, 'wagtailcore.page'),
),
]
| 74.041667
| 285
| 0.673982
| 611
| 5,331
| 5.631751
| 0.214403
| 0.070619
| 0.083987
| 0.055798
| 0.797152
| 0.788434
| 0.788434
| 0.788434
| 0.771287
| 0.731764
| 0
| 0.026927
| 0.191896
| 5,331
| 71
| 286
| 75.084507
| 0.77182
| 0.012943
| 0
| 0.34375
| 1
| 0.03125
| 0.255752
| 0.042404
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.109375
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ee0b159a9b052e35cbc0b56e022fa3be6c4dec93
| 151
|
py
|
Python
|
tests/testing.py
|
Shlol762/physics
|
a142e812bac2da8edec36cdd814b49ea765d9cdc
|
[
"MIT"
] | null | null | null |
tests/testing.py
|
Shlol762/physics
|
a142e812bac2da8edec36cdd814b49ea765d9cdc
|
[
"MIT"
] | null | null | null |
tests/testing.py
|
Shlol762/physics
|
a142e812bac2da8edec36cdd814b49ea765d9cdc
|
[
"MIT"
] | null | null | null |
from physics import *
s1, s2 = Speed(9, 3, unit='cm/s', extra_units=['cm/h']), Speed(9, 2, unit='cm/h', extra_units=['cm/h'])
print(s2.distance.unit)
| 30.2
| 103
| 0.635762
| 29
| 151
| 3.241379
| 0.586207
| 0.095745
| 0.255319
| 0.276596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052239
| 0.112583
| 151
| 5
| 104
| 30.2
| 0.649254
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
ee62497549e11786eed94ddaf1b321e00e07b0ad
| 43
|
py
|
Python
|
MultiSourceDataFeeds/Providers/Factal/factal/__init__.py
|
Esri/ArcGIS-Solutions-for-Business
|
306b778bb6246f13766ce14245c6ba2aab42ba08
|
[
"Apache-2.0"
] | 1
|
2021-01-30T04:43:31.000Z
|
2021-01-30T04:43:31.000Z
|
MultiSourceDataFeeds/Providers/Factal/factal/__init__.py
|
Esri/ArcGIS-Solutions-for-Business
|
306b778bb6246f13766ce14245c6ba2aab42ba08
|
[
"Apache-2.0"
] | null | null | null |
MultiSourceDataFeeds/Providers/Factal/factal/__init__.py
|
Esri/ArcGIS-Solutions-for-Business
|
306b778bb6246f13766ce14245c6ba2aab42ba08
|
[
"Apache-2.0"
] | null | null | null |
from .factal import *
from .schema import *
| 21.5
| 21
| 0.744186
| 6
| 43
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 2
| 22
| 21.5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c99498c0faf71a46ad1d7a4f4be4a7ad4fc54402
| 172
|
py
|
Python
|
Coursera/separa_palavras.py
|
tobiaspontes/ScriptsPython
|
21ed779e49adca500ce5815dd100f4ec999a2571
|
[
"MIT"
] | null | null | null |
Coursera/separa_palavras.py
|
tobiaspontes/ScriptsPython
|
21ed779e49adca500ce5815dd100f4ec999a2571
|
[
"MIT"
] | null | null | null |
Coursera/separa_palavras.py
|
tobiaspontes/ScriptsPython
|
21ed779e49adca500ce5815dd100f4ec999a2571
|
[
"MIT"
] | null | null | null |
import re
def separa_palavras(frase):
'''A funcao recebe uma frase e devolve uma lista das palavras dentro da frase'''
print('lista de palavras: ', frase.split())
| 28.666667
| 84
| 0.709302
| 26
| 172
| 4.653846
| 0.730769
| 0.214876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 172
| 5
| 85
| 34.4
| 0.864286
| 0.430233
| 0
| 0
| 0
| 0
| 0.206522
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c9b8a09501b36968a133bb1816fb52f2dd36b599
| 42
|
py
|
Python
|
examples/modules/object_tracker/__init__.py
|
jagin/dvg-utils
|
a7d19ead75398b09a9f1e146464cf4227f06a476
|
[
"MIT"
] | 7
|
2020-09-02T08:39:22.000Z
|
2021-10-13T18:13:04.000Z
|
examples/modules/object_tracker/__init__.py
|
jagin/dvg-utils
|
a7d19ead75398b09a9f1e146464cf4227f06a476
|
[
"MIT"
] | null | null | null |
examples/modules/object_tracker/__init__.py
|
jagin/dvg-utils
|
a7d19ead75398b09a9f1e146464cf4227f06a476
|
[
"MIT"
] | null | null | null |
from .object_tracker import ObjectTracker
| 21
| 41
| 0.880952
| 5
| 42
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c9bd340296dec5cc98f4fa44de42146d4f90d4d2
| 123
|
py
|
Python
|
python/basic_utils.py
|
goten-team/Goten
|
690f1429b62c70caec72f4010ee5b7a9786f0d25
|
[
"MIT"
] | 17
|
2020-04-28T09:18:28.000Z
|
2021-12-28T08:38:00.000Z
|
python/basic_utils.py
|
goten-team/Goten
|
690f1429b62c70caec72f4010ee5b7a9786f0d25
|
[
"MIT"
] | 2
|
2021-09-26T04:10:51.000Z
|
2022-03-31T05:28:25.000Z
|
python/basic_utils.py
|
goten-team/Goten
|
690f1429b62c70caec72f4010ee5b7a9786f0d25
|
[
"MIT"
] | 2
|
2021-09-26T05:06:17.000Z
|
2021-12-14T16:25:06.000Z
|
import hashlib
def str_hash(s):
return int(int(hashlib.sha224(s.encode('utf-8')).hexdigest(), 16) % ((1 << 62) - 1))
| 20.5
| 88
| 0.617886
| 20
| 123
| 3.75
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 0.154472
| 123
| 5
| 89
| 24.6
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0.04065
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
c9d7bec33f61ca45367ed74051d9e674ed9eb713
| 211
|
py
|
Python
|
unit_03/random/passwd1.py
|
janusnic/21v-pyqt
|
8ee3828e1c6e6259367d6cedbd63b9057cf52c24
|
[
"MIT"
] | null | null | null |
unit_03/random/passwd1.py
|
janusnic/21v-pyqt
|
8ee3828e1c6e6259367d6cedbd63b9057cf52c24
|
[
"MIT"
] | null | null | null |
unit_03/random/passwd1.py
|
janusnic/21v-pyqt
|
8ee3828e1c6e6259367d6cedbd63b9057cf52c24
|
[
"MIT"
] | 2
|
2019-11-14T15:04:22.000Z
|
2021-10-31T07:34:46.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
генератор случайных чисел
"""
import random
print ''.join([random.choice(list('123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM')) for x in range(12)])
| 26.375
| 120
| 0.739336
| 22
| 211
| 7.090909
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.090047
| 211
| 8
| 120
| 26.375
| 0.75
| 0.180095
| 0
| 0
| 0
| 0
| 0.438849
| 0.438849
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
a006b38b61a96ab48414b8fa22ea5745e9fed4bd
| 22
|
py
|
Python
|
Scripts.py
|
MattOstgard/HLSL_ST3
|
fbb3dcc7acfeb9c04208dc68b8ff020c76d483b1
|
[
"MIT"
] | 10
|
2017-11-30T19:43:48.000Z
|
2022-02-02T11:10:43.000Z
|
Scripts.py
|
MattOstgard/HLSL_ST3
|
fbb3dcc7acfeb9c04208dc68b8ff020c76d483b1
|
[
"MIT"
] | 27
|
2018-11-06T16:10:57.000Z
|
2022-02-25T22:55:33.000Z
|
Scripts.py
|
MattOstgard/HLSL_ST3
|
fbb3dcc7acfeb9c04208dc68b8ff020c76d483b1
|
[
"MIT"
] | 2
|
2018-03-24T04:09:45.000Z
|
2018-11-06T14:54:10.000Z
|
from .Scripts import *
| 22
| 22
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a008eb9d3812a49e20b4001c7d7b0873ff6642c9
| 106
|
py
|
Python
|
tests/exog/random/random_exog_32_20.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/exog/random/random_exog_32_20.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/exog/random/random_exog_32_20.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.tests.exog.test_random_exogenous as testrandexog
testrandexog.test_random_exogenous( 32,20);
| 26.5
| 60
| 0.858491
| 15
| 106
| 5.8
| 0.733333
| 0.229885
| 0.436782
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040404
| 0.066038
| 106
| 4
| 61
| 26.5
| 0.838384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4e751966b10b05f698edd3d37469d6c2ff784045
| 31
|
py
|
Python
|
bubble_io/__init__.py
|
jasontyping/bubble-io-python
|
487dd253e85814a012df4a5a5a6a08f023517641
|
[
"MIT"
] | null | null | null |
bubble_io/__init__.py
|
jasontyping/bubble-io-python
|
487dd253e85814a012df4a5a5a6a08f023517641
|
[
"MIT"
] | null | null | null |
bubble_io/__init__.py
|
jasontyping/bubble-io-python
|
487dd253e85814a012df4a5a5a6a08f023517641
|
[
"MIT"
] | 1
|
2020-10-25T08:31:59.000Z
|
2020-10-25T08:31:59.000Z
|
from .bubbleio import BubbleIo
| 15.5
| 30
| 0.83871
| 4
| 31
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4e769aee426de55532dd683d9dd832dcae724616
| 68
|
py
|
Python
|
python/pandas_pbf/core.py
|
ccharlesgb/pandas-pbf
|
8c5b1af2c291cfd485b1296a1a5ba34ddc93d995
|
[
"MIT"
] | null | null | null |
python/pandas_pbf/core.py
|
ccharlesgb/pandas-pbf
|
8c5b1af2c291cfd485b1296a1a5ba34ddc93d995
|
[
"MIT"
] | null | null | null |
python/pandas_pbf/core.py
|
ccharlesgb/pandas-pbf
|
8c5b1af2c291cfd485b1296a1a5ba34ddc93d995
|
[
"MIT"
] | null | null | null |
import pandas as pd
def dump(df: pd.DataFrame) -> bytes:
pass
| 11.333333
| 36
| 0.661765
| 11
| 68
| 4.090909
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 68
| 5
| 37
| 13.6
| 0.865385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
4ea5498deec294ffeeebf2d2ad50bbf782de71a8
| 141
|
py
|
Python
|
esteid/idcard/__init__.py
|
thorgate/django-esteid
|
4a4227b20dca7db5441a3514f724f1404575562c
|
[
"BSD-3-Clause"
] | 17
|
2016-03-30T09:20:19.000Z
|
2022-01-17T12:04:03.000Z
|
esteid/idcard/__init__.py
|
thorgate/django-esteid
|
4a4227b20dca7db5441a3514f724f1404575562c
|
[
"BSD-3-Clause"
] | 15
|
2016-02-22T22:49:07.000Z
|
2021-11-09T12:29:35.000Z
|
esteid/idcard/__init__.py
|
thorgate/django-esteid
|
4a4227b20dca7db5441a3514f724f1404575562c
|
[
"BSD-3-Clause"
] | 2
|
2016-07-27T10:57:52.000Z
|
2017-10-05T13:15:59.000Z
|
__all__ = ["BaseIdCardAuthenticationView", "IdCardSigner"]
from .signer import IdCardSigner
from .views import BaseIdCardAuthenticationView
| 28.2
| 58
| 0.836879
| 11
| 141
| 10.363636
| 0.636364
| 0.280702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092199
| 141
| 4
| 59
| 35.25
| 0.890625
| 0
| 0
| 0
| 0
| 0
| 0.283688
| 0.198582
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4eb27769bbc6f1af6058f15f8a964479f5a48ebc
| 484
|
py
|
Python
|
crosshair/libimpl/__init__.py
|
mristin/CrossHair
|
66a44a0d10021e0b1e2d847a677274e62ddd1e9d
|
[
"MIT"
] | null | null | null |
crosshair/libimpl/__init__.py
|
mristin/CrossHair
|
66a44a0d10021e0b1e2d847a677274e62ddd1e9d
|
[
"MIT"
] | null | null | null |
crosshair/libimpl/__init__.py
|
mristin/CrossHair
|
66a44a0d10021e0b1e2d847a677274e62ddd1e9d
|
[
"MIT"
] | null | null | null |
from crosshair.libimpl import builtinslib
from crosshair.libimpl import collectionslib
from crosshair.libimpl import datetimelib
from crosshair.libimpl import mathlib
from crosshair.libimpl import randomlib
from crosshair.libimpl import relib
def make_registrations():
builtinslib.make_registrations()
collectionslib.make_registrations()
datetimelib.make_registrations()
mathlib.make_registrations()
randomlib.make_registrations()
relib.make_registrations()
| 30.25
| 44
| 0.82438
| 51
| 484
| 7.686275
| 0.254902
| 0.303571
| 0.306122
| 0.397959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119835
| 484
| 15
| 45
| 32.266667
| 0.920188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| true
| 0
| 0.461538
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4eda24af6ddf82cc5cc2e25951b4fb9c83b51905
| 159
|
py
|
Python
|
bitmovin/resources/models/encodings/pertitle/auto_representation.py
|
koraygulcu/bitmovin-python
|
e8b896e2cb44142c91828533b8fb02f20eb0fbe5
|
[
"Unlicense"
] | null | null | null |
bitmovin/resources/models/encodings/pertitle/auto_representation.py
|
koraygulcu/bitmovin-python
|
e8b896e2cb44142c91828533b8fb02f20eb0fbe5
|
[
"Unlicense"
] | null | null | null |
bitmovin/resources/models/encodings/pertitle/auto_representation.py
|
koraygulcu/bitmovin-python
|
e8b896e2cb44142c91828533b8fb02f20eb0fbe5
|
[
"Unlicense"
] | null | null | null |
class AutoRepresentation:
def __init__(self, adopt_configuration_threshold=None):
self.adoptConfigurationThreshold = adopt_configuration_threshold
| 39.75
| 72
| 0.823899
| 14
| 159
| 8.785714
| 0.714286
| 0.292683
| 0.439024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125786
| 159
| 3
| 73
| 53
| 0.884892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
14f0031f20c1d451293a9e4ffe1e1cb773cf31df
| 57
|
py
|
Python
|
flyeye/dynamics/__init__.py
|
sbernasek/flyeye
|
95be4c6b52785d5ff3d0c68362308cb0fd1e8ae8
|
[
"MIT"
] | 2
|
2020-02-22T09:53:17.000Z
|
2020-02-24T19:02:01.000Z
|
flyeye/dynamics/__init__.py
|
sbernasek/flyeye
|
95be4c6b52785d5ff3d0c68362308cb0fd1e8ae8
|
[
"MIT"
] | 1
|
2019-11-20T17:11:07.000Z
|
2019-11-20T17:11:07.000Z
|
flyeye/dynamics/__init__.py
|
sebastianbernasek/flyeye
|
95be4c6b52785d5ff3d0c68362308cb0fd1e8ae8
|
[
"MIT"
] | null | null | null |
from .visualization import plot_mean, plot_mean_interval
| 28.5
| 56
| 0.877193
| 8
| 57
| 5.875
| 0.75
| 0.340426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087719
| 57
| 1
| 57
| 57
| 0.903846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
14ff6bd96aa976b58904b681f23b026afedef8de
| 12,852
|
py
|
Python
|
PaddleFSL/examples/image_classification/maml_image_classification.py
|
tianxin1860/FSL-Mate
|
74dde9a3e1f789ec92710b9ecdf9c5b060d26fd3
|
[
"MIT"
] | null | null | null |
PaddleFSL/examples/image_classification/maml_image_classification.py
|
tianxin1860/FSL-Mate
|
74dde9a3e1f789ec92710b9ecdf9c5b060d26fd3
|
[
"MIT"
] | null | null | null |
PaddleFSL/examples/image_classification/maml_image_classification.py
|
tianxin1860/FSL-Mate
|
74dde9a3e1f789ec92710b9ecdf9c5b060d26fd3
|
[
"MIT"
] | null | null | null |
import paddle
import paddlefsl
from paddlefsl.model_zoo import maml
# Set computing device
paddle.set_device('gpu:0')
# """ ---------------------------------------------------------------------------------
# Config: MAML, Omniglot, MLP, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28))
VALID_DATASET = paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28))
TEST_DATASET = paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28))
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.MLP(input_size=(28, 28), output_size=WAYS)
META_LR = 0.005
INNER_LR = 0.5
ITERATIONS = 60000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 1
TEST_INNER_ADAPT_STEPS = 3
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration60000.params'
# ----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Omniglot, MLP, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28))
VALID_DATASET = paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28))
TEST_DATASET = paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28))
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.MLP(input_size=(28, 28), output_size=WAYS)
META_LR = 0.005
INNER_LR = 0.5
ITERATIONS = 20000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 1
TEST_INNER_ADAPT_STEPS = 3
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration20000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Omniglot, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28))
VALID_DATASET = paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28))
TEST_DATASET = paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28))
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(1, 28, 28), output_size=WAYS, pooling=False)
META_LR = 0.005
INNER_LR = 0.5
ITERATIONS = 60000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 1
TEST_INNER_ADAPT_STEPS = 3
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration60000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Omniglot, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28))
VALID_DATASET = paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28))
TEST_DATASET = paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28))
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(1, 28, 28), output_size=WAYS, pooling=False)
META_LR = 0.005
INNER_LR = 0.5
ITERATIONS = 20000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 1
TEST_INNER_ADAPT_STEPS = 3
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration20000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Mini-ImageNet, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.MiniImageNet(mode='train')
VALID_DATASET = paddlefsl.datasets.MiniImageNet(mode='valid')
TEST_DATASET = paddlefsl.datasets.MiniImageNet(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.03
ITERATIONS = 60000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration60000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Mini-ImageNet, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.MiniImageNet(mode='train')
VALID_DATASET = paddlefsl.datasets.MiniImageNet(mode='valid')
TEST_DATASET = paddlefsl.datasets.MiniImageNet(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.1
ITERATIONS = 30000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration30000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, CifarFS, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.CifarFS(mode='train')
VALID_DATASET = paddlefsl.datasets.CifarFS(mode='valid')
TEST_DATASET = paddlefsl.datasets.CifarFS(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 32, 32), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.001
INNER_LR = 0.03
ITERATIONS = 30000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration30000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, CifarFS, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.CifarFS(mode='train')
VALID_DATASET = paddlefsl.datasets.CifarFS(mode='valid')
TEST_DATASET = paddlefsl.datasets.CifarFS(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 32, 32), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.0015
INNER_LR = 0.15
ITERATIONS = 10000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration10000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, FC100, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.FC100(mode='train')
VALID_DATASET = paddlefsl.datasets.FC100(mode='valid')
TEST_DATASET = paddlefsl.datasets.FC100(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 32, 32), output_size=WAYS)
META_LR = 0.002
INNER_LR = 0.05
ITERATIONS = 10000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 2000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration10000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, FC100, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.FC100(mode='train')
VALID_DATASET = paddlefsl.datasets.FC100(mode='valid')
TEST_DATASET = paddlefsl.datasets.FC100(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 32, 32), output_size=WAYS)
META_LR = 0.003
INNER_LR = 0.08
ITERATIONS = 5000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration5000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, CubFS, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.CubFS(mode='train')
VALID_DATASET = paddlefsl.datasets.CubFS(mode='valid')
TEST_DATASET = paddlefsl.datasets.CubFS(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.03
ITERATIONS = 20000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration20000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, CubFS, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.CubFS(mode='train')
VALID_DATASET = paddlefsl.datasets.CubFS(mode='valid')
TEST_DATASET = paddlefsl.datasets.CubFS(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.003
INNER_LR = 0.1
ITERATIONS = 10000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 2000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration10000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Tiered-ImageNet, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.TieredImageNet(mode='train')
VALID_DATASET = paddlefsl.datasets.TieredImageNet(mode='valid')
TEST_DATASET = paddlefsl.datasets.TieredImageNet(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.03
ITERATIONS = 15000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration15000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Tiered-ImageNet, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.TieredImageNet(mode='train')
VALID_DATASET = paddlefsl.datasets.TieredImageNet(mode='valid')
TEST_DATASET = paddlefsl.datasets.TieredImageNet(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.01
ITERATIONS = 30000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration30000.params'
----------------------------------------------------------------------------------"""
def main():
train_dir = maml.meta_training(train_dataset=TRAIN_DATASET,
valid_dataset=VALID_DATASET,
ways=WAYS,
shots=SHOTS,
model=MODEL,
meta_lr=META_LR,
inner_lr=INNER_LR,
iterations=ITERATIONS,
meta_batch_size=META_BATCH_SIZE,
inner_adapt_steps=TRAIN_INNER_ADAPT_STEPS,
approximate=APPROXIMATE,
report_iter=REPORT_ITER,
save_model_iter=SAVE_MODEL_ITER,
save_model_root=SAVE_MODEL_ROOT)
print(train_dir)
state_dict = paddle.load(train_dir + '/' + TEST_PARAM_FILE)
MODEL.load_dict(state_dict)
maml.meta_testing(model=MODEL,
test_dataset=TEST_DATASET,
test_epoch=TEST_EPOCH,
test_batch_size=META_BATCH_SIZE,
ways=WAYS,
shots=SHOTS,
inner_lr=INNER_LR,
inner_adapt_steps=TEST_INNER_ADAPT_STEPS,
approximate=APPROXIMATE)
if __name__ == '__main__':
main()
| 35.502762
| 106
| 0.591581
| 1,513
| 12,852
| 4.748843
| 0.067416
| 0.093528
| 0.140292
| 0.041754
| 0.918859
| 0.900765
| 0.891997
| 0.882672
| 0.877662
| 0.86945
| 0
| 0.060437
| 0.137411
| 12,852
| 361
| 107
| 35.601108
| 0.587678
| 0.018363
| 0
| 0.117647
| 0
| 0
| 0.030134
| 0.009736
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.058824
| 0
| 0.078431
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
092ff28cf017dfa08a6c336b9f9f79e5dc743c1f
| 25
|
py
|
Python
|
rewx/__init__.py
|
akrk1986/re-wx
|
2f50d1c0afe77313548847b279327d7041623721
|
[
"MIT"
] | 103
|
2021-01-18T22:06:46.000Z
|
2022-03-24T15:57:25.000Z
|
rewx/__init__.py
|
ronny-rentner/re-wx
|
185c509ef7a590d7abb758be687fb59048019adb
|
[
"MIT"
] | 6
|
2021-01-26T11:45:40.000Z
|
2022-01-15T08:18:12.000Z
|
rewx/__init__.py
|
ronny-rentner/re-wx
|
185c509ef7a590d7abb758be687fb59048019adb
|
[
"MIT"
] | 4
|
2021-01-26T10:13:20.000Z
|
2022-01-10T09:02:27.000Z
|
from rewx.core import *
| 8.333333
| 23
| 0.72
| 4
| 25
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 25
| 2
| 24
| 12.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
11b673d3e56e187a96e8ce75c9577f8cea8df161
| 200
|
py
|
Python
|
pymtl3/passes/rtlir/structural/__init__.py
|
kevinyuan/pymtl3
|
5949e6a4acc625c0ccbbb25be3af1d0db683df3c
|
[
"BSD-3-Clause"
] | 152
|
2020-06-03T02:34:11.000Z
|
2022-03-30T04:16:45.000Z
|
pymtl3/passes/rtlir/structural/__init__.py
|
kevinyuan/pymtl3
|
5949e6a4acc625c0ccbbb25be3af1d0db683df3c
|
[
"BSD-3-Clause"
] | 139
|
2019-05-29T00:37:09.000Z
|
2020-05-17T16:49:26.000Z
|
pymtl3/passes/rtlir/structural/__init__.py
|
kevinyuan/pymtl3
|
5949e6a4acc625c0ccbbb25be3af1d0db683df3c
|
[
"BSD-3-Clause"
] | 22
|
2020-05-18T13:42:05.000Z
|
2022-03-11T08:37:51.000Z
|
"""Expose structural RTLIR generation pass.
PyMTL user should only interact with the passes exposed here.
"""
from .StructuralRTLIRGenL4Pass import StructuralRTLIRGenL4Pass as StructuralRTLIRGenPass
| 33.333333
| 88
| 0.84
| 21
| 200
| 8
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011299
| 0.115
| 200
| 5
| 89
| 40
| 0.937853
| 0.515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ee9a90e09df8676533abaa0b7de5176954a8137e
| 3,542
|
py
|
Python
|
server/server/apps/course/views.py
|
tjsga/study-bank
|
f4cb17bc642d2fd28affde89d2af6a8ecd2286f2
|
[
"MIT"
] | null | null | null |
server/server/apps/course/views.py
|
tjsga/study-bank
|
f4cb17bc642d2fd28affde89d2af6a8ecd2286f2
|
[
"MIT"
] | null | null | null |
server/server/apps/course/views.py
|
tjsga/study-bank
|
f4cb17bc642d2fd28affde89d2af6a8ecd2286f2
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, get_object_or_404
from django.core.exceptions import PermissionDenied
from django.http import Http404
from .models import Course
from ..mod.models import Moderator
from ..files.models import File
from ..decorators import login
# Create your views here.
@login
def index(request):
courses = Course.objects.all()
return render(request, 'class/index.html', {'classes': courses})
@login
def show(request, course_url):
course = get_object_or_404(Course, url=course_url)
is_mod = False
try:
mod = Moderator.objects.get(username=request.session['user'])
except Moderator.DoesNotExist:
is_mod = False
return render(request, 'class/show.html', {'course': course, 'is_mod': is_mod})
if mod.admin:
is_mod = True
elif course in mod.classes.all():
is_mod = True
return render(request, 'class/show.html', {'course': course, 'is_mod': is_mod})
@login
def approve(request, course_url, doc_id):
course = get_object_or_404(Course, url=course_url)
try:
mod = Moderator.objects.get(username=request.session['user'])
except Moderator.DoesNotExist:
raise PermissionDenied
if mod.admin or (course in mod.classes.all()):
try:
doc = course.unapproved_files.get(id=doc_id)
except File.DoesNotExist:
try:
doc = course.files.get(id=doc_id)
except File.DoesNotExist:
raise Http404("Error: Document Not Related to this Course")
raise Http404("Error: Document Already Approved")
course.unapproved_files.remove(doc)
course.files.add(doc)
return render(request, 'class/approve.html', {'doc': doc, 'course': course})
else:
raise PermissionDenied
@login
def remove(request, course_url, doc_id):
course = get_object_or_404(Course, url=course_url)
try:
mod = Moderator.objects.get(username=request.session['user'])
except Moderator.DoesNotExist:
raise PermissionDenied
if mod.admin or (course in mod.classes.all()):
try:
doc = course.files.get(id=doc_id)
except File.DoesNotExist:
try:
doc = course.unapproved_files.get(id=doc_id)
except File.DoesNotExist:
raise Http404("Error: Document Not Related to this Course")
course.unapproved_files.remove(doc)
course.rejected_files.add(doc)
return render(request, 'class/remove.html', {'doc': doc, 'course': course})
course.files.remove(doc)
course.rejected_files.add(doc)
return render(request, 'class/remove.html', {'doc': doc, 'course': course})
else:
raise PermissionDenied
@login
def undelete(request, course_url, doc_id):
course = get_object_or_404(Course, url=course_url)
try:
mod = Moderator.objects.get(username=request.session['user'])
except Moderator.DoesNotExist:
raise PermissionDenied
if mod.admin or (course in mod.classes.all()):
try:
doc = course.rejected_files.get(id=doc_id)
except File.DoesNotExist:
raise Http404("Error: Document Not Related to this Course")
course.rejected_files.remove(doc)
course.files.add(doc)
return render(request, 'class/undelete.html', {'doc': doc, 'course': course})
else:
raise PermissionDenied
| 31.625
| 87
| 0.634952
| 427
| 3,542
| 5.156909
| 0.161593
| 0.053134
| 0.0604
| 0.076294
| 0.76703
| 0.757493
| 0.745686
| 0.745686
| 0.724342
| 0.707084
| 0
| 0.011437
| 0.259458
| 3,542
| 112
| 88
| 31.625
| 0.828059
| 0.006494
| 0
| 0.744186
| 0
| 0
| 0.101762
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05814
| false
| 0
| 0.081395
| 0
| 0.22093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
eed876b1554e0a4c99de5f131d255d84ecaa3345
| 78
|
py
|
Python
|
lyrebird/plugins/__init__.py
|
dodosophia/lyrebird
|
b3c3d6e0f0f47b8df0cc119a1e5d30763371fa3d
|
[
"MIT"
] | 1
|
2020-03-18T05:56:53.000Z
|
2020-03-18T05:56:53.000Z
|
lyrebird/plugins/__init__.py
|
robert0825/lyrebird
|
18bcbd2030bd4a506d1f519ae0316d8fc667db4f
|
[
"MIT"
] | null | null | null |
lyrebird/plugins/__init__.py
|
robert0825/lyrebird
|
18bcbd2030bd4a506d1f519ae0316d8fc667db4f
|
[
"MIT"
] | 1
|
2019-03-11T09:25:36.000Z
|
2019-03-11T09:25:36.000Z
|
from .plugin_loader import manifest
from .plugin_manager import PluginManager
| 26
| 41
| 0.871795
| 10
| 78
| 6.6
| 0.7
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 78
| 2
| 42
| 39
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
eef0f57e2e52d98324d6736af1814a7fec12251f
| 23
|
py
|
Python
|
Game/History/__init__.py
|
ritwikd/interom
|
0b626351fd742f2a99d0a6d11ba8c1a214aab576
|
[
"MIT"
] | null | null | null |
Game/History/__init__.py
|
ritwikd/interom
|
0b626351fd742f2a99d0a6d11ba8c1a214aab576
|
[
"MIT"
] | 1
|
2021-03-06T22:08:32.000Z
|
2021-03-06T22:09:07.000Z
|
Game/History/__init__.py
|
ritwikd/interom
|
0b626351fd742f2a99d0a6d11ba8c1a214aab576
|
[
"MIT"
] | 1
|
2021-03-03T22:48:07.000Z
|
2021-03-03T22:48:07.000Z
|
from . import Log, Move
| 23
| 23
| 0.73913
| 4
| 23
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
eef62d1ce6768e7a68a4a1159bbd33491dcbc7e8
| 6,126
|
py
|
Python
|
tests/objects/fiber_manipulation_test.py
|
jifengting1/fastpliFork
|
1ef7e2d268e03e21ded9390fc005b9fff2e0a3c1
|
[
"MIT"
] | null | null | null |
tests/objects/fiber_manipulation_test.py
|
jifengting1/fastpliFork
|
1ef7e2d268e03e21ded9390fc005b9fff2e0a3c1
|
[
"MIT"
] | null | null | null |
tests/objects/fiber_manipulation_test.py
|
jifengting1/fastpliFork
|
1ef7e2d268e03e21ded9390fc005b9fff2e0a3c1
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
import fastpli.objects
import fastpli.tools
class MainTest(unittest.TestCase):
# TODO: implement object.fiber.*manipulations*
def setUp(self):
self.fiber = np.array([[0, 0, 0, 1], [1, 1, 1, 2]], dtype=float)
self.fiber_bundle = [self.fiber.copy()]
self.fiber_bundles = [[self.fiber.copy()]]
def test_resize(self):
fiber = fastpli.objects.fiber.Rescale(self.fiber, 10)
self.assertTrue(np.array_equal(fiber, self.fiber * 10))
fb = fastpli.objects.fiber_bundle.Rescale(self.fiber_bundle, 10)
for f in fb:
self.assertTrue(np.array_equal(f, self.fiber * 10))
fbs = fastpli.objects.fiber_bundles.Rescale(self.fiber_bundles, 10)
for fb in fbs:
for f in fb:
self.assertTrue(np.array_equal(f, self.fiber * 10))
fiber = fastpli.objects.fiber.Rescale(self.fiber, 10, mod='points')
self.assertTrue(np.array_equal(fiber[:, :-2], self.fiber[:, :-2] * 10))
self.assertTrue(np.array_equal(fiber[:, -1], self.fiber[:, -1]))
fiber = fastpli.objects.fiber.Rescale(self.fiber, 10, mod='radii')
self.assertTrue(np.array_equal(fiber[:, :-2], self.fiber[:, :-2]))
self.assertTrue(np.array_equal(fiber[:, -1], self.fiber[:, -1] * 10))
def test_rotation(self):
fiber = fastpli.objects.fiber.Rotate(self.fiber,
fastpli.tools.rotation.x(0))
self.assertTrue(np.array_equal(self.fiber, fiber))
fiber = fastpli.objects.fiber.Rotate(
self.fiber, fastpli.tools.rotation.x(np.deg2rad(90)))
self.assertTrue(
np.allclose(fiber, np.array([[0, 0, 0, 1], [1, -1, 1, 2]])))
fiber = fastpli.objects.fiber.Rotate(
self.fiber, fastpli.tools.rotation.x(np.deg2rad(90)), [1, 1, 1])
self.assertTrue(
np.allclose(fiber, np.array([[0, 2, 0, 1], [1, 1, 1, 2]])))
for f in self.fiber_bundle:
fiber = fastpli.objects.fiber.Rotate(
f, fastpli.tools.rotation.x(np.deg2rad(90)), [1, 1, 1])
self.assertTrue(
np.allclose(fiber, np.array([[0, 2, 0, 1], [1, 1, 1, 2]])))
for fb in self.fiber_bundles:
for f in fb:
fiber = fastpli.objects.fiber.Rotate(
f, fastpli.tools.rotation.x(np.deg2rad(90)), [1, 1, 1])
self.assertTrue(
np.allclose(fiber, np.array([[0, 2, 0, 1], [1, 1, 1, 2]])))
def test_translate(self):
fiber = fastpli.objects.fiber.Translate(self.fiber, [1, 1, 1])
self.assertTrue(
np.array_equal(fiber[:, :3],
self.fiber[:, :3] + np.array([1, 1, 1])))
self.assertTrue(np.array_equal(fiber[:, -1], self.fiber[:, -1]))
for f in self.fiber_bundle:
fiber = fastpli.objects.fiber.Translate(f, [1, 1, 1])
self.assertTrue(
np.array_equal(fiber[:, :3],
self.fiber[:, :3] + np.array([1, 1, 1])))
self.assertTrue(np.array_equal(f[:, -1], self.fiber[:, -1]))
for fb in self.fiber_bundles:
for f in fb:
fiber = fastpli.objects.fiber.Translate(f, [1, 1, 1])
self.assertTrue(
np.array_equal(fiber[:, :3],
self.fiber[:, :3] + np.array([1, 1, 1])))
self.assertTrue(np.array_equal(f[:, -1], self.fiber[:, -1]))
def test_cut(self):
fiber = np.array([[0, 0, 0, 1], [1, 1, 1, 2]], dtype=float)
fibers = fastpli.objects.fiber.Cut(fiber, [[-10] * 3, [10] * 3])
self.assertTrue(len(fibers) == 1)
self.assertTrue(np.array_equal(fibers[0], fiber))
fiber = np.array([[0, 0, 0, 1], [10, 10, 10, 2]], dtype=float)
fibers = fastpli.objects.fiber.Cut(fiber, [[-5] * 3, [5] * 3])
self.assertTrue(len(fibers) == 1)
self.assertTrue(np.array_equal(fibers[0], fiber))
fiber = np.array([[0, 0, 0, 1], [10, 10, 10, 2], [100, 100, 100, 2]],
dtype=float)
fibers = fastpli.objects.fiber.Cut(fiber, [[-5] * 3, [5] * 3])
self.assertTrue(len(fibers) == 1)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0], fiber))
fiber = np.array([[0, 0, 0, 1], [10, 10, 10, 2], [100, 100, 100, 2],
[10, 10, 10, 2], [0, 0, 0, 1]],
dtype=float)
fibers = fastpli.objects.fiber.Cut(fiber, [[-5] * 3, [5] * 3])
self.assertTrue(len(fibers) == 2)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(fibers[1].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0], fiber))
self.assertTrue(not np.array_equal(fibers[1], fiber))
fiber_bundle = [fiber]
cut_fb = fastpli.objects.fiber_bundle.Cut(fiber_bundle,
[[-5] * 3, [5] * 3])
fibers = cut_fb
self.assertTrue(len(fibers) == 2)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(fibers[1].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0], fiber))
self.assertTrue(not np.array_equal(fibers[1], fiber))
fiber_bundles = [[fiber]]
cut_fbs = fastpli.objects.fiber_bundles.Cut(fiber_bundles,
[[-5] * 3, [5] * 3])
fibers = cut_fbs[0]
self.assertTrue(len(cut_fbs) == 1)
self.assertTrue(len(fibers) == 2)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(fibers[1].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0], fiber))
self.assertTrue(not np.array_equal(fibers[1], fiber))
fiber = np.array([[0, 0, 0, 1], [10, 10, 10, 2]], dtype=float)
fibers = fastpli.objects.fiber.Cut(fiber, [[5] * 3, [6] * 3])
self.assertTrue(np.array_equal(fibers[0], fiber))
if __name__ == '__main__':
unittest.main()
| 42.839161
| 79
| 0.538851
| 820
| 6,126
| 3.956098
| 0.07439
| 0.181258
| 0.088779
| 0.110049
| 0.848644
| 0.792232
| 0.784217
| 0.774044
| 0.741985
| 0.701911
| 0
| 0.062457
| 0.289096
| 6,126
| 142
| 80
| 43.140845
| 0.682434
| 0.007183
| 0
| 0.608696
| 0
| 0
| 0.003125
| 0
| 0
| 0
| 0
| 0.007042
| 0.365217
| 1
| 0.043478
| false
| 0
| 0.034783
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e102bdd6852dce95483c7c8cdb3211b3d9ab7231
| 43
|
py
|
Python
|
run_5395.py
|
mpi3d/goodix-fp-dump
|
039940845bd5eeb98cd92d72f267e3be77feb156
|
[
"MIT"
] | 136
|
2021-05-05T14:16:17.000Z
|
2022-03-31T09:04:18.000Z
|
run_5395.py
|
tsunekotakimoto/goodix-fp-dump
|
b88ecbababd3766314521fe30ee943c4bd1810df
|
[
"MIT"
] | 14
|
2021-08-20T09:49:39.000Z
|
2022-03-20T13:18:05.000Z
|
run_5395.py
|
tsunekotakimoto/goodix-fp-dump
|
b88ecbababd3766314521fe30ee943c4bd1810df
|
[
"MIT"
] | 11
|
2021-08-02T15:49:11.000Z
|
2022-02-06T22:06:42.000Z
|
from driver_53x5 import main
main(0x5395)
| 10.75
| 28
| 0.813953
| 7
| 43
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216216
| 0.139535
| 43
| 3
| 29
| 14.333333
| 0.702703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e12ad429759f61a8d7e2d053224398fdfc9dad67
| 19
|
py
|
Python
|
pkgs/conf-pkg/src/genie/libs/conf/rip/__init__.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 94
|
2018-04-30T20:29:15.000Z
|
2022-03-29T13:40:31.000Z
|
pkgs/conf-pkg/src/genie/libs/conf/rip/__init__.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 67
|
2018-12-06T21:08:09.000Z
|
2022-03-29T18:00:46.000Z
|
pkgs/conf-pkg/src/genie/libs/conf/rip/__init__.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 49
|
2018-06-29T18:59:03.000Z
|
2022-03-10T02:07:59.000Z
|
from .rip import *
| 9.5
| 18
| 0.684211
| 3
| 19
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 19
| 1
| 19
| 19
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
012f17bafc339e27fe0149bdbf1a7b12a681ef93
| 29
|
py
|
Python
|
demo2022.py
|
finaleo83/demo01
|
579782f564ab0f5cc95f6b5e63644c5f930c0019
|
[
"Unlicense"
] | null | null | null |
demo2022.py
|
finaleo83/demo01
|
579782f564ab0f5cc95f6b5e63644c5f930c0019
|
[
"Unlicense"
] | null | null | null |
demo2022.py
|
finaleo83/demo01
|
579782f564ab0f5cc95f6b5e63644c5f930c0019
|
[
"Unlicense"
] | null | null | null |
print("Hello, World! Again!")
| 29
| 29
| 0.689655
| 4
| 29
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 29
| 1
| 29
| 29
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
013c77d6a4350f96399efe1ca86c27a469b9fa59
| 32
|
py
|
Python
|
src/logic_analyzer_bfms/__init__.py
|
pybfms/pybfms_logic_analyzer
|
7696e16c53a7248a0660ba1cc8f108cda03c1e08
|
[
"Apache-2.0"
] | null | null | null |
src/logic_analyzer_bfms/__init__.py
|
pybfms/pybfms_logic_analyzer
|
7696e16c53a7248a0660ba1cc8f108cda03c1e08
|
[
"Apache-2.0"
] | null | null | null |
src/logic_analyzer_bfms/__init__.py
|
pybfms/pybfms_logic_analyzer
|
7696e16c53a7248a0660ba1cc8f108cda03c1e08
|
[
"Apache-2.0"
] | 1
|
2020-11-22T08:37:39.000Z
|
2020-11-22T08:37:39.000Z
|
from .la_initiator_bfm import *
| 16
| 31
| 0.8125
| 5
| 32
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 2
| 31
| 16
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
014d029371edfc926a3b46e79008ce4486f7ec74
| 29
|
py
|
Python
|
pydreamer/models/__init__.py
|
rogerscristo/pydreamer
|
e44fdf8b35fe48662ed619100fdd5d9d6858f6db
|
[
"MIT"
] | 75
|
2021-10-12T13:17:48.000Z
|
2022-03-04T14:43:30.000Z
|
pydreamer/models/__init__.py
|
LvZut/pydreamer
|
e3a522e13319d3667b526abb5f5747ab68e3c04e
|
[
"MIT"
] | 2
|
2022-01-17T06:49:50.000Z
|
2022-02-17T19:45:24.000Z
|
pydreamer/models/__init__.py
|
LvZut/pydreamer
|
e3a522e13319d3667b526abb5f5747ab68e3c04e
|
[
"MIT"
] | 10
|
2021-11-27T18:20:26.000Z
|
2022-03-14T09:06:52.000Z
|
from .dreamer import Dreamer
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0186c6f9ccb6910901110026b5550d4363a11f93
| 110
|
py
|
Python
|
tests/collagen/utils/__init__.py
|
newskylabs/newskylabs-collagen
|
3e2e331605745e6709f57dce8730ceb9ceaa002c
|
[
"Apache-2.0"
] | null | null | null |
tests/collagen/utils/__init__.py
|
newskylabs/newskylabs-collagen
|
3e2e331605745e6709f57dce8730ceb9ceaa002c
|
[
"Apache-2.0"
] | null | null | null |
tests/collagen/utils/__init__.py
|
newskylabs/newskylabs-collagen
|
3e2e331605745e6709f57dce8730ceb9ceaa002c
|
[
"Apache-2.0"
] | null | null | null |
from . import test_conversion
from . import test_generic
from . import test_idxgz
from . import test_settings
| 22
| 29
| 0.818182
| 16
| 110
| 5.375
| 0.4375
| 0.465116
| 0.651163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145455
| 110
| 4
| 30
| 27.5
| 0.914894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
018d64e411b9a079532721baad7937f619846f0d
| 187
|
py
|
Python
|
tests/test_main.py
|
ZhuYuJin/cgroup-parser
|
7132791c496dc87af04d0458ad1f820eac8a8f0f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_main.py
|
ZhuYuJin/cgroup-parser
|
7132791c496dc87af04d0458ad1f820eac8a8f0f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_main.py
|
ZhuYuJin/cgroup-parser
|
7132791c496dc87af04d0458ad1f820eac8a8f0f
|
[
"Apache-2.0"
] | null | null | null |
import cgroup_parser
def test_interface():
cgroup_parser.get_max_procs()
cgroup_parser.get_cpu_usage()
cgroup_parser.get_memory_limit()
cgroup_parser.get_memory_usage()
| 20.777778
| 36
| 0.780749
| 26
| 187
| 5.076923
| 0.5
| 0.454545
| 0.454545
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139037
| 187
| 8
| 37
| 23.375
| 0.819876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.166667
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6dba80a9622a3df8b603c41e7552e6d4c8ed3c02
| 23
|
py
|
Python
|
tests/res/foo.py
|
lepture/werkzeug
|
627ac8370bc5aa3a04ba365b4ebcd32b6a859863
|
[
"BSD-3-Clause"
] | 1
|
2019-04-14T19:58:21.000Z
|
2019-04-14T19:58:21.000Z
|
tests/res/foo.py
|
lepture/werkzeug
|
627ac8370bc5aa3a04ba365b4ebcd32b6a859863
|
[
"BSD-3-Clause"
] | null | null | null |
tests/res/foo.py
|
lepture/werkzeug
|
627ac8370bc5aa3a04ba365b4ebcd32b6a859863
|
[
"BSD-3-Clause"
] | null | null | null |
from .bar import value
| 11.5
| 22
| 0.782609
| 4
| 23
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6def8fbc025a4ae631780ed754a16d15160b7b0b
| 6,514
|
py
|
Python
|
knx_stack/client/knxnet_ip_discovery.py
|
majamassarini/knx-stack
|
11a9baac6b7600649b5fbca43c93b200b23676b4
|
[
"MIT"
] | 2
|
2021-07-28T07:42:28.000Z
|
2022-01-25T18:56:05.000Z
|
knx_stack/client/knxnet_ip_discovery.py
|
majamassarini/knx-stack
|
11a9baac6b7600649b5fbca43c93b200b23676b4
|
[
"MIT"
] | 6
|
2021-07-25T21:36:01.000Z
|
2022-02-20T21:11:31.000Z
|
knx_stack/client/knxnet_ip_discovery.py
|
majamassarini/knx-stack
|
11a9baac6b7600649b5fbca43c93b200b23676b4
|
[
"MIT"
] | null | null | null |
import struct
import socket
import asyncio
import logging
import knx_stack
class Request(asyncio.DatagramProtocol):
def __init__(self, local_addr: str, local_port: int):
"""
A KNXnet IP Discovery request service
:param local_addr: discovery request instance host ip address
:param local_port: discovery request instance binding port
Example::
async def send_discovery_request(local_addr: str, local_port: int):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', knx_stack.knxnet_ip.DISCOVERY_MULTICAST_PORT))
group = socket.inet_aton(knx_stack.knxnet_ip.DISCOVERY_MULTICAST_ADDR)
mreq = struct.pack('!4sL', group, socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.setblocking(False)
transport, protocol = await loop.create_datagram_endpoint(
lambda: Request(local_addr, local_port), sock=sock,
)
return transport, protocol
"""
self._loop = asyncio.get_event_loop()
self._transport = None
self._local_addr = local_addr
self._local_port = local_port
self._state = knx_stack.knxnet_ip.State(knx_stack.Medium.knxnet_ip, None, None)
self.logger = logging.getLogger(__name__)
def connection_made(self, transport):
self._transport = transport
self.logger.info("Connection made: {}".format(str(self._transport)))
msg = knx_stack.encode_msg(
self._state,
knx_stack.knxnet_ip.core.search.req.Msg(
addr=self._local_addr, port=self._local_port
),
)
self.logger.info("encode: {}".format(msg))
self._transport.sendto(
bytearray.fromhex(str(msg)),
(
knx_stack.knxnet_ip.DISCOVERY_MULTICAST_ADDR,
knx_stack.knxnet_ip.DISCOVERY_MULTICAST_PORT,
),
)
def connection_lost(self, exc):
self.logger.error("Connection lost: {}".format(str(exc)))
self._transport = None
def error_received(self, exc):
self.logger.error("Error received: {}".format(str(exc)))
def datagram_received(self, data, addr):
self.logger.info("read data: {}".format(data.hex()))
self.logger.info("read from: {}".format(str(addr)))
class Listen(asyncio.DatagramProtocol):
"""
A KNXnet IP Discovery listener service
:param local_addr: discovery listener instance host ip address
:param local_port: discovery listener instance binding port
Example::
async def listen_discovery_responses(local_addr: str, local_port: int):
transport, protocol = await loop.create_datagram_endpoint(
lambda: Listen(), local_addr=(local_addr, local_port),
)
return transport, protocol
if __name__ == '__main__':
import sys
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
root.addHandler(handler)
loop = asyncio.get_event_loop()
transport1, _ = loop.run_until_complete(loop.create_task(listen_discovery_responses('172.31.10.111', 5544)))
transport2, _ = loop.run_until_complete(loop.create_task(send_discovery_request('172.31.10.111', 5544)))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
print("Closing transport...")
transport1.close()
transport2.close()
loop.close()
"""
def __init__(self):
self._transport = None
self._state = knx_stack.knxnet_ip.State(knx_stack.Medium.knxnet_ip, None, None)
self.logger = logging.getLogger(__name__)
def connection_made(self, transport):
self._transport = transport
self.logger.info("Connection made: {}".format(str(self._transport)))
def connection_lost(self, exc):
self.logger.error("Connection lost: {}".format(str(exc)))
self._transport = None
def error_received(self, exc):
self.logger.error("Error received: {}".format(str(exc)))
def datagram_received(self, data, addr):
self.logger.info("read {}".format(str(data.hex())))
self.logger.info("read {}".format(str(addr)))
search_response = knx_stack.decode_msg(
self._state, knx_stack.knxnet_ip.Msg.make_from_str(data.hex())
)
self.logger.info("read decoded: {}".format(str(search_response)))
async def send_discovery_request(local_addr: str, local_port: int):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("", knx_stack.knxnet_ip.DISCOVERY_MULTICAST_PORT))
group = socket.inet_aton(knx_stack.knxnet_ip.DISCOVERY_MULTICAST_ADDR)
mreq = struct.pack("!4sL", group, socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.setblocking(False)
transport, protocol = await loop.create_datagram_endpoint(
lambda: Request(local_addr, local_port),
sock=sock,
)
return transport, protocol
async def listen_discovery_responses(local_addr: str, local_port: int):
transport, protocol = await loop.create_datagram_endpoint(
lambda: Listen(),
local_addr=(local_addr, local_port),
)
return transport, protocol
if __name__ == "__main__":
import sys
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
root.addHandler(handler)
loop = asyncio.get_event_loop()
if len(sys.argv):
transport1, _ = loop.run_until_complete(
loop.create_task(listen_discovery_responses(sys.argv[0], 5544))
)
transport2, _ = loop.run_until_complete(
loop.create_task(send_discovery_request(sys.argv[0], 5544))
)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
print("Closing transport...")
transport1.close()
transport2.close()
loop.close()
| 34.648936
| 120
| 0.642462
| 751
| 6,514
| 5.302264
| 0.174434
| 0.036163
| 0.035158
| 0.040181
| 0.853591
| 0.827725
| 0.798594
| 0.751381
| 0.729282
| 0.729282
| 0
| 0.010647
| 0.25023
| 6,514
| 187
| 121
| 34.834225
| 0.804668
| 0.315321
| 0
| 0.3
| 0
| 0
| 0.052971
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0.01
| 0.06
| 0
| 0.2
| 0.01
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0986ca341593898178573e0a204ed21602be920f
| 99
|
py
|
Python
|
tail/__init__.py
|
0eu/tail-assignment
|
a86cdcbee88a6d0bf07b7ab7175a7742a5188a2f
|
[
"MIT"
] | 1
|
2020-12-01T15:05:21.000Z
|
2020-12-01T15:05:21.000Z
|
tail/__init__.py
|
0eu/tail-assignment
|
a86cdcbee88a6d0bf07b7ab7175a7742a5188a2f
|
[
"MIT"
] | null | null | null |
tail/__init__.py
|
0eu/tail-assignment
|
a86cdcbee88a6d0bf07b7ab7175a7742a5188a2f
|
[
"MIT"
] | null | null | null |
from tail.core import read_last_lines, follow_lines
__all__ = ["read_last_lines", "follow_lines"]
| 24.75
| 51
| 0.79798
| 15
| 99
| 4.6
| 0.6
| 0.231884
| 0.376812
| 0.550725
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10101
| 99
| 3
| 52
| 33
| 0.775281
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
09c49181d3fdabb104e8b2473f43e07ce944fcb6
| 74
|
py
|
Python
|
shapes-trainer/training_shapes_module/__init__.py
|
dakotaJang/shapes
|
19ba73ad2a9b50b57cafca53560678273aeb7776
|
[
"MIT"
] | 1
|
2019-02-02T11:46:55.000Z
|
2019-02-02T11:46:55.000Z
|
shapes-trainer/training_shapes_module/__init__.py
|
dakotaJang/shapes
|
19ba73ad2a9b50b57cafca53560678273aeb7776
|
[
"MIT"
] | null | null | null |
shapes-trainer/training_shapes_module/__init__.py
|
dakotaJang/shapes
|
19ba73ad2a9b50b57cafca53560678273aeb7776
|
[
"MIT"
] | null | null | null |
from .loader import *
from .model import *
from .train_and_test import *
| 24.666667
| 29
| 0.743243
| 11
| 74
| 4.818182
| 0.636364
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175676
| 74
| 3
| 29
| 24.666667
| 0.868852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
09e89717699974cfa907e599273f2f898e6cc20f
| 30
|
py
|
Python
|
pastepdb/__init__.py
|
pooriaahmadi/pastepdb
|
166b2e8614ee2ea6c8f2f804af23458defb4674a
|
[
"MIT"
] | 8
|
2021-03-17T10:48:49.000Z
|
2021-04-06T08:16:04.000Z
|
pastepdb/__init__.py
|
pooriaahmadi/pastepdb
|
166b2e8614ee2ea6c8f2f804af23458defb4674a
|
[
"MIT"
] | null | null | null |
pastepdb/__init__.py
|
pooriaahmadi/pastepdb
|
166b2e8614ee2ea6c8f2f804af23458defb4674a
|
[
"MIT"
] | null | null | null |
from .pastepdb import pastepdb
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1127e97d3747a0a490202eaf8f996051a3a32f10
| 194
|
py
|
Python
|
nawrapper/__init__.py
|
xzackli/nawrapper
|
f67c02b48d04ed35ab05a378b9884fefd9d07d7f
|
[
"MIT"
] | null | null | null |
nawrapper/__init__.py
|
xzackli/nawrapper
|
f67c02b48d04ed35ab05a378b9884fefd9d07d7f
|
[
"MIT"
] | 9
|
2019-08-27T11:52:37.000Z
|
2021-09-21T05:13:25.000Z
|
nawrapper/__init__.py
|
xzackli/nawrapper
|
f67c02b48d04ed35ab05a378b9884fefd9d07d7f
|
[
"MIT"
] | 1
|
2020-07-07T14:31:43.000Z
|
2020-07-07T14:31:43.000Z
|
"""Package init file.
We want the user to get everything right away upon `import nawrapper as nw`.
"""
from .power import *
from .maptools import *
from .covtools import *
from . import planck
| 21.555556
| 76
| 0.731959
| 29
| 194
| 4.896552
| 0.758621
| 0.211268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180412
| 194
| 8
| 77
| 24.25
| 0.893082
| 0.494845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1157a67a471d97e9b998c20a52b64bbf93cf6c33
| 13,715
|
py
|
Python
|
multipy/check.py
|
kamilazdybal/multipy
|
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
|
[
"MIT"
] | null | null | null |
multipy/check.py
|
kamilazdybal/multipy
|
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
|
[
"MIT"
] | null | null | null |
multipy/check.py
|
kamilazdybal/multipy
|
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
|
[
"MIT"
] | null | null | null |
"""multipy: Python library for multicomponent mass transfer"""
__author__ = "James C. Sutherland, Kamila Zdybal"
__copyright__ = "Copyright (c) 2022, James C. Sutherland, Kamila Zdybal"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = ["Kamila Zdybal"]
__email__ = ["kamilazdybal@gmail.com"]
__status__ = "Production"
import numpy as np
import pandas as pd
import random
import copy
import scipy
import multipy
import warnings
gas_constant = 8.31446261815324
################################################################################
################################################################################
####
#### Class: Check
####
################################################################################
################################################################################
class Check:
"""
Supports performing basic checks of the computed quantities.
"""
# --------------------------------------------------------------------------
def __init__(self):
pass
# --------------------------------------------------------------------------
def sum_of_species_fractions(self, species_fractions, tolerance=1e-12, verbose=False):
"""
Checks if all species mole/mass/volume fractions sum to 1.0 for
every observation within a specified tolerance.
For mole fractions:
.. math::
\\sum_{i=1}^{n} X_i = 1.0
For mass fractions:
.. math::
\\sum_{i=1}^{n} Y_i = 1.0
For volume fractions:
.. math::
\\sum_{i=1}^{n} V_i = 1.0
where :math:`n` is the number of species.
:param species_fractions:
scalar ``numpy.ndarray`` specifying **all** species mole/mass/volume fractions in :math:`[-]`.
It should be of size ``(n_species, n_observations)`` where
``n_species`` is at least 2.
:param tolerance: (optional)
``float`` specifying the tolerance. It should be larger than 0.0 and
smaller than 1.0.
:param verbose: (optional)
``bool`` for printing verbose information.
:return:
- **idx** - indices of observations where species mole/mass/volume fractions do not sum to 1.0 within a specified tolerance.
"""
if not isinstance(species_fractions, np.ndarray):
raise ValueError("Parameter `species_fractions` has to be of type `numpy.ndarray`.")
try:
(n_species, n_observations) = np.shape(species_fractions)
except:
raise ValueError("Parameter `species_fractions` has to be a matrix.")
if n_species < 2:
raise ValueError("Species fractions matrix `species_mole_fractions` has to have at least two species.")
if n_observations < n_species:
warnings.warn("Number of observations in `species_fractions` is smaller than the number of species. Make sure that the `species_fractions` has shape `(n_observations,n_species)`.")
if not isinstance(tolerance, float):
raise ValueError("Parameter `tolerance` has to be of type `float`.")
if tolerance <= 0 or tolerance >= 1:
raise ValueError("Parameter `tolerance` has to be larger than 0 and smaller than 1.")
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be of type `bool`.")
sums = np.sum(species_fractions, axis=0)
sums_boolean = np.zeros_like(sums)
for i, observation in enumerate(sums):
if (observation < 1+tolerance) and (observation > 1-tolerance):
sums_boolean[i] = True
else:
sums_boolean[i] = False
if sums_boolean.all():
if verbose: print('All mole/mass/volume fractions sum to 1.0 within a specified tolerance.')
idx = np.array([])
else:
if verbose: print('Detected observations where mole/mass/volume fractions do not sum to 1.0 within a specified tolerance.')
(idx, ) = np.where(sums_boolean==False)
return idx
# --------------------------------------------------------------------------
def range_of_species_fractions(self, species_fractions, tolerance=1e-12, verbose=False):
"""
Checks if all species mole/mass/volume fraction values are bounded between
0 and 1.
For mole fractions:
.. math::
X_i \\in \\langle 0, 1 \\rangle
For mass fractions:
.. math::
Y_i \\in \\langle 0, 1 \\rangle
For volume fractions:
.. math::
V_i \\in \\langle 0, 1 \\rangle
:param species_fractions:
scalar ``numpy.ndarray`` specifying **all** species mole/mass/volume fractions in :math:`[-]`.
It should be of size ``(n_observations,n_species)`` where
``n_species`` is at least 2.
:param verbose: (optional)
``bool`` for printing verbose information.
:return:
- **idx_below_zero** - indices of observations where species mole/mass/volume fractions are less than 0.0 within a specified tolerance.
- **idx_above_one** - indices of observations where species mole/mass/volume fractions are larger than 1.0 within a specified tolerance.
"""
if not isinstance(species_fractions, np.ndarray):
raise ValueError("Parameter `species_fractions` has to be of type `numpy.ndarray`.")
try:
(n_species, n_observations) = np.shape(species_fractions)
except:
raise ValueError("Parameter `species_fractions` has to be a matrix.")
if n_species < 2:
raise ValueError("Mole fractions matrix `species_fractions` has to have at least two species.")
if n_observations < n_species:
warnings.warn("Number of observations in `species_fractions` is smaller than the number of species. Make sure that the `species_fractions` has shape `(n_observations,n_species)`.")
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be of type `bool`.")
if not np.greater_equal(species_fractions, 0-tolerance).all():
if verbose: print('Not all mole/mass/volume fractions are larger than 0.0 within a specified tolerance.')
(idx_below_zero_i, idx_below_zero_j) = np.where(species_fractions<(0-tolerance))
idx_below_zero = np.hstack((idx_below_zero_i[:,None], idx_below_zero_j[:,None]))
else:
if verbose: print('All mole/mass/volume fractions are larger than 0.0 within a specified tolerance.')
idx_below_zero = np.array([])
if not np.less_equal(species_fractions, 1+tolerance).all():
if verbose: print('Not all mole/mass/volume fractions are smaller than 1.0 within a specified tolerance.')
(idx_above_one_i, idx_above_one_j) = np.where(species_fractions>(1+tolerance))
idx_above_one = np.hstack((idx_above_one_i[:,None], idx_above_one_j[:,None]))
else:
if verbose: print('All mole/mass/volume fractions are smaller than 1.0 within a specified tolerance.')
idx_above_one = np.array([])
return (idx_below_zero, idx_above_one)
# --------------------------------------------------------------------------
def sum_of_species_gradients(self, species_gradients, tolerance=1e-12, verbose=False):
"""
Checks if all species mole/mass/volume fraction gradients sum to 0.0 for
every observation within a specified tolerance.
For mole fractions:
.. math::
\\sum_{i=1}^{n} \\nabla X_i = 0.0
For mass fractions:
.. math::
\\sum_{i=1}^{n} \\nabla Y_i = 0.0
For volume fractions:
.. math::
\\sum_{i=1}^{n} \\nabla V_i = 0.0
where :math:`n` is the number of species.
:param species_gradients:
scalar ``numpy.ndarray`` specifying **all** species mole/mass/volume fraction gradients in :math:`[-]`.
It should be of size ``(n_species, n_observations)`` where
``n_species`` is at least 2.
:param tolerance: (optional)
``float`` specifying the tolerance. It should be larger than 0.0 and
smaller than 1.0.
:param verbose: (optional)
``bool`` for printing verbose information.
:return:
- **idx** - indices of observations where species mole/mass/volume fraction gradients do not sum to 0.0 within a specified tolerance.
"""
if not isinstance(species_gradients, np.ndarray):
raise ValueError("Parameter `species_gradients` has to be of type `numpy.ndarray`.")
try:
(n_species, n_observations) = np.shape(species_gradients)
except:
raise ValueError("Parameter `species_gradients` has to be a matrix.")
if n_species < 2:
raise ValueError("Species fractions matrix `species_gradients` has to have at least two species.")
if n_observations < n_species:
warnings.warn("Number of observations in `species_gradients` is smaller than the number of species. Make sure that the `species_fractions` has shape `(n_observations,n_species)`.")
if not isinstance(tolerance, float):
raise ValueError("Parameter `tolerance` has to be of type `float`.")
if tolerance <= 0 or tolerance >= 1:
raise ValueError("Parameter `tolerance` has to be larger than 0 and smaller than 1.")
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be of type `bool`.")
sums = np.sum(species_gradients, axis=0)
sums_boolean = np.zeros_like(sums)
for i, observation in enumerate(sums):
if (observation < tolerance) and (observation > -tolerance):
sums_boolean[i] = True
else:
sums_boolean[i] = False
if sums_boolean.all():
if verbose: print('All mole/mass/volume fraction gradiens sum to 0.0 within a specified tolerance.')
idx = np.array([])
else:
if verbose: print('Detected observations where mole/mass/volume fraction gradients do not sum to 0.0 within a specified tolerance.')
(idx, ) = np.where(sums_boolean==False)
return idx
# --------------------------------------------------------------------------
def sum_of_species_production_rates(self, species_production_rates, tolerance=1e-12, verbose=False):
"""
Checks if all species production rates sum to 0.0 for
every observation within a specified tolerance:
For net molar production rates:
.. math::
\\sum_{i=1}^{n} s_i = 0.0
For net mass production rates:
.. math::
\\sum_{i=1}^{n} \\omega_i = 0.0
where :math:`n` is the number of species.
:param species_production_rates:
scalar ``numpy.ndarray`` specifying **all** species production rates, :math:`s_i` in :math:`mole/(m^3s)` or :math:`\\omega_i` in :math:`kg/(m^3s)`.
It should be of size ``(n_species,n_observations)`` where
``n_species`` is at least 2.
:param tolerance: (optional)
``float`` specifying the tolerance. It should be larger than 0.0 and
smaller than 1.0.
:param verbose: (optional)
``bool`` for printing verbose information.
:return:
- **idx** - indices of observations where species source terms do not sum to 0.0 within a specified tolerance.
"""
if not isinstance(species_production_rates, np.ndarray):
raise ValueError("Parameter `species_production_rates` has to be of type `numpy.ndarray`.")
try:
(n_species, n_observations) = np.shape(species_production_rates)
except:
raise ValueError("Parameter `species_production_rates` has to be a matrix.")
if n_species < 2:
raise ValueError("Species source terms matrix `species_production_rates` has to have at least two species.")
if n_observations < n_species:
warnings.warn("Number of observations in `species_production_rates` is smaller than the number of species. Make sure that the `species_production_rates` has shape `(n_observations,n_species)`.")
if not isinstance(tolerance, float):
raise ValueError("Parameter `tolerance` has to be of type `float`.")
if tolerance <= 0 or tolerance >= 1:
raise ValueError("Parameter `tolerance` has to be larger than 0 and smaller than 1.")
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be of type `bool`.")
sums = np.sum(species_production_rates, axis=0)
sums_boolean = np.zeros_like(sums)
for i, observation in enumerate(sums):
if (observation < tolerance) and (observation > -tolerance):
sums_boolean[i] = True
else:
sums_boolean[i] = False
if sums_boolean.all():
if verbose: print('All species production rates sum to 0.0 within a specified tolerance.')
idx = np.array([])
else:
if verbose: print('Detected observations where species production rates do not sum to 0.0 within a specified tolerance.')
(idx, ) = np.where(sums_boolean==False)
return idx
# --------------------------------------------------------------------------
| 38.525281
| 206
| 0.587386
| 1,651
| 13,715
| 4.745609
| 0.101757
| 0.055137
| 0.032163
| 0.057435
| 0.85067
| 0.826803
| 0.817486
| 0.799489
| 0.773197
| 0.735035
| 0
| 0.014349
| 0.263215
| 13,715
| 355
| 207
| 38.633803
| 0.761009
| 0.306672
| 0
| 0.57554
| 0
| 0.043165
| 0.355328
| 0.034841
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035971
| false
| 0.007194
| 0.05036
| 0
| 0.122302
| 0.071942
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fed9bd2808591485831ae3b90b08dc959af84228
| 19
|
py
|
Python
|
deprecated/origin_stgcn_repo/feeder/__init__.py
|
fserracant/mmskeleton
|
44008bdef3dd6354a17c220fac8bcd8cd08ed201
|
[
"Apache-2.0"
] | 2,302
|
2018-01-23T11:18:30.000Z
|
2022-03-31T12:24:55.000Z
|
deprecated/origin_stgcn_repo/feeder/__init__.py
|
fserracant/mmskeleton
|
44008bdef3dd6354a17c220fac8bcd8cd08ed201
|
[
"Apache-2.0"
] | 246
|
2019-08-24T15:36:11.000Z
|
2022-03-23T06:57:02.000Z
|
deprecated/origin_stgcn_repo/feeder/__init__.py
|
fserracant/mmskeleton
|
44008bdef3dd6354a17c220fac8bcd8cd08ed201
|
[
"Apache-2.0"
] | 651
|
2018-01-24T00:56:54.000Z
|
2022-03-25T23:42:53.000Z
|
from . import tools
| 19
| 19
| 0.789474
| 3
| 19
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 19
| 1
| 19
| 19
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3a1bb607068330f96d4bdb50c12759ee1c1a9528
| 14,071
|
py
|
Python
|
tests/unit/test_experiments_analytics.py
|
LastRemote/sagemaker-python-sdk
|
fddf29d9e4383cd3f939253eef47ee79a464dd37
|
[
"Apache-2.0"
] | 1,690
|
2017-11-29T20:13:37.000Z
|
2022-03-31T12:58:11.000Z
|
tests/unit/test_experiments_analytics.py
|
LastRemote/sagemaker-python-sdk
|
fddf29d9e4383cd3f939253eef47ee79a464dd37
|
[
"Apache-2.0"
] | 2,762
|
2017-12-04T05:18:03.000Z
|
2022-03-31T23:40:11.000Z
|
tests/unit/test_experiments_analytics.py
|
LastRemote/sagemaker-python-sdk
|
fddf29d9e4383cd3f939253eef47ee79a464dd37
|
[
"Apache-2.0"
] | 961
|
2017-11-30T16:44:03.000Z
|
2022-03-30T23:12:09.000Z
|
from __future__ import absolute_import
import mock
import pytest
import pandas as pd
from collections import OrderedDict
from sagemaker.analytics import ExperimentAnalytics
@pytest.fixture
def mock_session():
    """Return a fresh ``mock.Mock`` used as the ``sagemaker_session`` argument in these tests."""
    return mock.Mock()
def trial_component(trial_component_name):
    """Build a fake TrialComponent search result with the given name.

    The payload carries two hyperparameters, two metrics with full summary
    statistics, input/output artifacts, and a single trial/experiment parent.
    """
    metric_stats = [
        ("metric1", 5.0, 3.0, 4.0, 1.0, 2.0, 2.0),
        ("metric2", 10.0, 8.0, 9.0, 0.05, 7.0, 2.0),
    ]
    metrics = []
    for metric_name, mx, mn, avg, std, last, count in metric_stats:
        metrics.append(
            {
                "MetricName": metric_name,
                "Max": mx,
                "Min": mn,
                "Avg": avg,
                "StdDev": std,
                "Last": last,
                "Count": count,
            }
        )
    component = {
        "TrialComponentName": trial_component_name,
        "DisplayName": "Training",
        "Source": {"SourceArn": "some-source-arn"},
        "Parameters": {"hp1": {"NumberValue": 1.0}, "hp2": {"StringValue": "abc"}},
        "Metrics": metrics,
        "InputArtifacts": {
            "inputArtifacts1": {"MediaType": "text/plain", "Value": "s3:/foo/bar1"},
            "inputArtifacts2": {"MediaType": "text/plain", "Value": "s3:/foo/bar2"},
        },
        "OutputArtifacts": {
            "outputArtifacts1": {"MediaType": "text/csv", "Value": "s3:/sky/far1"},
            "outputArtifacts2": {"MediaType": "text/csv", "Value": "s3:/sky/far2"},
        },
        "Parents": [{"TrialName": "trial1", "ExperimentName": "experiment1"}],
    }
    return component
def test_trial_analytics_dataframe_all(mock_session):
    """Full dataframe: every hyperparameter, metric statistic, artifact and
    parent from the mocked search results becomes a column, and the search
    call carries an experiment-name filter."""
    mock_session.sagemaker_client.search.return_value = {
        "Results": [
            {"TrialComponent": trial_component("trial-1")},
            {"TrialComponent": trial_component("trial-2")},
        ]
    }
    analytics = ExperimentAnalytics(experiment_name="experiment1", sagemaker_session=mock_session)
    expected_dataframe = pd.DataFrame.from_dict(
        OrderedDict(
            [
                ("TrialComponentName", ["trial-1", "trial-2"]),
                ("DisplayName", ["Training", "Training"]),
                ("SourceArn", ["some-source-arn", "some-source-arn"]),
                ("hp1", [1.0, 1.0]),
                ("hp2", ["abc", "abc"]),
                ("metric1 - Min", [3.0, 3.0]),
                ("metric1 - Max", [5.0, 5.0]),
                ("metric1 - Avg", [4.0, 4.0]),
                ("metric1 - StdDev", [1.0, 1.0]),
                ("metric1 - Last", [2.0, 2.0]),
                ("metric1 - Count", [2.0, 2.0]),
                ("metric2 - Min", [8.0, 8.0]),
                ("metric2 - Max", [10.0, 10.0]),
                ("metric2 - Avg", [9.0, 9.0]),
                ("metric2 - StdDev", [0.05, 0.05]),
                ("metric2 - Last", [7.0, 7.0]),
                ("metric2 - Count", [2.0, 2.0]),
                ("inputArtifacts1 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts1 - Value", ["s3:/foo/bar1", "s3:/foo/bar1"]),
                ("inputArtifacts2 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts2 - Value", ["s3:/foo/bar2", "s3:/foo/bar2"]),
                ("outputArtifacts1 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts1 - Value", ["s3:/sky/far1", "s3:/sky/far1"]),
                ("outputArtifacts2 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts2 - Value", ["s3:/sky/far2", "s3:/sky/far2"]),
                ("Trials", [["trial1"], ["trial1"]]),
                ("Experiments", [["experiment1"], ["experiment1"]]),
            ]
        )
    )
    pd.testing.assert_frame_equal(expected_dataframe, analytics.dataframe())
    expected_search_exp = {
        "Filters": [
            {"Name": "Parents.ExperimentName", "Operator": "Equals", "Value": "experiment1"}
        ]
    }
    mock_session.sagemaker_client.search.assert_called_with(
        Resource="ExperimentTrialComponent", SearchExpression=expected_search_exp
    )
def test_trial_analytics_dataframe_selected_hyperparams(mock_session):
    """With ``parameter_names=["hp2"]``, only the requested hyperparameter
    column appears (``hp1`` is dropped); metric/artifact columns are unaffected."""
    mock_session.sagemaker_client.search.return_value = {
        "Results": [
            {"TrialComponent": trial_component("trial-1")},
            {"TrialComponent": trial_component("trial-2")},
        ]
    }
    analytics = ExperimentAnalytics(
        experiment_name="experiment1", parameter_names=["hp2"], sagemaker_session=mock_session
    )
    expected_dataframe = pd.DataFrame.from_dict(
        OrderedDict(
            [
                ("TrialComponentName", ["trial-1", "trial-2"]),
                ("DisplayName", ["Training", "Training"]),
                ("SourceArn", ["some-source-arn", "some-source-arn"]),
                ("hp2", ["abc", "abc"]),
                ("metric1 - Min", [3.0, 3.0]),
                ("metric1 - Max", [5.0, 5.0]),
                ("metric1 - Avg", [4.0, 4.0]),
                ("metric1 - StdDev", [1.0, 1.0]),
                ("metric1 - Last", [2.0, 2.0]),
                ("metric1 - Count", [2.0, 2.0]),
                ("metric2 - Min", [8.0, 8.0]),
                ("metric2 - Max", [10.0, 10.0]),
                ("metric2 - Avg", [9.0, 9.0]),
                ("metric2 - StdDev", [0.05, 0.05]),
                ("metric2 - Last", [7.0, 7.0]),
                ("metric2 - Count", [2.0, 2.0]),
                ("inputArtifacts1 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts1 - Value", ["s3:/foo/bar1", "s3:/foo/bar1"]),
                ("inputArtifacts2 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts2 - Value", ["s3:/foo/bar2", "s3:/foo/bar2"]),
                ("outputArtifacts1 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts1 - Value", ["s3:/sky/far1", "s3:/sky/far1"]),
                ("outputArtifacts2 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts2 - Value", ["s3:/sky/far2", "s3:/sky/far2"]),
                ("Trials", [["trial1"], ["trial1"]]),
                ("Experiments", [["experiment1"], ["experiment1"]]),
            ]
        )
    )
    pd.testing.assert_frame_equal(expected_dataframe, analytics.dataframe())
    expected_search_exp = {
        "Filters": [
            {"Name": "Parents.ExperimentName", "Operator": "Equals", "Value": "experiment1"}
        ]
    }
    mock_session.sagemaker_client.search.assert_called_with(
        Resource="ExperimentTrialComponent", SearchExpression=expected_search_exp
    )
def test_trial_analytics_dataframe_selected_metrics(mock_session):
    """With ``metric_names=["metric1"]``, only that metric's statistic columns
    appear (``metric2`` columns are dropped); other columns are unaffected."""
    mock_session.sagemaker_client.search.return_value = {
        "Results": [
            {"TrialComponent": trial_component("trial-1")},
            {"TrialComponent": trial_component("trial-2")},
        ]
    }
    analytics = ExperimentAnalytics(
        experiment_name="experiment1", metric_names=["metric1"], sagemaker_session=mock_session
    )
    expected_dataframe = pd.DataFrame.from_dict(
        OrderedDict(
            [
                ("TrialComponentName", ["trial-1", "trial-2"]),
                ("DisplayName", ["Training", "Training"]),
                ("SourceArn", ["some-source-arn", "some-source-arn"]),
                ("hp1", [1.0, 1.0]),
                ("hp2", ["abc", "abc"]),
                ("metric1 - Min", [3.0, 3.0]),
                ("metric1 - Max", [5.0, 5.0]),
                ("metric1 - Avg", [4.0, 4.0]),
                ("metric1 - StdDev", [1.0, 1.0]),
                ("metric1 - Last", [2.0, 2.0]),
                ("metric1 - Count", [2.0, 2.0]),
                ("inputArtifacts1 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts1 - Value", ["s3:/foo/bar1", "s3:/foo/bar1"]),
                ("inputArtifacts2 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts2 - Value", ["s3:/foo/bar2", "s3:/foo/bar2"]),
                ("outputArtifacts1 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts1 - Value", ["s3:/sky/far1", "s3:/sky/far1"]),
                ("outputArtifacts2 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts2 - Value", ["s3:/sky/far2", "s3:/sky/far2"]),
                ("Trials", [["trial1"], ["trial1"]]),
                ("Experiments", [["experiment1"], ["experiment1"]]),
            ]
        )
    )
    pd.testing.assert_frame_equal(expected_dataframe, analytics.dataframe())
    expected_search_exp = {
        "Filters": [
            {"Name": "Parents.ExperimentName", "Operator": "Equals", "Value": "experiment1"}
        ]
    }
    mock_session.sagemaker_client.search.assert_called_with(
        Resource="ExperimentTrialComponent", SearchExpression=expected_search_exp
    )
def test_trial_analytics_dataframe_search_pagination(mock_session):
    """When search returns a ``NextToken``, the dataframe is built from both
    pages and a second search call is made passing that token."""
    result_page_1 = {
        "Results": [{"TrialComponent": trial_component("trial-1")}],
        "NextToken": "nextToken",
    }
    result_page_2 = {"Results": [{"TrialComponent": trial_component("trial-2")}]}
    mock_session.sagemaker_client.search.side_effect = [result_page_1, result_page_2]
    analytics = ExperimentAnalytics(experiment_name="experiment1", sagemaker_session=mock_session)
    expected_dataframe = pd.DataFrame.from_dict(
        OrderedDict(
            [
                ("TrialComponentName", ["trial-1", "trial-2"]),
                ("DisplayName", ["Training", "Training"]),
                ("SourceArn", ["some-source-arn", "some-source-arn"]),
                ("hp1", [1.0, 1.0]),
                ("hp2", ["abc", "abc"]),
                ("metric1 - Min", [3.0, 3.0]),
                ("metric1 - Max", [5.0, 5.0]),
                ("metric1 - Avg", [4.0, 4.0]),
                ("metric1 - StdDev", [1.0, 1.0]),
                ("metric1 - Last", [2.0, 2.0]),
                ("metric1 - Count", [2.0, 2.0]),
                ("metric2 - Min", [8.0, 8.0]),
                ("metric2 - Max", [10.0, 10.0]),
                ("metric2 - Avg", [9.0, 9.0]),
                ("metric2 - StdDev", [0.05, 0.05]),
                ("metric2 - Last", [7.0, 7.0]),
                ("metric2 - Count", [2.0, 2.0]),
                ("inputArtifacts1 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts1 - Value", ["s3:/foo/bar1", "s3:/foo/bar1"]),
                ("inputArtifacts2 - MediaType", ["text/plain", "text/plain"]),
                ("inputArtifacts2 - Value", ["s3:/foo/bar2", "s3:/foo/bar2"]),
                ("outputArtifacts1 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts1 - Value", ["s3:/sky/far1", "s3:/sky/far1"]),
                ("outputArtifacts2 - MediaType", ["text/csv", "text/csv"]),
                ("outputArtifacts2 - Value", ["s3:/sky/far2", "s3:/sky/far2"]),
                ("Trials", [["trial1"], ["trial1"]]),
                ("Experiments", [["experiment1"], ["experiment1"]]),
            ]
        )
    )
    pd.testing.assert_frame_equal(expected_dataframe, analytics.dataframe())
    expected_search_exp = {
        "Filters": [
            {"Name": "Parents.ExperimentName", "Operator": "Equals", "Value": "experiment1"}
        ]
    }
    mock_session.sagemaker_client.search.assert_has_calls(
        [
            mock.call(Resource="ExperimentTrialComponent", SearchExpression=expected_search_exp),
            mock.call(
                Resource="ExperimentTrialComponent",
                SearchExpression=expected_search_exp,
                NextToken="nextToken",
            ),
        ]
    )
def test_trial_analytics_dataframe_filter_trials_search_exp_only(mock_session):
    """A caller-supplied search expression is forwarded to search unchanged
    when no experiment name is given."""
    mock_session.sagemaker_client.search.return_value = {"Results": []}
    search_exp = {"Filters": [{"Name": "Tags.someTag", "Operator": "Equals", "Value": "someValue"}]}
    analytics = ExperimentAnalytics(search_expression=search_exp, sagemaker_session=mock_session)
    analytics.dataframe()
    mock_session.sagemaker_client.search.assert_called_with(
        Resource="ExperimentTrialComponent", SearchExpression=search_exp
    )
def test_trial_analytics_dataframe_filter_trials_search_exp_with_experiment(mock_session):
    """When both a search expression and an experiment name are given, the
    experiment-name filter is appended to the caller's filters."""
    mock_session.sagemaker_client.search.return_value = {"Results": []}
    search_exp = {"Filters": [{"Name": "Tags.someTag", "Operator": "Equals", "Value": "someValue"}]}
    analytics = ExperimentAnalytics(
        experiment_name="someExperiment",
        search_expression=search_exp,
        sagemaker_session=mock_session,
    )
    analytics.dataframe()
    expected_search_exp = {
        "Filters": [
            {"Name": "Tags.someTag", "Operator": "Equals", "Value": "someValue"},
            {"Name": "Parents.ExperimentName", "Operator": "Equals", "Value": "someExperiment"},
        ]
    }
    mock_session.sagemaker_client.search.assert_called_with(
        Resource="ExperimentTrialComponent", SearchExpression=expected_search_exp
    )
def test_trial_analytics_dataframe_throws_error_if_no_filter_specified(mock_session):
    """Constructing ExperimentAnalytics with neither an experiment name nor a
    search expression raises ``ValueError``."""
    with pytest.raises(ValueError):
        ExperimentAnalytics(sagemaker_session=mock_session)
def test_trial_analytics_dataframe_filter_trials_search_exp_with_sort(mock_session):
    """``sort_by``/``sort_order`` are passed through to the search call as
    ``SortBy``/``SortOrder`` alongside the combined filters."""
    mock_session.sagemaker_client.search.return_value = {"Results": []}
    search_exp = {"Filters": [{"Name": "Tags.someTag", "Operator": "Equals", "Value": "someValue"}]}
    analytics = ExperimentAnalytics(
        experiment_name="someExperiment",
        search_expression=search_exp,
        sort_by="Tags.someTag",
        sort_order="Ascending",
        sagemaker_session=mock_session,
    )
    analytics.dataframe()
    expected_search_exp = {
        "Filters": [
            {"Name": "Tags.someTag", "Operator": "Equals", "Value": "someValue"},
            {"Name": "Parents.ExperimentName", "Operator": "Equals", "Value": "someExperiment"},
        ]
    }
    mock_session.sagemaker_client.search.assert_called_with(
        Resource="ExperimentTrialComponent",
        SearchExpression=expected_search_exp,
        SortBy="Tags.someTag",
        SortOrder="Ascending",
    )
| 40.66763
| 100
| 0.538341
| 1,308
| 14,071
| 5.623089
| 0.10474
| 0.046363
| 0.034262
| 0.04949
| 0.868661
| 0.854657
| 0.840381
| 0.827872
| 0.808022
| 0.801088
| 0
| 0.042538
| 0.283278
| 14,071
| 345
| 101
| 40.785507
| 0.686763
| 0
| 0
| 0.604651
| 0
| 0
| 0.311563
| 0.023026
| 0
| 0
| 0
| 0
| 0.036545
| 1
| 0.033223
| false
| 0
| 0.019934
| 0.006645
| 0.059801
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3a48d584ca2b00f4953c04fc6e6edaf62e4524b4
| 111
|
py
|
Python
|
lab001/load.py
|
DavidJRichards/fpga_101
|
9aa3e85211e47c63c29af36960fd767fe88f4d82
|
[
"BSD-2-Clause"
] | 2
|
2021-08-15T20:19:11.000Z
|
2021-08-16T07:28:36.000Z
|
lab001/load.py
|
DavidJRichards/fpga_101
|
9aa3e85211e47c63c29af36960fd767fe88f4d82
|
[
"BSD-2-Clause"
] | null | null | null |
lab001/load.py
|
DavidJRichards/fpga_101
|
9aa3e85211e47c63c29af36960fd767fe88f4d82
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""Load ``build/top.bit`` onto the FPGA via openocd using ``wukong.cfg``."""
import subprocess

# subprocess.run with an argument list avoids shell interpretation, and
# check=True surfaces a failed openocd invocation instead of silently
# ignoring the exit status (as the previous os.system call did).
subprocess.run(
    ["openocd", "-f", "wukong.cfg", "-c", "init; pld load 0 build/top.bit; exit"],
    check=True,
)
| 27.75
| 77
| 0.693694
| 21
| 111
| 3.666667
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0.135135
| 111
| 3
| 78
| 37
| 0.78125
| 0.189189
| 0
| 0
| 0
| 0.5
| 0.719101
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3a54d0fda33a47ced2ba7f11cd011f05493c2833
| 40
|
py
|
Python
|
datasets/__init__.py
|
ML-Cai/LaneDetector
|
4e56faf45cf592812284b0bfee149bba4658fac9
|
[
"MIT"
] | null | null | null |
datasets/__init__.py
|
ML-Cai/LaneDetector
|
4e56faf45cf592812284b0bfee149bba4658fac9
|
[
"MIT"
] | null | null | null |
datasets/__init__.py
|
ML-Cai/LaneDetector
|
4e56faf45cf592812284b0bfee149bba4658fac9
|
[
"MIT"
] | null | null | null |
from .tu_simple_lane import TusimpleLane
| 40
| 40
| 0.9
| 6
| 40
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3a5562123f0c3dc18461e7e454e66d71a8d213a8
| 29
|
py
|
Python
|
dashboard/dashboardmenu/__init__.py
|
PyFlux/PyFlux
|
8abae10261e276bf4942aed8d54ef3b5498754ca
|
[
"Apache-2.0"
] | null | null | null |
dashboard/dashboardmenu/__init__.py
|
PyFlux/PyFlux
|
8abae10261e276bf4942aed8d54ef3b5498754ca
|
[
"Apache-2.0"
] | 10
|
2020-03-24T17:09:56.000Z
|
2021-12-13T20:00:15.000Z
|
dashboard/dashboardmenu/__init__.py
|
PyFlux/PyFlux-Django-Html
|
8abae10261e276bf4942aed8d54ef3b5498754ca
|
[
"Apache-2.0"
] | null | null | null |
from .dashboard_menu import *
| 29
| 29
| 0.827586
| 4
| 29
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
28df1e4de356cb1489acc045615f0942034640d3
| 61
|
py
|
Python
|
up/tasks/det/data/__init__.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 196
|
2021-10-30T05:15:36.000Z
|
2022-03-30T18:43:40.000Z
|
eod/tasks/det/data/__init__.py
|
YZW-explorer/EOD
|
f10e64de86c0f356ebf5c7e923f4042eec4207b1
|
[
"Apache-2.0"
] | 12
|
2021-10-30T11:33:28.000Z
|
2022-03-31T14:22:58.000Z
|
eod/tasks/det/data/__init__.py
|
YZW-explorer/EOD
|
f10e64de86c0f356ebf5c7e923f4042eec4207b1
|
[
"Apache-2.0"
] | 23
|
2021-11-01T07:26:17.000Z
|
2022-03-27T05:55:37.000Z
|
from .datasets import * # noqa
from .metrics import * # noqa
| 20.333333
| 30
| 0.704918
| 8
| 61
| 5.375
| 0.625
| 0.465116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196721
| 61
| 2
| 31
| 30.5
| 0.877551
| 0.147541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e91e11b03c50d75698f208a10f1b310af5a8ffcc
| 4,043
|
py
|
Python
|
authors/apps/articles/tests/test_likes_dislikes.py
|
andela/ah-backend-prime
|
0708463d4565a4977a5a5dcb839f1dfed52fdc90
|
[
"BSD-3-Clause"
] | 1
|
2019-09-19T14:30:05.000Z
|
2019-09-19T14:30:05.000Z
|
authors/apps/articles/tests/test_likes_dislikes.py
|
e-ian/authors-haven-frontend
|
05829c8088ca49ef2cf0863dc87ec55b44b13534
|
[
"BSD-3-Clause"
] | 22
|
2019-03-25T16:10:53.000Z
|
2022-03-11T23:44:21.000Z
|
authors/apps/articles/tests/test_likes_dislikes.py
|
e-ian/authors-haven-frontend
|
05829c8088ca49ef2cf0863dc87ec55b44b13534
|
[
"BSD-3-Clause"
] | 6
|
2019-03-25T09:39:39.000Z
|
2021-03-11T23:54:12.000Z
|
import json
from rest_framework import status, response
from django.urls import reverse
from .base import ArticlesBaseTest
from .test_data import VALID_ARTICLE
from authors.apps.authentication.tests.test_data import (
VALID_USER_DATA
)
from rest_framework.test import APIClient, APITestCase
from .base import BaseTest
class TestLikeDislikeArticle(ArticlesBaseTest):
    '''Test likes and dislikes functionality'''

    def _post_article(self):
        '''Create a user and an article; return (auth token, article slug).'''
        token = self.create_user(VALID_USER_DATA)
        response = self.client.post(
            self.create_articles,
            HTTP_AUTHORIZATION=token,
            data=VALID_ARTICLE,
            format='json'
        )
        return token, response.data['article']['slug']

    def _react(self, token, slug, reaction):
        '''POST a like/dislike reaction to an article and return the response.'''
        return self.client.post(
            f'/api/v1/articles/{slug}/{reaction}/',
            HTTP_AUTHORIZATION=token
        )

    def test_like_article(self):
        '''Test for liking article'''
        token, slug = self._post_article()
        response = self._react(token, slug, 'like')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['likes'], 1)

    def test_dislike_article(self):
        '''Test for disliking article'''
        token, slug = self._post_article()
        response = self._react(token, slug, 'dislike')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['dislikes'], 1)

    def test_like_article_twice(self):
        '''Test for liking article twice (second like removes the first)'''
        token, slug = self._post_article()
        self._react(token, slug, 'like')
        response = self._react(token, slug, 'like')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['likes'], 0)

    def test_dislike_article_twice(self):
        '''Test for disliking article twice (second dislike removes the first)'''
        token, slug = self._post_article()
        self._react(token, slug, 'dislike')
        response = self._react(token, slug, 'dislike')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['dislikes'], 0)

    def test_like_disliked_article_twice(self):
        '''Test for disliking a previously liked article'''
        token, slug = self._post_article()
        self._react(token, slug, 'like')
        response = self._react(token, slug, 'dislike')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['likes'], 0)
        self.assertEqual(response.data['dislikes'], 1)
| 33.97479
| 71
| 0.623547
| 436
| 4,043
| 5.582569
| 0.133028
| 0.05341
| 0.074774
| 0.090386
| 0.812243
| 0.794577
| 0.779376
| 0.779376
| 0.779376
| 0.779376
| 0
| 0.009847
| 0.271581
| 4,043
| 118
| 72
| 34.262712
| 0.816638
| 0.046747
| 0
| 0.686869
| 0
| 0
| 0.112477
| 0.082658
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.050505
| false
| 0
| 0.080808
| 0
| 0.141414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3a7e4975152b719956030d04fd87b6aff71f9b39
| 203
|
py
|
Python
|
app/views/dashboard/leadership/__init__.py
|
Wern-rm/raton.by
|
68f862f2bc0551bf2327e9d6352c0cde93f45301
|
[
"MIT"
] | null | null | null |
app/views/dashboard/leadership/__init__.py
|
Wern-rm/raton.by
|
68f862f2bc0551bf2327e9d6352c0cde93f45301
|
[
"MIT"
] | null | null | null |
app/views/dashboard/leadership/__init__.py
|
Wern-rm/raton.by
|
68f862f2bc0551bf2327e9d6352c0cde93f45301
|
[
"MIT"
] | null | null | null |
from app.views.dashboard.leadership.index import leaderships
from app.views.dashboard.leadership.delete import leadership_delete
from app.views.dashboard.leadership.activation import leadership_activated
| 67.666667
| 74
| 0.8867
| 26
| 203
| 6.846154
| 0.423077
| 0.117978
| 0.202247
| 0.353933
| 0.522472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054187
| 203
| 3
| 74
| 67.666667
| 0.927083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c97f4aad4afc2d34135bd0a531bcabb3725f19f6
| 10,715
|
py
|
Python
|
tests/unit/states/test_libvirt.py
|
cvedel/salt
|
8731f42829ca1f0a38d2434057c485abeff222a7
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/unit/states/test_libvirt.py
|
cvedel/salt
|
8731f42829ca1f0a38d2434057c485abeff222a7
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/unit/states/test_libvirt.py
|
cvedel/salt
|
8731f42829ca1f0a38d2434057c485abeff222a7
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
'''
# pylint: disable=3rd-party-module-not-gated
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import tempfile
import shutil
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.paths import TMP
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
mock_open,
patch)
# Import Salt Libs
import salt.states.virt as virt
import salt.utils.files
class LibvirtMock(MagicMock):  # pylint: disable=too-many-ancestors
    '''
    libvirt library mockup

    Stands in for the ``libvirt`` module in the loader globals so the state
    module under test can reference it without libvirt installed.
    '''
    class libvirtError(Exception):  # pylint: disable=invalid-name
        '''
        libvirt error mockup
        '''
@skipIf(NO_MOCK, NO_MOCK_REASON)
class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.states.libvirt
'''
def setup_loader_modules(self):
self.mock_libvirt = LibvirtMock() # pylint: disable=attribute-defined-outside-init
self.addCleanup(delattr, self, 'mock_libvirt')
loader_globals = {
'libvirt': self.mock_libvirt
}
return {virt: loader_globals}
@classmethod
def setUpClass(cls):
cls.pki_dir = tempfile.mkdtemp(dir=TMP)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.pki_dir)
del cls.pki_dir
# 'keys' function tests: 1
def test_keys(self):
'''
Test to manage libvirt keys.
'''
with patch('os.path.isfile', MagicMock(return_value=False)):
name = 'sunrise'
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
mock = MagicMock(side_effect=[[], ['libvirt.servercert.pem'],
{'libvirt.servercert.pem': 'A'}])
with patch.dict(virt.__salt__, {'pillar.ext': mock}): # pylint: disable=no-member
comt = ('All keys are correct')
ret.update({'comment': comt})
self.assertDictEqual(virt.keys(name, basepath=self.pki_dir), ret)
with patch.dict(virt.__opts__, {'test': True}): # pylint: disable=no-member
comt = ('Libvirt keys are set to be updated')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(virt.keys(name, basepath=self.pki_dir), ret)
with patch.dict(virt.__opts__, {'test': False}): # pylint: disable=no-member
with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())):
comt = ('Updated libvirt certs and keys')
ret.update({'comment': comt, 'result': True,
'changes': {'servercert': 'new'}})
self.assertDictEqual(virt.keys(name, basepath=self.pki_dir), ret)
def test_keys_with_expiration_days(self):
'''
Test to manage libvirt keys.
'''
with patch('os.path.isfile', MagicMock(return_value=False)):
name = 'sunrise'
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
mock = MagicMock(side_effect=[[], ['libvirt.servercert.pem'],
{'libvirt.servercert.pem': 'A'}])
with patch.dict(virt.__salt__, {'pillar.ext': mock}): # pylint: disable=no-member
comt = ('All keys are correct')
ret.update({'comment': comt})
self.assertDictEqual(virt.keys(name,
basepath=self.pki_dir,
expiration_days=700), ret)
with patch.dict(virt.__opts__, {'test': True}): # pylint: disable=no-member
comt = ('Libvirt keys are set to be updated')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(virt.keys(name,
basepath=self.pki_dir,
expiration_days=700), ret)
with patch.dict(virt.__opts__, {'test': False}): # pylint: disable=no-member
with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())):
comt = ('Updated libvirt certs and keys')
ret.update({'comment': comt, 'result': True,
'changes': {'servercert': 'new'}})
self.assertDictEqual(virt.keys(name,
basepath=self.pki_dir,
expiration_days=700), ret)
def test_keys_with_state(self):
'''
Test to manage libvirt keys.
'''
with patch('os.path.isfile', MagicMock(return_value=False)):
name = 'sunrise'
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
mock = MagicMock(side_effect=[[], ['libvirt.servercert.pem'],
{'libvirt.servercert.pem': 'A'}])
with patch.dict(virt.__salt__, {'pillar.ext': mock}): # pylint: disable=no-member
comt = ('All keys are correct')
ret.update({'comment': comt})
self.assertDictEqual(virt.keys(name,
basepath=self.pki_dir,
st='California'), ret)
with patch.dict(virt.__opts__, {'test': True}): # pylint: disable=no-member
comt = ('Libvirt keys are set to be updated')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(virt.keys(name,
basepath=self.pki_dir,
st='California'), ret)
with patch.dict(virt.__opts__, {'test': False}): # pylint: disable=no-member
with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())):
comt = ('Updated libvirt certs and keys')
ret.update({'comment': comt, 'result': True,
'changes': {'servercert': 'new'}})
self.assertDictEqual(virt.keys(name,
basepath=self.pki_dir,
st='California'), ret)
def test_keys_with_all_options(self):
'''
Test to manage libvirt keys.
'''
with patch('os.path.isfile', MagicMock(return_value=False)):
name = 'sunrise'
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
mock = MagicMock(side_effect=[[], ['libvirt.servercert.pem'],
{'libvirt.servercert.pem': 'A'}])
with patch.dict(virt.__salt__, {'pillar.ext': mock}): # pylint: disable=no-member
comt = ('All keys are correct')
ret.update({'comment': comt})
self.assertDictEqual(virt.keys(name,
basepath=self.pki_dir,
country='USA',
st='California',
locality='Los_Angeles',
organization='SaltStack',
expiration_days=700), ret)
with patch.dict(virt.__opts__, {'test': True}): # pylint: disable=no-member
comt = ('Libvirt keys are set to be updated')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(virt.keys(name,
basepath=self.pki_dir,
country='USA',
st='California',
locality='Los_Angeles',
organization='SaltStack',
expiration_days=700), ret)
with patch.dict(virt.__opts__, {'test': False}): # pylint: disable=no-member
with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())):
comt = ('Updated libvirt certs and keys')
ret.update({'comment': comt, 'result': True,
'changes': {'servercert': 'new'}})
self.assertDictEqual(virt.keys(name,
basepath=self.pki_dir,
country='USA',
st='California',
locality='Los_Angeles',
organization='SaltStack',
expiration_days=700), ret)
def test_running(self):
    '''
    running state test cases.
    '''
    ret = {'name': 'myvm',
           'changes': {},
           'result': True,
           'comment': 'myvm is running'}
    # Domain reported stopped and virt.start succeeds (returns 0):
    # the state records that it started the domain.
    with patch.dict(virt.__salt__, {  # pylint: disable=no-member
            'virt.vm_state': MagicMock(return_value='stopped'),
            'virt.start': MagicMock(return_value=0)
            }):
        ret.update({'changes': {'myvm': 'Domain started'},
                    'comment': 'Domain myvm started'})
        self.assertDictEqual(virt.running('myvm'), ret)
    # virt.start raising libvirtError: the state fails and surfaces the
    # libvirt error message as its comment, with no changes recorded.
    with patch.dict(virt.__salt__, {  # pylint: disable=no-member
            'virt.vm_state': MagicMock(return_value='stopped'),
            'virt.start': MagicMock(side_effect=[self.mock_libvirt.libvirtError('libvirt error msg')])
            }):
        ret.update({'changes': {}, 'result': False, 'comment': 'libvirt error msg'})
        self.assertDictEqual(virt.running('myvm'), ret)
| 44.832636
| 110
| 0.466729
| 936
| 10,715
| 5.200855
| 0.16453
| 0.040674
| 0.037387
| 0.048891
| 0.738496
| 0.723295
| 0.705218
| 0.705218
| 0.705218
| 0.705218
| 0
| 0.003517
| 0.416146
| 10,715
| 238
| 111
| 45.021008
| 0.774616
| 0.083808
| 0
| 0.75
| 0
| 0
| 0.141862
| 0.018305
| 0
| 0
| 0
| 0
| 0.081395
| 1
| 0.046512
| false
| 0
| 0.052326
| 0
| 0.122093
| 0.005814
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a36d0ac9736ee7f0f87c898553b9622f6343c622
| 130
|
py
|
Python
|
katas/kyu_7/product_of_main_diagonal.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
katas/kyu_7/product_of_main_diagonal.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
katas/kyu_7/product_of_main_diagonal.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
from functools import reduce  # reduce() is no longer a builtin on Python 3
from operator import mul


def main_diagonal_product(matrix):
    """Return the product of the main-diagonal entries of a square matrix.

    :param matrix: sequence of sequences; entry [i][i] is read for each row.
    :return: product of matrix[0][0] * matrix[1][1] * ... * matrix[n-1][n-1].
    :raises TypeError: for an empty matrix (reduce with no initial value),
        matching the original behaviour.
    """
    # range() replaces the Python-2-only xrange(); functools.reduce replaces
    # the removed reduce() builtin, so this now runs on Python 2 and 3.
    return reduce(mul, (matrix[i][i] for i in range(len(matrix))))
| 21.666667
| 67
| 0.730769
| 21
| 130
| 4.428571
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146154
| 130
| 5
| 68
| 26
| 0.837838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
6e8ba5d71602dfafef83788dd25424753fb81302
| 22
|
py
|
Python
|
rtk/_reports_/__init__.py
|
rakhimov/rtk
|
adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63
|
[
"BSD-3-Clause"
] | null | null | null |
rtk/_reports_/__init__.py
|
rakhimov/rtk
|
adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63
|
[
"BSD-3-Clause"
] | null | null | null |
rtk/_reports_/__init__.py
|
rakhimov/rtk
|
adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63
|
[
"BSD-3-Clause"
] | 2
|
2020-04-03T04:14:42.000Z
|
2021-02-22T05:30:35.000Z
|
from tabular import *
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6ed12b3edc7505ed891b2d8f3913b9e4dec71522
| 152
|
py
|
Python
|
training/config_interface/__init__.py
|
khoehlein/CNNs-for-Wind-Field-Downscaling
|
eb8418d4d893fcb2beb929abb241281b7a9b6a95
|
[
"MIT"
] | 5
|
2021-05-05T06:08:52.000Z
|
2022-03-24T04:57:52.000Z
|
training/config_interface/__init__.py
|
khoehlein/CNNs-for-Wind-Field-Downscaling
|
eb8418d4d893fcb2beb929abb241281b7a9b6a95
|
[
"MIT"
] | null | null | null |
training/config_interface/__init__.py
|
khoehlein/CNNs-for-Wind-Field-Downscaling
|
eb8418d4d893fcb2beb929abb241281b7a9b6a95
|
[
"MIT"
] | 2
|
2021-08-07T05:18:05.000Z
|
2022-03-31T03:48:37.000Z
|
from training.config_interface.BaseTrainingProcess import BaseTrainingProcess
from training.config_interface.BaseTrainingEpoch import BaseTrainingEpoch
| 50.666667
| 77
| 0.921053
| 14
| 152
| 9.857143
| 0.5
| 0.173913
| 0.26087
| 0.391304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 152
| 2
| 78
| 76
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6ed7f888ecc9bba08e6a0dcd86d63bb68f3e4ae3
| 12,156
|
py
|
Python
|
KML.py
|
ncareol/PlanFlight
|
c38b3e1a99f52655cae9e1b4f4c2ee06e56833eb
|
[
"BSD-3-Clause"
] | 1
|
2021-06-16T01:10:35.000Z
|
2021-06-16T01:10:35.000Z
|
KML.py
|
NCAR/PlanFlight
|
c38b3e1a99f52655cae9e1b4f4c2ee06e56833eb
|
[
"BSD-3-Clause"
] | null | null | null |
KML.py
|
NCAR/PlanFlight
|
c38b3e1a99f52655cae9e1b4f4c2ee06e56833eb
|
[
"BSD-3-Clause"
] | null | null | null |
# file KML.py
#
"Produces a kml file from the track as defined in ModuleConstructor.Track."
# Strategy here is to produce two .kml files, one that references
# google.com and one that references acserver.raf.ucar.edu, the latter
# for use on the aircraft to avoid remote connections to google.com
# in flight. The latter is named PlanAC.kml, the former Plan.kml.
#
# This is awkward code that writes many things repeatedly where I'm sure
# there is an efficient way to do this. Someday should clean this up --
# but it works, so leave it for now. It was copied from a Google-Earth-
# constructed representation of the track, so I'm just taking all the
# kml that was in that file and duplicating it without understanding what
# I'm doing...
import Specs

# Running count of waypoints emitted by PlotWaypoint(); reset in KMLHeader().
WaypointNumber = 0
# Default output file name; the aircraft copy is derived as PlanAC.kml.
KMLFileName = 'Plan.kml'
# Last-plotted position, seeded with the takeoff location so the first
# waypoint's connecting line starts at takeoff.  Updated by PlotWaypoint().
lonx = Specs.TakeoffLocation()[0]
latx = Specs.TakeoffLocation()[1]
galtx = Specs.TakeoffLocation()[2]
# header info for .kml file
# Icon URLs: the Plan.kml copy references google.com; the PlanAC.kml copy
# references the on-board acserver so no remote connection is needed in flight.
GOOGLE_ICON_HREF = \
    "http://maps.google.com/mapfiles/kml/shapes/placemark_square.png"
ACSERVER_ICON_HREF = \
    "http://acserver.raf.ucar.edu/flight_data/display/placemark_square.png"


def _write_style(kml, style_id, icon_href):
    "Write one <Style> block (icon/label/line/list sub-styles) to *kml*."
    kml.write("\t <Style id=\"%s\">\n" % style_id)
    kml.write("\t\t <IconStyle>\n")
    kml.write("\t\t\t <color>ff0000ff</color>\n")
    kml.write("\t\t\t <scale>0.8</scale>\n")
    kml.write("\t\t\t <Icon>\n")
    kml.write("\t\t\t\t <href>%s</href>\n" % icon_href)
    kml.write("\t\t\t </Icon>\n")
    kml.write("\t\t </IconStyle>\n")
    kml.write("\t\t <LabelStyle>\n")
    kml.write("\t\t\t <color>ff0000ff</color>\n")
    kml.write("\t\t </LabelStyle>\n")
    kml.write("\t\t <LineStyle>\n")
    kml.write("\t\t\t <color>ff00aaff</color>\n")
    kml.write("\t\t\t <width>2</width>\n")
    kml.write("\t\t </LineStyle>\n")
    kml.write("\t\t <ListStyle>\n")
    kml.write("\t\t </ListStyle>\n")
    kml.write("\t </Style>\n")


def _write_document_header(kml, doc_name, icon_href):
    "Write the XML prolog, <Document> opening, StyleMap and both styles."
    kml.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
    kml.write("<kml xmlns=\"http://earth.google.com/kml/2.2\">\n")
    kml.write("<Document>\n")
    # might need to replace .kml with .kmz here?
    kml.write("\t <name>" + doc_name + "</name>\n")
    kml.write("\t<StyleMap id=\"msn_triangle_copy1\">\n")
    kml.write("\t\t<Pair>\n")
    kml.write("\t\t\t<key>normal</key>\n")
    kml.write("\t\t\t<styleUrl>#sn_triangle_copy1</styleUrl>\n")
    kml.write("\t\t</Pair>\n")
    kml.write("\t\t<Pair>\n")
    kml.write("\t\t\t<key>highlight</key>\n")
    kml.write("\t\t\t<styleUrl>#sh_triangle_copy1</styleUrl>\n")
    kml.write("\t\t</Pair>\n")
    kml.write("\t</StyleMap>\n")
    # Both the normal and highlight styles use the same square icon.
    _write_style(kml, "sh_triangle_copy1", icon_href)
    _write_style(kml, "sn_triangle_copy1", icon_href)


def KMLHeader(KMLFileName):
    """Open the two .kml output files and write the required headers.

    Derives the aircraft copy's name by replacing 'Plan' with 'PlanAC'
    in *KMLFileName*, writes the document header/StyleMap/Styles into
    both files (google.com icons in the Plan file, acserver icons in the
    PlanAC file), resets the module-global WaypointNumber, and returns
    the pair of open file objects (KMLFile, KMLACFile).

    The four near-identical inline style sections of the original have
    been factored into _write_document_header/_write_style; the emitted
    bytes are unchanged.  The Python-2-only 'print' statement was
    replaced with a portable print() call.
    """
    global WaypointNumber  # changed here so needs to be global
    KMLACFileName = KMLFileName.replace('Plan', 'PlanAC')
    print('kml file name: %s, new name is: %s' % (KMLFileName, KMLACFileName))
    KMLFile = open(KMLFileName, 'w')
    KMLACFile = open(KMLACFileName, 'w')
    _write_document_header(KMLFile, KMLFileName, GOOGLE_ICON_HREF)
    _write_document_header(KMLACFile, KMLACFileName, ACSERVER_ICON_HREF)
    WaypointNumber = 0
    return (KMLFile, KMLACFile)
def KMLclose(KMLFile, KMLACFile):
    """Write the closing </Document> and </kml> tags to both files, then
    close them (Plan file first, then the aircraft copy)."""
    for output in (KMLFile, KMLACFile):
        output.write("</Document>\n")
        output.write("</kml>\n")
        output.close()
def PlotPoints(KMLFile, KMLACFile, points):
    """Write *points* as one LineString Placemark into each .kml file.

    Each point is an (lon, lat, alt) triple.  The Plan file uses the
    StyleMap url and an absolute altitudeMode; the aircraft copy uses the
    highlight style and omits the altitudeMode element, exactly as the
    Google-Earth-generated original did.
    """
    # Pre-render the coordinate lines once; both files get the same text.
    coord_text = "".join(
        "\t\t\t\t %s,%s,%s \n" % (format(p[0], 'f'),
                                  format(p[1], 'f'),
                                  format(p[2], 'f'))
        for p in points)

    def emit(out, style_url, with_altitude_mode):
        # One Placemark/LineString block, parameterised by the two
        # differences between the Plan and PlanAC outputs.
        out.write("\t <Placemark>\n")
        out.write("\t\t <styleUrl>#%s</styleUrl>\n" % style_url)
        out.write("\t\t <LineString>\n")
        out.write("\t\t\t <tessellate>1</tessellate>\n")
        out.write("\t\t\t <coordinates>\n")
        out.write(coord_text)
        out.write("\t\t\t </coordinates>\n")
        if with_altitude_mode:
            out.write("\t\t\t <altitudeMode>absolute</altitudeMode>\n")
        out.write("\t\t </LineString>\n")
        out.write("\t </Placemark>\n")

    emit(KMLFile, "msn_triangle_copy1", True)
    emit(KMLACFile, "sh_triangle_copy1", False)
def PlotWaypoint (KMLFile, KMLACFile, wp, label='', symbol = 'triangle'):
    "Adds waypoint symbol to the .kml file for plotting on Google Earth etc."
    # Copy from a Google-Earth-generated example
    # (I don't understand all this; it's just copied verbatim here.
    # It's likely this could be made more compact.)
    # NOTE(review): the 'symbol' parameter is never used in this body.
    global WaypointNumber, lonx, latx, galtx
    # These are global because they are saved in order to
    # draw lines from the last point to this one.
    longitude = wp[0]
    latitude = wp[1]
    altitude = wp[2]
    WaypointNumber += 1
    # Default label is "WP<n>" from the running waypoint counter.
    if (label == ''): label="WP"+format(WaypointNumber,'d')
    # --- Plan.kml: the waypoint Point placemark (name + altitude note) ---
    KMLFile.write("\t <Placemark>\n")
    KMLFile.write("\t\t <name>"+label+"</name>\n")
    KMLFile.write("\t\t <description>WayPoint "\
        +format(round(altitude/(100))*100.,'.0f')+' ft'+"</description>\n")
    KMLFile.write("\t\t <styleUrl>#msn_triangle_copy1"\
        +"</styleUrl>\n")
    KMLFile.write("\t\t <Point>\n")
    KMLFile.write("\t\t\t <coordinates>"+format(longitude,'f')\
        +','+format(latitude,'f')+','+format(altitude,'f')+"</coordinates>\n")
    KMLFile.write("\t\t\t <altitudeMode>absolute</altitudeMode>\n")
    KMLFile.write("\t\t </Point>\n")
    KMLFile.write("\t </Placemark>\n")
    # --- Plan.kml: line segment from the previously-plotted point
    # (lonx/latx/galtx) to this waypoint ---
    KMLFile.write("\t <Placemark>\n")
    KMLFile.write("\t\t <name>"+"Path"+format(WaypointNumber,'d')+"</name>\n")
    KMLFile.write("\t\t <styleUrl>#msn_triangle_copy1</styleUrl>\n")
    KMLFile.write("\t\t <LineString>\n")
    KMLFile.write("\t\t\t <tessellate>1</tessellate>\n")
    KMLFile.write("\t\t\t <coordinates>\n")
    KMLFile.write("\t\t\t\t "+format(lonx,'f')+','+format(latx,'f')+','\
        +format(galtx,'f')+' '+format(longitude,'f')+','\
        +format(latitude,'f')+','+format(altitude,'f')+'\n')
    KMLFile.write("\t\t\t </coordinates>\n")
    KMLFile.write("\t\t\t <altitudeMode>absolute</altitudeMode>\n")
    KMLFile.write("\t\t </LineString>\n")
    KMLFile.write("\t </Placemark>\n")
    # --- PlanAC.kml: same two placemarks; note the AC copy omits the
    # <altitudeMode> elements, matching the generated original ---
    KMLACFile.write("\t <Placemark>\n")
    KMLACFile.write("\t\t <name>"+label+"</name>\n")
    KMLACFile.write("\t\t <description>WayPoint "\
        +format(round(altitude/(100))*100.,'.0f')+' ft'+"</description>\n")
    KMLACFile.write("\t\t <styleUrl>#msn_triangle_copy1"\
        +"</styleUrl>\n")
    KMLACFile.write("\t\t <Point>\n")
    KMLACFile.write("\t\t\t <coordinates>"+format(longitude,'f')\
        +','+format(latitude,'f')+','+format(altitude,'f')+"</coordinates>\n")
    KMLACFile.write("\t\t </Point>\n")
    KMLACFile.write("\t </Placemark>\n")
    KMLACFile.write("\t <Placemark>\n")
    KMLACFile.write("\t\t <name>"+"Path"+format(WaypointNumber,'d')+"</name>\n")
    KMLACFile.write("\t\t <styleUrl>#msn_triangle_copy1</styleUrl>\n")
    KMLACFile.write("\t\t <LineString>\n")
    KMLACFile.write("\t\t\t <tessellate>1</tessellate>\n")
    KMLACFile.write("\t\t\t <coordinates>\n")
    KMLACFile.write("\t\t\t\t "+format(lonx,'f')+','+format(latx,'f')+','\
        +format(galtx,'f')+' '+format(longitude,'f')+','\
        +format(latitude,'f')+','+format(altitude,'f')+'\n')
    KMLACFile.write("\t\t\t </coordinates>\n")
    KMLACFile.write("\t\t </LineString>\n")
    KMLACFile.write("\t </Placemark>\n")
    # Remember this point as the start of the next path segment.
    lonx = longitude
    latx = latitude
    galtx = altitude
    return()
| 48.430279
| 116
| 0.618707
| 1,772
| 12,156
| 4.221219
| 0.124718
| 0.05508
| 0.120722
| 0.138503
| 0.759492
| 0.753877
| 0.745989
| 0.741176
| 0.714171
| 0.700802
| 0
| 0.012556
| 0.154821
| 12,156
| 250
| 117
| 48.624
| 0.715495
| 0.147746
| 0
| 0.627907
| 0
| 0.018605
| 0.415222
| 0.102837
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.004651
| null | null | 0.004651
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6edc5a13e61a1bdcdf25bd7cc6d12ff98125bfdd
| 39
|
py
|
Python
|
Python/Tests/TestData/SendToInteractiveWorkspace/PrintInterpreter.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 404
|
2019-05-07T02:21:57.000Z
|
2022-03-31T17:03:04.000Z
|
Python/Tests/TestData/SendToInteractiveWorkspace/PrintInterpreter.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 1,672
|
2019-05-06T21:09:38.000Z
|
2022-03-31T23:16:04.000Z
|
Python/Tests/TestData/SendToInteractiveWorkspace/PrintInterpreter.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 186
|
2019-05-13T03:17:37.000Z
|
2022-03-31T16:24:05.000Z
|
import sys

# Report the interpreter's (major, minor) version tuple, e.g. "(3, 10)".
major_minor = tuple(sys.version_info)[:2]
print(major_minor)
| 13
| 27
| 0.769231
| 7
| 39
| 4.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.076923
| 39
| 2
| 28
| 19.5
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
6e0cf115db4bb95a08b1d4ece55fa11c8d6418e1
| 222
|
py
|
Python
|
src/mot/motion_models/__init__.py
|
neer201/Multi-Object-Tracking-for-Automotive-Systems-in-python
|
886cd9e87283982381713dbf2e4ef695030f81de
|
[
"Apache-2.0"
] | 6
|
2021-11-21T10:47:01.000Z
|
2022-03-17T01:14:53.000Z
|
src/mot/motion_models/__init__.py
|
neer201/Multi-Object-Tracking-for-Automotive-Systems-in-python
|
886cd9e87283982381713dbf2e4ef695030f81de
|
[
"Apache-2.0"
] | 3
|
2021-04-12T12:37:41.000Z
|
2021-04-30T14:29:53.000Z
|
src/mot/motion_models/__init__.py
|
neer201/Multi-Object-Tracking-for-Automotive-Systems-in-python
|
886cd9e87283982381713dbf2e4ef695030f81de
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
from mot.motion_models.base_motion_model import MotionModel
from mot.motion_models.CT_motion_model import CoordinateTurnMotionModel
from mot.motion_models.CV_motion_model import ConstantVelocityMotionModel
| 37
| 73
| 0.891892
| 29
| 222
| 6.517241
| 0.482759
| 0.111111
| 0.206349
| 0.301587
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004854
| 0.072072
| 222
| 5
| 74
| 44.4
| 0.912621
| 0.054054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
28461474953cc9c257de317f17581d4ef1a01795
| 18,209
|
py
|
Python
|
DQN/network.py
|
Xin-Ye-1/HIEM
|
6764f579eef6ec92dd85a005af27419f630df7da
|
[
"Apache-2.0"
] | 2
|
2021-04-12T02:41:00.000Z
|
2021-05-15T02:18:15.000Z
|
DQN/network.py
|
Xin-Ye-1/HIEM
|
6764f579eef6ec92dd85a005af27419f630df7da
|
[
"Apache-2.0"
] | null | null | null |
DQN/network.py
|
Xin-Ye-1/HIEM
|
6764f579eef6ec92dd85a005af27419f630df7da
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
import tensorflow as tf
import tensorflow.contrib.slim as slim

# RNG seed passed to every xavier initializer below, for reproducible
# weight initialisation across the networks in this file.
seed = 0
def fc2d(inputs,
         num_outputs,
         activation_fn,
         scope):
    """Fully-connected layer over the last axis of a rank-3 tensor.

    inputs: tensor whose static shape unpacks as [n0, n1, n2]; a
        [n2, num_outputs] weight matrix is applied per (n0, n1) position.
    num_outputs: size of the output's last axis.
    activation_fn: callable applied to the biased output, or None for a
        linear layer.
    scope: variable scope name; variables are shared via tf.AUTO_REUSE.
    Returns a tensor of shape [n0, n1, num_outputs].
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE) as s:
        n0, n1, n2 = inputs.get_shape().as_list()
        weights = tf.get_variable(name='weights',
                                  shape=[n2, num_outputs],
                                  initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                  trainable=True)
        # Batched matmul over the last axis: [i,j,k] x [k,l] -> [i,j,l].
        wx = tf.einsum('ijk,kl->ijl', inputs, weights)
        biases = tf.get_variable(name='biases',
                                 shape=[num_outputs],
                                 initializer=tf.zeros_initializer(),
                                 trainable=True)
        wx_b = wx + biases
        result = wx_b if activation_fn is None else activation_fn(wx_b, name=s.name)
        return result
def conv3d(scope_name,
           input,
           filter_size):
    """3-D convolution + bias + ReLU under a reusable variable scope.

    scope_name: variable scope; weights/biases shared via tf.AUTO_REUSE.
    input: 5-D tensor as required by tf.nn.conv3d.
    filter_size: filter shape list; filter_size[-1] is the number of
        output channels and the bias length.
    Uses stride 1 in every dimension and 'VALID' padding.
    NOTE(review): 'input' shadows the builtin; kept for interface
    compatibility.
    """
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        conv_filter = tf.get_variable(name='weights',
                                      shape=filter_size,
                                      initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                      trainable=True)
        conv = tf.nn.conv3d(input=input,
                            filter=conv_filter,
                            strides=[1, 1, 1, 1, 1],
                            padding='VALID')
        biases = tf.get_variable(name='biases',
                                 shape=[filter_size[-1]],
                                 initializer=tf.zeros_initializer(),
                                 trainable=True)
        bias = tf.nn.bias_add(conv, biases)
        result = tf.nn.relu(bias, name=scope.name)
        return result
class Highlevel_Network():
    """High-level Q-network built under 'highlevel/<scope>' (TF 1.x graph).

    From stacked vision/depth observations plus a one-hot target vector it
    predicts one Q-value and one termination probability per object label.
    For any scope not starting with 'global' it also builds a TD loss and
    an RMSProp update that applies clipped gradients to the variables
    collected from 'highlevel/global/main'.
    """
    def __init__(self,
                 window_size,
                 num_labels,
                 # action_size,
                 history_steps,
                 scope
                 ):
        with tf.variable_scope('highlevel'):
            with tf.variable_scope(scope):
                # Observations: history_steps stacked window_size x window_size
                # patches flattened along one axis; visions carry a per-label
                # channel, depths a single channel.
                self.visions = tf.placeholder(shape=[None, history_steps * window_size * window_size, num_labels],
                                              dtype=tf.float32)
                self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1], dtype=tf.float32)
                self.targets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
                # Collapse the per-label vision channel to one scalar per cell.
                related_visions = fc2d(inputs=self.visions,
                                       num_outputs=1,
                                       activation_fn=None,
                                       scope='vision_preprocess')
                related_visions = slim.flatten(related_visions)
                depths = slim.flatten(self.depths)
                hidden_visions = slim.fully_connected(inputs=related_visions,
                                                      num_outputs=256,
                                                      activation_fn=tf.nn.relu,
                                                      weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                      biases_initializer=tf.zeros_initializer(),
                                                      scope='vision_hidden')
                hidden_depths = slim.fully_connected(inputs=depths,
                                                     num_outputs=256,
                                                     activation_fn=tf.nn.relu,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                     biases_initializer=tf.zeros_initializer(),
                                                     scope='depth_hidden')
                hidden_targets = slim.fully_connected(inputs=self.targets,
                                                      num_outputs=256,
                                                      activation_fn=tf.nn.relu,
                                                      weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                      biases_initializer=tf.zeros_initializer(),
                                                      scope='target_hidden')
                # Fuse the three 256-d embeddings along the feature axis.
                vision_depth_feature = tf.concat([hidden_visions, hidden_depths, hidden_targets], -1)
                embed_feature = slim.fully_connected(inputs=vision_depth_feature,
                                                     num_outputs=256,
                                                     activation_fn=tf.nn.relu,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                     biases_initializer=tf.zeros_initializer(),
                                                     scope='embed')
                # Linear head: one Q-value per label.
                qvalue = slim.fully_connected(inputs=embed_feature,
                                              num_outputs=num_labels,
                                              activation_fn=None,
                                              weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                              biases_initializer=tf.zeros_initializer(),
                                              scope='qvalue')
                self.qvalue = qvalue
                terminations = slim.fully_connected(inputs=embed_feature,
                                                    num_outputs=num_labels,
                                                    activation_fn=None,
                                                    weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                    biases_initializer=tf.zeros_initializer(),
                                                    scope='termination')
                # Per-label termination probability in (0, 1).
                self.terminations = tf.sigmoid(terminations)
                # highlevel training (workers only; the 'global' copy is
                # inference-only and is updated via apply_gradients below)
                if not scope.startswith('global'):
                    self.chosen_objects = tf.placeholder(shape=[None], dtype=tf.int32)
                    self.target_qvalue = tf.placeholder(shape=[None], dtype=tf.float32)
                    self.highlevel_lr = tf.placeholder(dtype=tf.float32)
                    object_onehot = tf.one_hot(self.chosen_objects, num_labels, dtype=tf.float32)
                    # Select the Q-value of the chosen label for each sample.
                    qvalue_for_chosen_object = tf.reduce_sum(self.qvalue * object_onehot, axis=1)
                    td_error = tf.square(self.target_qvalue - qvalue_for_chosen_object)
                    self.qvalue_loss = 0.5 * tf.reduce_mean(td_error)
                    highlevel_trainer = tf.train.RMSPropOptimizer(learning_rate=self.highlevel_lr)
                    highlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'highlevel/%s' % scope)
                    gradients = tf.gradients(self.qvalue_loss, highlevel_params)
                    # Clip by global norm (40.0) before applying.
                    norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
                    global_highlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'highlevel/global/main')
                    self.highlevel_update = highlevel_trainer.apply_gradients(zip(norm_gradients, global_highlevel_params))
class Lowlevel_Network():
    """Low-level Q-network built under 'lowlevel/<scope>' (TF 1.x graph).

    The vision input is masked by a one-hot sub-target (only the pursued
    label's channel survives), fused with depth, and mapped to one
    Q-value per action.  Non-'global' scopes additionally build a TD loss
    and an RMSProp update applying clipped gradients to the variables
    collected from 'lowlevel/global/in/main'.
    """
    def __init__(self,
                 window_size,
                 num_labels,
                 action_size,
                 history_steps,
                 scope='global'
                 ):
        with tf.variable_scope('lowlevel'):
            with tf.variable_scope(scope):
                self.visions = tf.placeholder(shape=[None, history_steps * window_size * window_size, num_labels],
                                              dtype=tf.float32)
                self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1], dtype=tf.float32)
                # One-hot sub-target selecting which label to pursue.
                self.subtargets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
                # Broadcast the sub-target over every cell, then keep only the
                # matching vision channel per cell via the masked sum.
                subtargets_expanded = tf.tile(tf.expand_dims(self.subtargets, 1),
                                              [1, history_steps * window_size * window_size, 1])
                masked_visions = tf.reduce_sum(self.visions * subtargets_expanded, axis=-1)
                masked_visions = slim.flatten(masked_visions)
                depths = slim.flatten(self.depths)
                hidden_visions = slim.fully_connected(inputs=masked_visions,
                                                      num_outputs=256,
                                                      activation_fn=tf.nn.relu,
                                                      weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                      biases_initializer=tf.zeros_initializer(),
                                                      scope='vision_hidden')
                hidden_depths = slim.fully_connected(inputs=depths,
                                                     num_outputs=256,
                                                     activation_fn=tf.nn.relu,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                     biases_initializer=tf.zeros_initializer(),
                                                     scope='depth_hidden')
                vision_depth_feature = tf.concat([hidden_visions, hidden_depths], 1)
                embed_feature = slim.fully_connected(inputs=vision_depth_feature,
                                                     num_outputs=256,
                                                     activation_fn=tf.nn.relu,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                     biases_initializer=tf.zeros_initializer(),
                                                     scope='embed')
                # value estimation
                hidden_value = slim.fully_connected(inputs=embed_feature,
                                                    num_outputs=20,
                                                    activation_fn=tf.nn.relu,
                                                    weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                    biases_initializer=tf.zeros_initializer(),
                                                    scope='value_hidden')
                self.qvalues = slim.fully_connected(inputs=hidden_value,
                                                    num_outputs=action_size,
                                                    activation_fn=None,
                                                    weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                    biases_initializer=tf.zeros_initializer(),
                                                    scope='qvalue')
                # Lowlevel training (workers only)
                if not scope.startswith('global'):
                    self.chosen_actions = tf.placeholder(shape=[None], dtype=tf.int32)
                    self.target_qvalues = tf.placeholder(shape=[None], dtype=tf.float32)
                    self.lowlevel_lr = tf.placeholder(dtype=tf.float32)
                    actions_onehot = tf.one_hot(self.chosen_actions, action_size, dtype=tf.float32)
                    # Q-value of the chosen action for each sample.
                    qvalues_for_chosen_actions = tf.reduce_sum(self.qvalues * actions_onehot, axis=-1)
                    self.qvalue_loss = 0.5 * tf.reduce_mean(tf.square(self.target_qvalues - qvalues_for_chosen_actions))
                    local_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'lowlevel/%s' % scope)
                    gradients = tf.gradients(self.qvalue_loss, local_lowlevel_params)
                    # Clip by global norm (40.0) before applying.
                    norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
                    lowlevel_trainer = tf.train.RMSPropOptimizer(learning_rate=self.lowlevel_lr)
                    global_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'lowlevel/global/in/main')
                    self.lowlevel_update = lowlevel_trainer.apply_gradients(zip(norm_gradients, global_lowlevel_params))
class Lowlevel_Network_ex():
    """Variant low-level Q-network built under 'lowlevel/<scope>'.

    Unlike Lowlevel_Network it does not mask visions by a sub-target;
    instead (like Highlevel_Network) it collapses the per-label vision
    channel with fc2d, fuses vision/depth/target embeddings, and predicts
    one Q-value per primitive action.  Non-'global' scopes build a TD
    loss and an RMSProp update onto the variables collected from
    'lowlevel/global/ex/main'.
    """
    def __init__(self,
                 window_size,
                 num_labels,
                 action_size,
                 history_steps,
                 scope
                 ):
        with tf.variable_scope('lowlevel'):
            with tf.variable_scope(scope):
                self.visions = tf.placeholder(shape=[None, history_steps * window_size * window_size, num_labels],
                                              dtype=tf.float32)
                self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1],
                                             dtype=tf.float32)
                self.targets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
                # Collapse the per-label vision channel to one scalar per cell.
                related_visions = fc2d(inputs=self.visions,
                                       num_outputs=1,
                                       activation_fn=None,
                                       scope='vision_preprocess')
                related_visions = slim.flatten(related_visions)
                depths = slim.flatten(self.depths)
                hidden_visions = slim.fully_connected(inputs=related_visions,
                                                      num_outputs=256,
                                                      activation_fn=tf.nn.relu,
                                                      weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                      biases_initializer=tf.zeros_initializer(),
                                                      scope='vision_hidden')
                hidden_depths = slim.fully_connected(inputs=depths,
                                                     num_outputs=256,
                                                     activation_fn=tf.nn.relu,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                     biases_initializer=tf.zeros_initializer(),
                                                     scope='depth_hidden')
                hidden_targets = slim.fully_connected(inputs=self.targets,
                                                      num_outputs=256,
                                                      activation_fn=tf.nn.relu,
                                                      weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                      biases_initializer=tf.zeros_initializer(),
                                                      scope='target_hidden')
                # Fuse the three 256-d embeddings along the feature axis.
                vision_depth_feature = tf.concat([hidden_visions, hidden_depths, hidden_targets], -1)
                embed_feature = slim.fully_connected(inputs=vision_depth_feature,
                                                     num_outputs=256,
                                                     activation_fn=tf.nn.relu,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                     biases_initializer=tf.zeros_initializer(),
                                                     scope='embed')
                # Linear head: one Q-value per primitive action.
                action_qvalues = slim.fully_connected(inputs=embed_feature,
                                                      num_outputs=action_size,
                                                      activation_fn=None,
                                                      weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                      biases_initializer=tf.zeros_initializer(),
                                                      scope='action_qvalue')
                self.action_qvalues = action_qvalues
                # highlevel training (workers only; note this class reuses the
                # 'highlevel_lr'/'highlevel_update' attribute names)
                if not scope.startswith('global'):
                    self.chosen_actions = tf.placeholder(shape=[None], dtype=tf.int32)
                    self.target_action_qvalues = tf.placeholder(shape=[None], dtype=tf.float32)
                    self.highlevel_lr = tf.placeholder(dtype=tf.float32)
                    action_onehot = tf.one_hot(self.chosen_actions, action_size, dtype=tf.float32)
                    # Q-value of the chosen action for each sample.
                    qvalue_for_chosen_action = tf.reduce_sum(self.action_qvalues * action_onehot, axis=1)
                    td_error = tf.square(self.target_action_qvalues - qvalue_for_chosen_action)
                    self.action_qvalue_loss = 0.5 * tf.reduce_mean(td_error)
                    highlevel_trainer = tf.train.RMSPropOptimizer(learning_rate=self.highlevel_lr)
                    highlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'lowlevel/%s' % scope)
                    gradients = tf.gradients(self.action_qvalue_loss, highlevel_params)
                    # Clip by global norm (40.0) before applying.
                    norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
                    global_highlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                                                'lowlevel/global/ex/main')
                    self.highlevel_update = highlevel_trainer.apply_gradients(
                        zip(norm_gradients, global_highlevel_params))
| 55.012085
| 123
| 0.473228
| 1,512
| 18,209
| 5.425265
| 0.10119
| 0.057052
| 0.043886
| 0.057052
| 0.841278
| 0.824698
| 0.795563
| 0.7724
| 0.753627
| 0.719859
| 0
| 0.012428
| 0.456478
| 18,209
| 330
| 124
| 55.178788
| 0.816409
| 0.005876
| 0
| 0.662651
| 0
| 0
| 0.021574
| 0.003706
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02008
| false
| 0
| 0.008032
| 0
| 0.048193
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
953d34fa43582a04419407658a07c6d2cffc68aa
| 187
|
py
|
Python
|
tests/strategies/__init__.py
|
lycantropos/rsrc_web
|
6702840befa4fa70114ce10543144410b453aa30
|
[
"MIT"
] | null | null | null |
tests/strategies/__init__.py
|
lycantropos/rsrc_web
|
6702840befa4fa70114ce10543144410b453aa30
|
[
"MIT"
] | 4
|
2019-06-18T18:36:50.000Z
|
2019-07-10T13:14:48.000Z
|
tests/strategies/__init__.py
|
lycantropos/rsrc_web
|
6702840befa4fa70114ce10543144410b453aa30
|
[
"MIT"
] | null | null | null |
from .literals import booleans
from .models import (readable_web_streams,
web_streams,
writeable_web_streams)
from .paths import web_url_strings
| 31.166667
| 43
| 0.663102
| 21
| 187
| 5.571429
| 0.571429
| 0.25641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.299465
| 187
| 5
| 44
| 37.4
| 0.89313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2516f01f8f44e4e51781ce4ffc642a90318eac4f
| 129
|
py
|
Python
|
Lib/site-packages/git/index/__init__.py
|
nemarugommula/ecommerce
|
60185e79655fbaf0fcad9e877a886fe9eb3c4451
|
[
"bzip2-1.0.6"
] | 10
|
2021-05-31T07:18:08.000Z
|
2022-03-19T09:20:11.000Z
|
Lib/site-packages/git/index/__init__.py
|
nemarugommula/ecommerce
|
60185e79655fbaf0fcad9e877a886fe9eb3c4451
|
[
"bzip2-1.0.6"
] | 10
|
2017-05-10T08:10:23.000Z
|
2020-03-23T10:23:37.000Z
|
Lib/site-packages/git/index/__init__.py
|
nemarugommula/ecommerce
|
60185e79655fbaf0fcad9e877a886fe9eb3c4451
|
[
"bzip2-1.0.6"
] | 38
|
2017-04-26T14:13:37.000Z
|
2021-06-24T11:36:38.000Z
|
"""Initialize the index package"""
# flake8: noqa
from __future__ import absolute_import
from .base import *
from .typ import *
| 18.428571
| 38
| 0.751938
| 17
| 129
| 5.411765
| 0.705882
| 0.217391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009174
| 0.155039
| 129
| 6
| 39
| 21.5
| 0.834862
| 0.325581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
251e7d6fbbff67cb94790461d92eb77f3f88ed53
| 111
|
py
|
Python
|
comet/handler/__init__.py
|
shinybrar/Comet
|
4229092fca74c130a7d4ecd4dbd22ae85f7e6308
|
[
"BSD-2-Clause"
] | 15
|
2015-11-29T18:53:58.000Z
|
2022-03-09T15:47:30.000Z
|
comet/handler/__init__.py
|
shinybrar/Comet
|
4229092fca74c130a7d4ecd4dbd22ae85f7e6308
|
[
"BSD-2-Clause"
] | 29
|
2016-01-21T18:10:45.000Z
|
2021-10-01T16:41:12.000Z
|
comet/handler/__init__.py
|
shinybrar/Comet
|
4229092fca74c130a7d4ecd4dbd22ae85f7e6308
|
[
"BSD-2-Clause"
] | 11
|
2016-01-22T14:05:51.000Z
|
2022-03-09T17:49:56.000Z
|
# Comet VOEvent Broker.
# Event handlers.
from comet.handler.relay import *
from comet.handler.spawn import *
| 18.5
| 33
| 0.765766
| 15
| 111
| 5.666667
| 0.666667
| 0.211765
| 0.376471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144144
| 111
| 5
| 34
| 22.2
| 0.894737
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
25446e5536422db53c3887d8fec73e5ede336aa7
| 5,460
|
py
|
Python
|
test/test_tensor_reorganization.py
|
entn-at/BrnoLM
|
9f8c62523382098809c1c0967f62a67d151eafe0
|
[
"MIT"
] | 17
|
2020-02-04T16:42:40.000Z
|
2021-11-11T14:37:32.000Z
|
test/test_tensor_reorganization.py
|
entn-at/BrnoLM
|
9f8c62523382098809c1c0967f62a67d151eafe0
|
[
"MIT"
] | null | null | null |
test/test_tensor_reorganization.py
|
entn-at/BrnoLM
|
9f8c62523382098809c1c0967f62a67d151eafe0
|
[
"MIT"
] | 4
|
2020-02-04T12:59:04.000Z
|
2021-05-30T14:10:54.000Z
|
from brnolm.runtime.tensor_reorganization import TensorReorganizer
import torch
from torch.autograd import Variable
from .common import TestCase
class Dummy_lstm():
    """Minimal LSTM stand-in: only supplies a zero-filled initial (h, c) state."""

    def __init__(self, nb_hidden):
        self._nb_hidden = nb_hidden

    def init_hidden(self, batch_size):
        """Return an (h, c) pair of zeros, each of shape (2, batch_size, nb_hidden)."""
        zeros = [[[0.0] * self._nb_hidden] * batch_size] * 2
        return (torch.FloatTensor(zeros), torch.FloatTensor(zeros))
class TensorReorganizerTests(TestCase):
    """Behaviour of TensorReorganizer driven by an LSTM-like (h, c) tuple state."""

    def setUp(self):
        self.lm = Dummy_lstm(nb_hidden=2)
        self.reorganizer = TensorReorganizer(self.lm.init_hidden)

    def _last_h(self):
        # Canonical 3-sample hidden state (duplicated across 2 layers),
        # shared by every test in this class.
        return (
            torch.FloatTensor([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]]] * 2),
            torch.FloatTensor([[[1, 1], [2, 2], [3, 3]]] * 2),
        )

    def test_passing(self):
        # Identity mask with a matching batch size leaves the state untouched.
        last_h = self._last_h()
        mask = torch.LongTensor([0, 1, 2])
        new_h = self.reorganizer(last_h, mask, 3)
        self.assertEqual(new_h, last_h)

    def test_shrinks(self):
        # Dropping the middle sample keeps only rows 0 and 2.
        new_h = self.reorganizer(self._last_h(), torch.LongTensor([0, 2]), 2)
        expected = (
            torch.FloatTensor([[[0.1, 0.1], [0.3, 0.3]]] * 2),
            torch.FloatTensor([[[1, 1], [3, 3]]] * 2),
        )
        self.assertEqual(new_h, expected)

    def test_requires_bsz_greater_than_mask(self):
        # A mask longer than the requested batch size must be rejected.
        mask = torch.LongTensor([0, 1, 2])
        self.assertRaises(ValueError, self.reorganizer, self._last_h(), mask, 2)

    def test_on_empty_mask_zeros(self):
        # No survivors: expect a freshly initialized zero state.
        new_h = self.reorganizer(self._last_h(), torch.LongTensor([]), 2)
        self.assertEqual(new_h, self.lm.init_hidden(2))

    def test_completion_by_zeros(self):
        # One survivor plus one new slot: survivor first, zeros appended.
        new_h = self.reorganizer(self._last_h(), torch.LongTensor([1]), 2)
        expected = (
            torch.FloatTensor([[[0.2, 0.2], [0.0, 0.0]]] * 2),
            torch.FloatTensor([[[2.0, 2.0], [0.0, 0.0]]] * 2),
        )
        self.assertEqual(new_h, expected)

    def test_bug_regression_single_addition(self):
        # Regression: two survivors plus exactly one zero-padded addition.
        new_h = self.reorganizer(self._last_h(), torch.LongTensor([1, 2]), 3)
        expected = (
            torch.FloatTensor([[[0.2, 0.2], [0.3, 0.3], [0.0, 0.0]]] * 2),
            torch.FloatTensor([[[2.0, 2.0], [3.0, 3.0], [0.0, 0.0]]] * 2),
        )
        self.assertEqual(new_h, expected)
class Dummy_srn():
    """Minimal simple-RNN stand-in: one layer, zero-filled initial state tensor."""

    def __init__(self, nb_hidden):
        self._nb_hidden = nb_hidden
        self._nb_layers = 1

    def init_hidden(self, batch_size):
        """Return a zero tensor of shape (nb_layers, batch_size, nb_hidden)."""
        state = torch.FloatTensor(self._nb_layers, batch_size, self._nb_hidden)
        return state.zero_()
class TensorReorganizerTests_SRN(TestCase):
    """Behaviour of TensorReorganizer driven by a plain-tensor (SRN) hidden state."""

    def setUp(self):
        lm = Dummy_srn(nb_hidden=2)
        self.reorganizer = TensorReorganizer(lm.init_hidden)

    def _last_h(self):
        # Canonical single-layer hidden state for a batch of three samples.
        return torch.FloatTensor([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]]])

    def test_passing(self):
        # Identity mask with a matching batch size leaves the state untouched.
        last_h = self._last_h()
        new_h = self.reorganizer(last_h, torch.LongTensor([0, 1, 2]), 3)
        self.assertEqual(new_h, last_h)

    def test_passing_variables(self):
        # Wrapping inputs in Variable must not change the pass-through behaviour.
        last_h = Variable(self._last_h())
        mask = Variable(torch.LongTensor([0, 1, 2]))
        new_h = self.reorganizer(last_h, mask, 3)
        self.assertEqual(new_h, last_h)

    def test_shrinks(self):
        # Dropping the middle sample keeps only rows 0 and 2.
        new_h = self.reorganizer(self._last_h(), torch.LongTensor([0, 2]), 2)
        self.assertEqual(new_h, torch.FloatTensor([[[0.1, 0.1], [0.3, 0.3]]]))

    def test_requires_bsz_greater_than_mask(self):
        # A mask longer than the requested batch size must be rejected.
        mask = torch.LongTensor([0, 1, 2])
        self.assertRaises(ValueError, self.reorganizer, self._last_h(), mask, 2)

    def test_on_empty_mask_zeros(self):
        # No survivors: expect an all-zero state of the requested batch size.
        new_h = self.reorganizer(self._last_h(), torch.LongTensor([]), 2)
        self.assertEqual(new_h, torch.FloatTensor([[[0.0, 0.0], [0.0, 0.0]]]))

    def test_completion_by_zeros(self):
        # One survivor plus one new slot: survivor first, zeros appended.
        new_h = self.reorganizer(self._last_h(), torch.LongTensor([1]), 2)
        self.assertEqual(new_h, torch.FloatTensor([[[0.2, 0.2], [0.0, 0.0]]]))
| 30.502793
| 86
| 0.540842
| 802
| 5,460
| 3.524938
| 0.07606
| 0.026884
| 0.033958
| 0.024054
| 0.853201
| 0.85214
| 0.823134
| 0.822073
| 0.819243
| 0.773258
| 0
| 0.080425
| 0.275824
| 5,460
| 178
| 87
| 30.674157
| 0.634547
| 0
| 0
| 0.676923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092308
| 1
| 0.138462
| false
| 0.023077
| 0.030769
| 0.015385
| 0.215385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.