hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
041f8a2e2d465dac715827f2f1568c5808c50c25
| 109
|
py
|
Python
|
signalfx_azure_function_python/version.py
|
claranet/signalfx-azure-function-python
|
e343d1f25615c7343a588a04f5edc8331ff544ca
|
[
"Apache-2.0"
] | null | null | null |
signalfx_azure_function_python/version.py
|
claranet/signalfx-azure-function-python
|
e343d1f25615c7343a588a04f5edc8331ff544ca
|
[
"Apache-2.0"
] | 1
|
2021-02-17T14:08:46.000Z
|
2021-02-17T14:08:46.000Z
|
signalfx_azure_function_python/version.py
|
claranet/signalfx-azure-function-python
|
e343d1f25615c7343a588a04f5edc8331ff544ca
|
[
"Apache-2.0"
] | null | null | null |
name = "signalfx-azure-function-python"
version = "1.0.1"
user_agent = f"signalfx_azure_function/{version}"
| 21.8
| 49
| 0.752294
| 16
| 109
| 4.9375
| 0.6875
| 0.329114
| 0.531646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.091743
| 109
| 4
| 50
| 27.25
| 0.767677
| 0
| 0
| 0
| 0
| 0
| 0.623853
| 0.577982
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
043e794cf6d16e7d01417fbdd4a093412ea9a042
| 32
|
py
|
Python
|
rasloader/__init__.py
|
ttruttmann/rasloader
|
87995d4dc64ae72e094e2171591335d0d3733408
|
[
"MIT"
] | 1
|
2022-02-16T03:49:51.000Z
|
2022-02-16T03:49:51.000Z
|
rasloader/__init__.py
|
ttruttmann/rasloader
|
87995d4dc64ae72e094e2171591335d0d3733408
|
[
"MIT"
] | null | null | null |
rasloader/__init__.py
|
ttruttmann/rasloader
|
87995d4dc64ae72e094e2171591335d0d3733408
|
[
"MIT"
] | null | null | null |
from .rasloader import RasLoader
| 32
| 32
| 0.875
| 4
| 32
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 32
| 1
| 32
| 32
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
045cb4795d7f44dc08bf536b5b969bd4ca469aca
| 25
|
py
|
Python
|
Algorithms/Util/__init__.py
|
grayy921013/RecSys
|
ce0683b86755935c943722cbba5541931978498e
|
[
"Apache-2.0"
] | null | null | null |
Algorithms/Util/__init__.py
|
grayy921013/RecSys
|
ce0683b86755935c943722cbba5541931978498e
|
[
"Apache-2.0"
] | null | null | null |
Algorithms/Util/__init__.py
|
grayy921013/RecSys
|
ce0683b86755935c943722cbba5541931978498e
|
[
"Apache-2.0"
] | null | null | null |
from .enums import Field
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f0c0b65819250aeeb4d1d52449b01afc7e5788f9
| 1,996
|
py
|
Python
|
cg_security/hasher.py
|
ConnectSW/cg_security
|
6eb9807183854cb96f0a7c53501b968569578c7a
|
[
"MIT"
] | null | null | null |
cg_security/hasher.py
|
ConnectSW/cg_security
|
6eb9807183854cb96f0a7c53501b968569578c7a
|
[
"MIT"
] | null | null | null |
cg_security/hasher.py
|
ConnectSW/cg_security
|
6eb9807183854cb96f0a7c53501b968569578c7a
|
[
"MIT"
] | null | null | null |
import hashlib
from .security_exception import SecurityException
def blake2b(plaintext) -> str:
"""
Cria o hash Blake2b da string
:param plaintext: String a ser feito o hash
:type plaintext: str
:return: Hash
:rtype: str
"""
try:
return hashlib.blake2b(plaintext.encode()).hexdigest()
except Exception as e:
raise SecurityException(str(e))
def blake2s(plaintext):
"""
Cria o hash Blake2s da string
:param plaintext: String a ser feito o hash
:type plaintext: str
:return: Hash
:rtype: str
"""
try:
return hashlib.blake2s(plaintext.encode()).hexdigest()
except Exception as e:
raise SecurityException(str(e))
def md5_hash(plaintext):
"""
Cria o hash MD5 da string
:param plaintext: String a ser feito o hash
:type plaintext: str
:return: Hash
:rtype: str
"""
try:
return hashlib.md5(plaintext.encode()).hexdigest()
except Exception as e:
raise SecurityException(str(e))
def sha1_hash(plaintext):
"""
Cria o hash SHA1 da string
:param plaintext: String a ser feito o hash
:type plaintext: str
:return: Hash
:rtype: str
"""
try:
return hashlib.sha1(plaintext.encode()).hexdigest()
except Exception as e:
raise SecurityException(str(e))
def sha256_hash(plaintext):
"""
Cria o hash SHA256 da string
:param plaintext: String a ser feito o hash
:type plaintext: str
:return: Hash
:rtype: str
"""
try:
return hashlib.sha256(plaintext.encode()).hexdigest()
except Exception as e:
raise SecurityException(str(e))
def sha512_hash(plaintext):
"""
Cria o hash SHA512 da string
:param plaintext: String a ser feito o hash
:type plaintext: str
:return: Hash
:rtype: str
"""
try:
return hashlib.sha512(plaintext.encode()).hexdigest()
except Exception as e:
raise SecurityException(str(e))
| 23.209302
| 62
| 0.634269
| 246
| 1,996
| 5.126016
| 0.138211
| 0.047581
| 0.042823
| 0.104679
| 0.842982
| 0.773196
| 0.773196
| 0.773196
| 0.773196
| 0.773196
| 0
| 0.020534
| 0.268036
| 1,996
| 86
| 63
| 23.209302
| 0.842574
| 0.358717
| 0
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.0625
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f0d84faefbbb55023852873eddb5e7c9fe579f36
| 28
|
py
|
Python
|
main_2.py
|
jessicafinnie/testGitProjectForClass
|
eff728cc5eb3e8370729a378dbca6226f310477c
|
[
"MIT"
] | null | null | null |
main_2.py
|
jessicafinnie/testGitProjectForClass
|
eff728cc5eb3e8370729a378dbca6226f310477c
|
[
"MIT"
] | null | null | null |
main_2.py
|
jessicafinnie/testGitProjectForClass
|
eff728cc5eb3e8370729a378dbca6226f310477c
|
[
"MIT"
] | null | null | null |
print("Hello again, world!")
| 28
| 28
| 0.714286
| 4
| 28
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 28
| 1
| 28
| 28
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0.655172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
0b0161e4d690d3bc961d632ecca9a94ac592e7a5
| 152
|
py
|
Python
|
stats-essentials/data/weather.py
|
chyld/demoX
|
27f26a553aeb6682173f6b1b8dc8969101993324
|
[
"MIT"
] | 16
|
2018-09-21T23:14:35.000Z
|
2022-01-21T10:38:52.000Z
|
stats-essentials/data/weather.py
|
chyld/demoX
|
27f26a553aeb6682173f6b1b8dc8969101993324
|
[
"MIT"
] | null | null | null |
stats-essentials/data/weather.py
|
chyld/demoX
|
27f26a553aeb6682173f6b1b8dc8969101993324
|
[
"MIT"
] | 27
|
2018-01-08T22:59:38.000Z
|
2022-02-09T06:44:38.000Z
|
from pprint import pprint
import requests
r = requests.get('http://api.openweathermap.org/data/2.5/weather?q=London&APPID={APIKEY}')
pprint(r.json())
| 21.714286
| 90
| 0.75
| 24
| 152
| 4.75
| 0.791667
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014286
| 0.078947
| 152
| 6
| 91
| 25.333333
| 0.8
| 0
| 0
| 0
| 0
| 0.25
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
0b0b23a900cfd52f65edf59de035656db09ad4da
| 168
|
py
|
Python
|
lume/src/infrastructure/services/logger/print_logger.py
|
alice-biometrics/lume
|
a5605f75f05c9007e164e0644dd34f83dfbdfc7d
|
[
"MIT"
] | 15
|
2020-03-20T19:33:07.000Z
|
2022-01-07T15:11:08.000Z
|
lume/src/infrastructure/services/logger/print_logger.py
|
alice-biometrics/lume
|
a5605f75f05c9007e164e0644dd34f83dfbdfc7d
|
[
"MIT"
] | 18
|
2020-04-07T10:53:30.000Z
|
2022-01-24T07:13:39.000Z
|
lume/src/infrastructure/services/logger/print_logger.py
|
alice-biometrics/lume
|
a5605f75f05c9007e164e0644dd34f83dfbdfc7d
|
[
"MIT"
] | null | null | null |
from lume.src.domain.services.logger import Logger
class PrintLogger(Logger):
def log(self, logging_level, message):
print(f"{logging_level}: {message}")
| 24
| 50
| 0.720238
| 22
| 168
| 5.409091
| 0.772727
| 0.201681
| 0.319328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154762
| 168
| 6
| 51
| 28
| 0.838028
| 0
| 0
| 0
| 0
| 0
| 0.154762
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
9bf8fb256889f7bd1d8f1be1e5b57ae06f8da239
| 69
|
py
|
Python
|
usecase_2_calling_v2.py
|
docktermj/python-future-proofing-apis
|
ae8ac783a14c6f9d4050ad2545c82f96fb990a5c
|
[
"Apache-2.0"
] | null | null | null |
usecase_2_calling_v2.py
|
docktermj/python-future-proofing-apis
|
ae8ac783a14c6f9d4050ad2545c82f96fb990a5c
|
[
"Apache-2.0"
] | null | null | null |
usecase_2_calling_v2.py
|
docktermj/python-future-proofing-apis
|
ae8ac783a14c6f9d4050ad2545c82f96fb990a5c
|
[
"Apache-2.0"
] | null | null | null |
from api_version_2 import stable_api
stable_api(1, 2)
stable_api(1)
| 13.8
| 36
| 0.811594
| 14
| 69
| 3.642857
| 0.5
| 0.529412
| 0.392157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065574
| 0.115942
| 69
| 4
| 37
| 17.25
| 0.770492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
acc3263bbbd98cd69fd0b1982db742b42a06b382
| 237
|
py
|
Python
|
swagger_server/test/test_ols.py
|
bigbio/sdrfcheck-api
|
c634113cfbcc3ea81b9127a67d76975adbe6aec5
|
[
"Apache-2.0"
] | null | null | null |
swagger_server/test/test_ols.py
|
bigbio/sdrfcheck-api
|
c634113cfbcc3ea81b9127a67d76975adbe6aec5
|
[
"Apache-2.0"
] | 1
|
2020-04-30T08:52:21.000Z
|
2020-04-30T08:58:30.000Z
|
swagger_server/test/test_ols.py
|
bigbio/sdrfcheck-api
|
c634113cfbcc3ea81b9127a67d76975adbe6aec5
|
[
"Apache-2.0"
] | null | null | null |
def test_besthit():
assert False
def test_get_term():
assert False
def test_get_ancestors():
assert False
def test_search():
assert False
def test_suggest():
assert False
def test_select():
assert False
| 10.304348
| 25
| 0.679325
| 32
| 237
| 4.78125
| 0.34375
| 0.27451
| 0.457516
| 0.588235
| 0.27451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.244726
| 237
| 22
| 26
| 10.772727
| 0.854749
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c582ad7fb26907b8c1e9561889e4123ef92da1ef
| 3,886
|
py
|
Python
|
results/models.py
|
lilbex/bitcom
|
c0d09155b655de3ebe84851f24e5c07ef60da611
|
[
"MIT"
] | null | null | null |
results/models.py
|
lilbex/bitcom
|
c0d09155b655de3ebe84851f24e5c07ef60da611
|
[
"MIT"
] | null | null | null |
results/models.py
|
lilbex/bitcom
|
c0d09155b655de3ebe84851f24e5c07ef60da611
|
[
"MIT"
] | null | null | null |
from django.db import models
class agentname(models.Model):
name_id = models.IntegerField(primary_key=True, editable=False)
firstname = models.CharField(max_length=200)
lastname = models.CharField(max_length=200)
email = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
pollingunit_uniqueid = models.IntegerField()
class announced_lga_results(models.Model):
result_id = models.IntegerField(primary_key=True, editable=False)
lga_name = models.CharField(max_length=200)
party_abbreviation = models.CharField(max_length=50)
party_score = models.IntegerField()
entered_by_user = models.CharField(max_length=200)
date_entered = models.DateTimeField()
user_ip_address = models.CharField(max_length=200)
class announced_pu_results(models.Model):
result_id = models.AutoField(primary_key=True, editable=False)
polling_unit_uniqueid = models.CharField(max_length=200)
party_abbreviation = models.CharField(max_length=50)
party_score = models.IntegerField()
entered_by_user = models.CharField(max_length=7)
date_entered = models.DateTimeField()
user_ip_address = models.CharField(max_length=100)
class announced_state_results(models.Model):
result_id = models.IntegerField(primary_key=True, editable=False)
state_name = models.CharField(max_length=200)
party_abbreviation = models.CharField(max_length=50)
party_score = models.IntegerField()
entered_by_user = models.CharField(max_length=200)
date_entered = models.DateTimeField()
user_ip_address = models.CharField(max_length=100)
class announced_ward_results(models.Model):
result_id = models.IntegerField(primary_key=True, editable=False)
ward_name = models.CharField(max_length=200)
party_abbreviation = models.CharField(max_length=50)
party_score = models.IntegerField()
entered_by_user = models.CharField(max_length=200)
date_entered = models.DateTimeField()
user_ip_address = models.CharField(max_length=100)
class lga(models.Model):
uniqueid = models.IntegerField(primary_key=True, editable=False)
lga_id = models.IntegerField()
lga_name = models.CharField(max_length=200)
state_id = models.IntegerField()
lga_description = models.TextField()
entered_by_user = models.CharField(max_length=200)
date_entered = models.DateTimeField(max_length=200)
user_ip_address = models.CharField(max_length=200)
class party(models.Model):
id = models.IntegerField(primary_key=True, editable=False)
partyid = models.CharField(max_length=200)
partyname = models.CharField(max_length=50)
class polling_unit(models.Model):
uniqueid = models.IntegerField(primary_key=True, editable=False)
polling_unit_id = models.IntegerField()
ward_id = models.IntegerField()
lga_id = models.IntegerField()
uniquewardid = models.IntegerField()
polling_unit_number = models.CharField(max_length=200)
polling_unit_name = models.CharField(max_length=200)
polling_unit_description = models.TextField()
lat = models.CharField(max_length=50)
long = models.CharField(max_length=200)
entered_by_user = models.CharField(max_length=200)
date_entered = models.DateTimeField()
user_ip_address = models.CharField(max_length=200)
class states(models.Model):
state_id = models.IntegerField(unique=True, primary_key=True, editable=False)
state_name = models.CharField(max_length=200)
class ward(models.Model):
uniqueid = models.IntegerField(unique=True, primary_key=True, editable=False)
ward_id = models.IntegerField()
ward_name = models.CharField(max_length=50)
lga_id = models.IntegerField()
ward_description = models.TextField()
entered_by_user = models.CharField(max_length=200)
date_entered = models.DateTimeField()
user_ip_address = models.CharField(max_length=50)
| 39.252525
| 81
| 0.764539
| 494
| 3,886
| 5.753036
| 0.119433
| 0.114004
| 0.221675
| 0.295567
| 0.822308
| 0.738916
| 0.710415
| 0.654469
| 0.619282
| 0.574243
| 0
| 0.029219
| 0.136902
| 3,886
| 98
| 82
| 39.653061
| 0.818128
| 0
| 0
| 0.506329
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012658
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
c58b30ff4fa86fcf902e661e654dad96f910108c
| 135
|
py
|
Python
|
stuff_for_python_socket/trick2/__init__.py
|
elafontaine/socket_presentation
|
06113fd3c3eec257b45951cc26471ff0bbf1af0f
|
[
"MIT"
] | null | null | null |
stuff_for_python_socket/trick2/__init__.py
|
elafontaine/socket_presentation
|
06113fd3c3eec257b45951cc26471ff0bbf1af0f
|
[
"MIT"
] | null | null | null |
stuff_for_python_socket/trick2/__init__.py
|
elafontaine/socket_presentation
|
06113fd3c3eec257b45951cc26471ff0bbf1af0f
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
print("{file} is main".format(file=__file__))
else:
print("{file} is loaded".format(file=__file__))
| 27
| 51
| 0.666667
| 18
| 135
| 4.111111
| 0.5
| 0.243243
| 0.297297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140741
| 135
| 5
| 51
| 27
| 0.637931
| 0
| 0
| 0
| 0
| 0
| 0.281481
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
c5aa1ecc6d9e753df84b01e1f6c18d7caa9a5034
| 10,793
|
py
|
Python
|
src/tests/test_pagure_lib_add_user_to_project.py
|
yifengyou/learn-pagure
|
e54ba955368918c92ad2be6347b53bb2c24a228c
|
[
"Unlicense"
] | null | null | null |
src/tests/test_pagure_lib_add_user_to_project.py
|
yifengyou/learn-pagure
|
e54ba955368918c92ad2be6347b53bb2c24a228c
|
[
"Unlicense"
] | null | null | null |
src/tests/test_pagure_lib_add_user_to_project.py
|
yifengyou/learn-pagure
|
e54ba955368918c92ad2be6347b53bb2c24a228c
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
(c) 2017 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
from __future__ import unicode_literals, absolute_import
import unittest
import sys
import os
from mock import patch, MagicMock
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import pagure.lib.query
import tests
class PagureLibAddUserToProjecttests(tests.Modeltests):
""" Tests for pagure.lib.query.add_user_to_project """
def setUp(self):
""" Set up the environnment, ran before every tests. """
super(PagureLibAddUserToProjecttests, self).setUp()
tests.create_projects(self.session)
item = pagure.lib.model.User(
user="bar",
fullname="bar baz",
password="foo",
default_email="bar@bar.com",
)
self.session.add(item)
item = pagure.lib.model.UserEmail(user_id=3, email="bar@bar.com")
self.session.add(item)
self.session.commit()
# Before
repo = pagure.lib.query._get_project(self.session, "test")
self.assertEqual(len(repo.users), 0)
msg = pagure.lib.query.add_user_to_project(
session=self.session, project=repo, new_user="foo", user="pingou"
)
self.session.commit()
self.assertEqual(msg, "User added")
# After
repo = pagure.lib.query._get_project(self.session, "test")
self.assertEqual(len(repo.users), 1)
self.assertEqual(repo.users[0].user, "foo")
self.assertEqual(repo.admins[0].user, "foo")
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_re_add_user_to_project_default(self):
""" Update an existing user but to the same access level. """
repo = pagure.lib.query._get_project(self.session, "test")
# Try adding the same user with the same access
self.assertRaises(
pagure.exceptions.PagureException,
pagure.lib.query.add_user_to_project,
session=self.session,
project=repo,
new_user="foo",
user="pingou",
access="admin",
)
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_update_user_to_project_default(self):
"""Update an existing user without any required group membership."""
repo = pagure.lib.query._get_project(self.session, "test")
# Update the access of the user
msg = pagure.lib.query.add_user_to_project(
session=self.session,
project=repo,
new_user="foo",
user="pingou",
access="commit",
)
self.session.commit()
self.assertEqual(msg, "User access updated")
self.assertEqual(len(repo.users), 1)
self.assertEqual(repo.users[0].user, "foo")
self.assertEqual(repo.committers[0].user, "foo")
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_update_user_to_project_require_packager_on_all(self):
"""
Update an existing user but required group membership on all
projects.
"""
repo = pagure.lib.query._get_project(self.session, "test")
config = {"*": ["packager"]}
# Update the access of the user
self.assertRaises(
pagure.exceptions.PagureException,
pagure.lib.query.add_user_to_project,
session=self.session,
project=repo,
new_user="foo",
user="pingou",
access="admin",
required_groups=config,
)
self.session.commit()
self.assertEqual(len(repo.users), 1)
self.assertEqual(repo.users[0].user, "foo")
self.assertEqual(repo.committers[0].user, "foo")
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_update_user_to_project_require_packager_on_st(self):
"""
Update an existing user but required group membership on all
projects match *st.
"""
repo = pagure.lib.query._get_project(self.session, "test")
config = {"*st": ["packager"]}
# Update the access of the user
self.assertRaises(
pagure.exceptions.PagureException,
pagure.lib.query.add_user_to_project,
session=self.session,
project=repo,
new_user="foo",
user="pingou",
access="admin",
required_groups=config,
)
self.session.commit()
self.assertEqual(len(repo.users), 1)
self.assertEqual(repo.users[0].user, "foo")
self.assertEqual(repo.committers[0].user, "foo")
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_update_user_to_project_require_packager_on_te(self):
"""
Update an existing user but required group membership on all
projects match te*.
"""
repo = pagure.lib.query._get_project(self.session, "test")
config = {"te*": ["packager"]}
# Update the access of the user
self.assertRaises(
pagure.exceptions.PagureException,
pagure.lib.query.add_user_to_project,
session=self.session,
project=repo,
new_user="foo",
user="pingou",
access="admin",
required_groups=config,
)
self.session.commit()
self.assertEqual(len(repo.users), 1)
self.assertEqual(repo.users[0].user, "foo")
self.assertEqual(repo.committers[0].user, "foo")
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_update_user_to_project_require_packager_on_test(self):
"""
Update an existing user but required group membership on a specific
project: test.
"""
repo = pagure.lib.query._get_project(self.session, "test")
config = {"test": ["packager"]}
# Update the access of the user
self.assertRaises(
pagure.exceptions.PagureException,
pagure.lib.query.add_user_to_project,
session=self.session,
project=repo,
new_user="foo",
user="pingou",
access="admin",
required_groups=config,
)
self.session.commit()
self.assertEqual(len(repo.users), 1)
self.assertEqual(repo.users[0].user, "foo")
self.assertEqual(repo.committers[0].user, "foo")
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_add_user_to_test2_require_packager_on_test(self):
"""
Add user to project test2 while the configuration requires group
membership on the project test.
"""
repo = pagure.lib.query._get_project(self.session, "test2")
self.assertEqual(len(repo.users), 0)
config = {"test": ["packager"]}
# Add the user
pagure.lib.query.add_user_to_project(
session=self.session,
project=repo,
new_user="foo",
user="pingou",
access="admin",
required_groups=config,
)
self.session.commit()
self.assertEqual(len(repo.users), 1)
self.assertEqual(repo.users[0].user, "foo")
self.assertEqual(repo.committers[0].user, "foo")
class PagureLibAddUserToProjectWithGrouptests(PagureLibAddUserToProjecttests):
""" Tests for pagure.lib.query.add_user_to_project """
def setUp(self):
""" Set up the environnment, ran before every tests. """
super(PagureLibAddUserToProjectWithGrouptests, self).setUp()
# Create group
msg = pagure.lib.query.add_group(
self.session,
group_name="packager",
display_name="packager",
description="The Fedora packager groups",
group_type="user",
user="pingou",
is_admin=False,
blacklist=[],
)
self.session.commit()
self.assertEqual(msg, "User `pingou` added to the group `packager`.")
# Add user to group
group = pagure.lib.query.search_groups(
self.session, group_name="packager"
)
msg = pagure.lib.query.add_user_to_group(
self.session,
username="bar",
group=group,
user="pingou",
is_admin=True,
)
self.session.commit()
self.assertEqual(msg, "User `bar` added to the group `packager`.")
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_add_user_to_test_require_packager_on_test(self):
"""
Add user to project test while the configuration requires group
membership on the project test.
"""
repo = pagure.lib.query._get_project(self.session, "test")
self.assertEqual(len(repo.users), 1)
config = {"test": ["packager"]}
# Add the user to the project
pagure.lib.query.add_user_to_project(
session=self.session,
project=repo,
new_user="bar",
user="pingou",
access="commit",
required_groups=config,
)
self.session.commit()
repo = pagure.lib.query._get_project(self.session, "test")
self.assertEqual(len(repo.users), 2)
self.assertEqual(repo.users[0].user, "foo")
self.assertEqual(repo.committers[0].user, "foo")
self.assertEqual(repo.users[1].user, "bar")
self.assertEqual(repo.committers[1].user, "bar")
@patch("pagure.lib.notify.send_email", MagicMock(return_value=True))
def test_add_user_to_test_require_packager(self):
"""
Add user to project test while the configuration requires group
membership on all the projects.
"""
repo = pagure.lib.query._get_project(self.session, "test")
self.assertEqual(len(repo.users), 1)
config = {"*": ["packager"]}
# Add the user to the project
pagure.lib.query.add_user_to_project(
session=self.session,
project=repo,
new_user="bar",
user="pingou",
access="commit",
required_groups=config,
)
self.session.commit()
repo = pagure.lib.query._get_project(self.session, "test")
self.assertEqual(len(repo.users), 2)
self.assertEqual(repo.users[0].user, "foo")
self.assertEqual(repo.committers[0].user, "foo")
self.assertEqual(repo.users[1].user, "bar")
self.assertEqual(repo.committers[1].user, "bar")
if __name__ == "__main__":
unittest.main(verbosity=2)
| 33.623053
| 78
| 0.604281
| 1,245
| 10,793
| 5.082731
| 0.116466
| 0.071271
| 0.064159
| 0.040455
| 0.832965
| 0.813053
| 0.799147
| 0.770386
| 0.760272
| 0.727402
| 0
| 0.005882
| 0.275364
| 10,793
| 320
| 79
| 33.728125
| 0.803222
| 0.123784
| 0
| 0.690909
| 0
| 0
| 0.088562
| 0.027451
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.05
| false
| 0.004545
| 0.031818
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c5c45df4c969a48aa83cf4183515174e9440ad72
| 117
|
py
|
Python
|
ccc_client_dev.py
|
ohsu-comp-bio/ccc_client
|
433a7fad3d8e6817362678b783110f38ca81e0a7
|
[
"MIT"
] | null | null | null |
ccc_client_dev.py
|
ohsu-comp-bio/ccc_client
|
433a7fad3d8e6817362678b783110f38ca81e0a7
|
[
"MIT"
] | null | null | null |
ccc_client_dev.py
|
ohsu-comp-bio/ccc_client
|
433a7fad3d8e6817362678b783110f38ca81e0a7
|
[
"MIT"
] | null | null | null |
"""
ccc_client entrypoint script
"""
import ccc_client.cli
if __name__ == "__main__":
ccc_client.cli.cli_main()
| 14.625
| 29
| 0.717949
| 16
| 117
| 4.5
| 0.5625
| 0.375
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145299
| 117
| 7
| 30
| 16.714286
| 0.72
| 0.239316
| 0
| 0
| 0
| 0
| 0.098765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c5ff71800837220027bc1b54d138c4b03b406cbc
| 126
|
py
|
Python
|
TextCNN/data/MR/__init__.py
|
wangtao666666/NLP
|
6c1507b532800ef2f40fcf8450c3eb414816302f
|
[
"MIT"
] | 2
|
2021-05-09T13:17:37.000Z
|
2021-06-06T08:58:53.000Z
|
TextCNN/data/MR/__init__.py
|
wangtao666666/NLP
|
6c1507b532800ef2f40fcf8450c3eb414816302f
|
[
"MIT"
] | null | null | null |
TextCNN/data/MR/__init__.py
|
wangtao666666/NLP
|
6c1507b532800ef2f40fcf8450c3eb414816302f
|
[
"MIT"
] | 1
|
2020-11-04T06:33:21.000Z
|
2020-11-04T06:33:21.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2020/10/15 上午10:59
# @Author : TaoWang
# @Description :
from .MR_Dataset import MR_Dataset
| 18
| 34
| 0.650794
| 18
| 126
| 4.444444
| 0.888889
| 0.225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0.174603
| 126
| 7
| 34
| 18
| 0.644231
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a85096ce0206606764119c3d0362bf1068fcb4a5
| 1,387
|
py
|
Python
|
2021/dayFour/dayFour.py
|
joshuagornall/Advent-of-Code
|
8f1e3affb8504309c418dd64b45d1e1912462da8
|
[
"MIT"
] | null | null | null |
2021/dayFour/dayFour.py
|
joshuagornall/Advent-of-Code
|
8f1e3affb8504309c418dd64b45d1e1912462da8
|
[
"MIT"
] | null | null | null |
2021/dayFour/dayFour.py
|
joshuagornall/Advent-of-Code
|
8f1e3affb8504309c418dd64b45d1e1912462da8
|
[
"MIT"
] | null | null | null |
# Part 1
data = data.split("\r\n\r\n")
nums = list(map(int, data[0].split(",")))
boards = []
for k in data[1:]:
boards.append([])
for j in k.splitlines():
boards[-1].append(list(map(int, j.split())))
for num in nums:
for board in boards:
for row in board:
for i in range(len(row)):
if row[i] == num:
row[i] = None
if any(all(x == None for x in row) for row in board) or any(all(row[i] == None for row in board) for i in range(len(board[0]))):
print(sum(x or 0 for row in board for x in row) * num)
exit(0)
# Part 2
data = data.split("\r\n\r\n")
nums = list(map(int, data[0].split(",")))
boards = []
for k in data[1:]:
boards.append([])
for j in k.splitlines():
boards[-1].append(list(map(int, j.split())))
lb = None
for num in nums:
bi = 0
while bi < len(boards):
board = boards[bi]
for row in board:
for i in range(len(row)):
if row[i] == num:
row[i] = None
if any(all(x == None for x in row) for row in board) or any(all(row[i] == None for row in board) for i in range(len(board[0]))):
lb = board
del boards[bi]
else:
bi += 1
if len(boards) == 0:
break
print(sum(x or 0 for row in lb for x in row) * num)
| 27.196078
| 136
| 0.504686
| 234
| 1,387
| 2.991453
| 0.17094
| 0.068571
| 0.091429
| 0.13
| 0.804286
| 0.762857
| 0.762857
| 0.762857
| 0.705714
| 0.705714
| 0
| 0.017486
| 0.340303
| 1,387
| 50
| 137
| 27.74
| 0.747541
| 0.009373
| 0
| 0.65
| 0
| 0
| 0.013129
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a87c3780e4800a9d4c8355c60e2d800797ba6bcc
| 177
|
py
|
Python
|
tests/base_classifier.py
|
bsteubing/multifunctional
|
92c6e427374b151a8ea57c9be9e9245c717b9d46
|
[
"BSD-3-Clause"
] | 1
|
2021-02-03T22:01:08.000Z
|
2021-02-03T22:01:08.000Z
|
tests/base_classifier.py
|
bsteubing/multifunctional
|
92c6e427374b151a8ea57c9be9e9245c717b9d46
|
[
"BSD-3-Clause"
] | null | null | null |
tests/base_classifier.py
|
bsteubing/multifunctional
|
92c6e427374b151a8ea57c9be9e9245c717b9d46
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import multifunctional as mf
from multifunctional.classifiers.base import BaseMFClassifier
from bw2data.tests import bw2test
@bw2test
def test_dummy():
pass
| 17.7
| 61
| 0.824859
| 22
| 177
| 6.590909
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019608
| 0.135593
| 177
| 9
| 62
| 19.666667
| 0.928105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0.142857
| 0.571429
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
a89cc305d82a83f765b3eed51db6206405092143
| 2,539
|
py
|
Python
|
epytope/Data/pssms/smm/mat/B_18_01_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smm/mat/B_18_01_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smm/mat/B_18_01_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
B_18_01_10 = {0: {'A': 0.397, 'C': 0.192, 'E': -0.169, 'D': -0.682, 'G': 0.447, 'F': -0.363, 'I': -0.22, 'H': 0.05, 'K': 0.573, 'M': -0.668, 'L': -0.095, 'N': -0.126, 'Q': 0.287, 'P': 0.229, 'S': 0.084, 'R': 0.264, 'T': -0.198, 'W': 0.125, 'V': 0.143, 'Y': -0.27}, 1: {'A': 0.077, 'C': 0.0, 'E': -0.871, 'D': 0.533, 'G': -0.777, 'F': 0.263, 'I': 0.148, 'H': -0.0, 'K': 0.938, 'M': 0.013, 'L': -0.726, 'N': -0.273, 'Q': -0.538, 'P': -0.114, 'S': 0.928, 'R': 0.056, 'T': 0.081, 'W': 0.155, 'V': 0.028, 'Y': 0.078}, 2: {'A': -0.027, 'C': -0.009, 'E': -0.14, 'D': 0.271, 'G': 0.606, 'F': -0.518, 'I': 0.009, 'H': -0.051, 'K': 0.371, 'M': -0.464, 'L': 0.06, 'N': -0.149, 'Q': -0.269, 'P': 0.555, 'S': 0.062, 'R': 0.065, 'T': -0.002, 'W': -0.312, 'V': -0.048, 'Y': -0.011}, 3: {'A': -0.047, 'C': 0.015, 'E': -0.014, 'D': -0.041, 'G': 0.021, 'F': -0.039, 'I': 0.024, 'H': 0.014, 'K': 0.028, 'M': -0.073, 'L': -0.057, 'N': 0.146, 'Q': -0.005, 'P': 0.121, 'S': -0.011, 'R': -0.0, 'T': 0.066, 'W': -0.049, 'V': -0.016, 'Y': -0.082}, 4: {'A': -0.173, 'C': 0.038, 'E': 0.058, 'D': -0.017, 'G': 0.007, 'F': -0.108, 'I': 0.027, 'H': 0.1, 'K': 0.227, 'M': 0.001, 'L': -0.193, 'N': -0.097, 'Q': 0.019, 'P': 0.111, 'S': 0.007, 'R': 0.05, 'T': -0.089, 'W': 0.001, 'V': -0.043, 'Y': 0.074}, 5: {'A': -0.009, 'C': -0.096, 'E': 0.045, 'D': 0.109, 'G': 0.051, 'F': -0.126, 'I': -0.016, 'H': -0.06, 'K': 0.015, 'M': -0.0, 'L': -0.066, 'N': -0.075, 'Q': 0.156, 'P': 0.079, 'S': -0.006, 'R': -0.016, 'T': 0.146, 'W': -0.083, 'V': -0.017, 'Y': -0.03}, 6: {'A': -0.101, 'C': -0.108, 'E': 0.11, 'D': -0.014, 'G': -0.015, 'F': -0.131, 'I': 0.029, 'H': 0.096, 'K': 0.103, 'M': -0.028, 'L': -0.075, 'N': 0.029, 'Q': 0.181, 'P': 0.157, 'S': 0.052, 'R': -0.022, 'T': -0.025, 'W': -0.04, 'V': -0.061, 'Y': -0.136}, 7: {'A': -0.409, 'C': 0.344, 'E': 0.089, 'D': 0.197, 'G': 0.032, 'F': -0.434, 'I': 0.211, 'H': -0.255, 'K': 0.828, 'M': -0.471, 'L': -0.126, 'N': 0.006, 'Q': 0.153, 'P': 0.233, 'S': 0.073, 'R': 0.396, 'T': 0.02, 'W': 
-0.229, 'V': -0.242, 'Y': -0.417}, 8: {'A': -0.01, 'C': 0.005, 'E': -0.004, 'D': 0.01, 'G': 0.006, 'F': 0.01, 'I': 0.003, 'H': -0.0, 'K': -0.001, 'M': 0.006, 'L': 0.003, 'N': 0.001, 'Q': -0.001, 'P': 0.004, 'S': -0.01, 'R': 0.003, 'T': -0.013, 'W': -0.001, 'V': -0.011, 'Y': -0.002}, 9: {'A': 0.16, 'C': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.264, 'F': -0.548, 'I': 0.191, 'H': 0.0, 'K': 0.373, 'M': -0.625, 'L': 0.004, 'N': 0.0, 'Q': -0.049, 'P': 0.0, 'S': 0.188, 'R': 0.129, 'T': 0.542, 'W': 0.136, 'V': 0.189, 'Y': -0.953}, -1: {'con': 4.64684}}
| 2,539
| 2,539
| 0.389917
| 618
| 2,539
| 1.597087
| 0.279935
| 0.02229
| 0.009119
| 0.012158
| 0.039514
| 0
| 0
| 0
| 0
| 0
| 0
| 0.368471
| 0.163056
| 2,539
| 1
| 2,539
| 2,539
| 0.096
| 0
| 0
| 0
| 0
| 0
| 0.079921
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a8ac1180e759cda7753f63c34af1669ac95511dc
| 4,885
|
py
|
Python
|
tethys_services/base.py
|
quyendong/tethys
|
99bcb524d5b2021b88d5fa15b7ed6b8acb460997
|
[
"BSD-2-Clause"
] | 1
|
2020-10-08T20:38:33.000Z
|
2020-10-08T20:38:33.000Z
|
tethys_services/base.py
|
quyendong/tethys
|
99bcb524d5b2021b88d5fa15b7ed6b8acb460997
|
[
"BSD-2-Clause"
] | 1
|
2018-04-14T19:40:54.000Z
|
2018-04-14T19:40:54.000Z
|
tethys_services/base.py
|
quyendong/tethys
|
99bcb524d5b2021b88d5fa15b7ed6b8acb460997
|
[
"BSD-2-Clause"
] | 1
|
2021-09-07T14:47:11.000Z
|
2021-09-07T14:47:11.000Z
|
"""
********************************************************************************
* Name: base.py
* Author: Nathan Swain
* Created On: 2014
* Copyright: (c) Brigham Young University 2014
* License: BSD 2-Clause
********************************************************************************
"""
from tethys_dataset_services.valid_engines import VALID_ENGINES, VALID_SPATIAL_ENGINES
from tethys_apps.cli.cli_colors import pretty_output, FG_WHITE
class DatasetService:
"""
Used to define dataset services for apps.
"""
def __init__(self, name, type, endpoint, apikey=None, username=None, password=None):
"""
Constructor
"""
self.name = name
# Validate the types
if type in VALID_ENGINES:
self.type = type
self.engine = VALID_ENGINES[type]
else:
engine_key_list = list(VALID_ENGINES)
if len(VALID_ENGINES) > 2:
comma_separated_types = ', '.join('"{0}"'.format(t) for t in engine_key_list[:-1])
last_type = '"{0}"'.format(engine_key_list[-1])
valid_types_string = '{0}, and {1}'.format(comma_separated_types, last_type)
elif len(VALID_ENGINES) == 2:
valid_types_string = '"{0}" and "{1}"'.format(engine_key_list[0], engine_key_list[1])
else:
valid_types_string = '"{0}"'.format(engine_key_list[0])
raise ValueError('The value "{0}" is not a valid for argument "type" of DatasetService. Valid values for '
'"type" argument include {1}.'.format(type, valid_types_string))
self.endpoint = endpoint
self.apikey = apikey
self.username = username
self.password = password
with pretty_output(FG_WHITE) as p:
p.write('DEPRECATION WARNING: Storing connection credentials for Dataset Services in the app.py is a '
'security leak. App configuration for Dataset Services will be deprecated in version 1.2.')
def __repr__(self):
"""
String representation
"""
return '<DatasetService: type={0}, api_endpoint={1}>'.format(self.type, self.endpoint)
class SpatialDatasetService:
"""
Used to define spatial dataset services for apps.
"""
def __init__(self, name, type, endpoint, apikey=None, username=None, password=None):
"""
Constructor
"""
self.name = name
# Validate the types
if type in VALID_SPATIAL_ENGINES:
self.type = type
self.engine = VALID_SPATIAL_ENGINES[type]
else:
spatial_engine_key_list = list(VALID_SPATIAL_ENGINES)
if len(VALID_SPATIAL_ENGINES) > 2:
comma_separated_types = ', '.join('"{0}"'.format(t) for t in spatial_engine_key_list[:-1])
last_type = '"{0}"'.format(spatial_engine_key_list[-1])
valid_types_string = '{0}, and {1}'.format(comma_separated_types, last_type)
elif len(VALID_SPATIAL_ENGINES) == 2:
valid_types_string = '"{0}" and "{1}"'.format(spatial_engine_key_list[0], spatial_engine_key_list[1])
else:
valid_types_string = '"{0}"'.format(spatial_engine_key_list[0])
raise ValueError('The value "{0}" is not a valid for argument "type" of SpatialDatasetService.'
' Valid values for "type" argument include {1}.'.format(type, valid_types_string))
self.endpoint = endpoint
self.apikey = apikey
self.username = username
self.password = password
with pretty_output(FG_WHITE) as p:
p.write('DEPRECATION WARNING: Storing connection credentials for Spatial Dataset Services '
'in the app.py is a security leak. App configuration for Spatial Dataset Services '
'will be deprecated in version 1.2.')
def __repr__(self):
"""
String representation
"""
return '<SpatialDatasetService: type={0}, api_endpoint={1}>'.format(self.type, self.endpoint)
class WpsService:
"""
Used to define dataset services for apps.
"""
def __init__(self, name, endpoint, username=None, password=None):
"""
Constructor
"""
self.name = name
self.endpoint = endpoint
self.username = username
self.password = password
with pretty_output(FG_WHITE) as p:
p.write('DEPRECATION WARNING: Storing connection credentials for WPS Services in the app.py is a security '
'leak. App configuration for WPS Services will be deprecated in version 1.2.')
def __repr__(self):
"""
String representation
"""
return '<WpsService: name={0}, endpoint={1}>'.format(self.name, self.endpoint)
| 38.164063
| 119
| 0.587922
| 558
| 4,885
| 4.94086
| 0.18638
| 0.039173
| 0.056583
| 0.030468
| 0.804498
| 0.770403
| 0.757708
| 0.733043
| 0.694958
| 0.669568
| 0
| 0.015023
| 0.277789
| 4,885
| 127
| 120
| 38.464567
| 0.76644
| 0.11566
| 0
| 0.441176
| 0
| 0
| 0.243217
| 0.010901
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0.088235
| 0.029412
| 0
| 0.205882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
a8c72ab4f0940b3dcc1ebf7dffe0aea35e2a1e49
| 29
|
py
|
Python
|
ccdl/__init__.py
|
xabgesagtx/ccdl
|
c6931d38c6068b6c48b033e2619a75157cbd6ac2
|
[
"MIT"
] | 1
|
2015-12-10T16:41:52.000Z
|
2015-12-10T16:41:52.000Z
|
ccdl/__init__.py
|
xabgesagtx/ccdl
|
c6931d38c6068b6c48b033e2619a75157cbd6ac2
|
[
"MIT"
] | null | null | null |
ccdl/__init__.py
|
xabgesagtx/ccdl
|
c6931d38c6068b6c48b033e2619a75157cbd6ac2
|
[
"MIT"
] | 1
|
2022-03-21T07:24:56.000Z
|
2022-03-21T07:24:56.000Z
|
from .ccdl import CcDownload
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7690aa8625abf3609eda5c764fde4bc287acdb9e
| 12,274
|
py
|
Python
|
SimModel_Python_API/simmodel_swig/Release/SimMaterial_GlazingMaterial_Glazing.py
|
EnEff-BIM/EnEffBIM-Framework
|
6328d39b498dc4065a60b5cc9370b8c2a9a1cddf
|
[
"MIT"
] | 3
|
2016-05-30T15:12:16.000Z
|
2022-03-22T08:11:13.000Z
|
SimModel_Python_API/simmodel_swig/Release/SimMaterial_GlazingMaterial_Glazing.py
|
EnEff-BIM/EnEffBIM-Framework
|
6328d39b498dc4065a60b5cc9370b8c2a9a1cddf
|
[
"MIT"
] | 21
|
2016-06-13T11:33:45.000Z
|
2017-05-23T09:46:52.000Z
|
SimModel_Python_API/simmodel_swig/Release/SimMaterial_GlazingMaterial_Glazing.py
|
EnEff-BIM/EnEffBIM-Framework
|
6328d39b498dc4065a60b5cc9370b8c2a9a1cddf
|
[
"MIT"
] | null | null | null |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_SimMaterial_GlazingMaterial_Glazing', [dirname(__file__)])
except ImportError:
import _SimMaterial_GlazingMaterial_Glazing
return _SimMaterial_GlazingMaterial_Glazing
if fp is not None:
try:
_mod = imp.load_module('_SimMaterial_GlazingMaterial_Glazing', fp, pathname, description)
finally:
fp.close()
return _mod
_SimMaterial_GlazingMaterial_Glazing = swig_import_helper()
del swig_import_helper
else:
import _SimMaterial_GlazingMaterial_Glazing
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import base
import SimMaterial_GlazingMaterial_Gas
import SimMaterial_Default_Default
class SimMaterial_GlazingMaterial_Glazing(SimMaterial_GlazingMaterial_Gas.SimMaterial_GlazingMaterial):
__swig_setmethods__ = {}
for _s in [SimMaterial_GlazingMaterial_Gas.SimMaterial_GlazingMaterial]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimMaterial_GlazingMaterial_Glazing, name, value)
__swig_getmethods__ = {}
for _s in [SimMaterial_GlazingMaterial_Gas.SimMaterial_GlazingMaterial]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimMaterial_GlazingMaterial_Glazing, name)
__repr__ = _swig_repr
def SimMaterial_Name(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_Name(self, *args)
def SimMaterial_Thick(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_Thick(self, *args)
def SimMaterial_Cond(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_Cond(self, *args)
def SimMaterial_OpticalDataType(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_OpticalDataType(self, *args)
def SimMaterial_WndwGlassSpectralDataSetName(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_WndwGlassSpectralDataSetName(self, *args)
def SimMaterial_SolarTransAtNrmlIncent(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_SolarTransAtNrmlIncent(self, *args)
def SimMaterial_FrontSideSolarReflectAtNrmlIncent(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_FrontSideSolarReflectAtNrmlIncent(self, *args)
def SimMaterial_BackSideSolarReflectAtNrmlIncent(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_BackSideSolarReflectAtNrmlIncent(self, *args)
def SimMaterial_VisTransAtNrmlIncent(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_VisTransAtNrmlIncent(self, *args)
def SimMaterial_FrontSideVisReflectAtNrmlIncent(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_FrontSideVisReflectAtNrmlIncent(self, *args)
def SimMaterial_BackSideVisReflectAtNrmlIncent(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_BackSideVisReflectAtNrmlIncent(self, *args)
def SimMaterial_InfraredTransAtNrmlIncent(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_InfraredTransAtNrmlIncent(self, *args)
def SimMaterial_FrontSideInfraredHemisphEmis(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_FrontSideInfraredHemisphEmis(self, *args)
def SimMaterial_BackSideInfraredHemisphEmis(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_BackSideInfraredHemisphEmis(self, *args)
def SimMaterial_DirtCorrectFactorForSolar_VisTrans(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_DirtCorrectFactorForSolar_VisTrans(self, *args)
def SimMaterial_SolarDiffusing(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_SolarDiffusing(self, *args)
def SimMaterial_YoungsModulus(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_YoungsModulus(self, *args)
def SimMaterial_PoissonsRatio(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_SimMaterial_PoissonsRatio(self, *args)
def MatProp_GlazingSpectralData_Name(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_MatProp_GlazingSpectralData_Name(self, *args)
def MatProp_GlazingSpectralData_Wavelength_1_800(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_MatProp_GlazingSpectralData_Wavelength_1_800(self, *args)
def MatProp_GlazingSpectralData_Trans_1_800(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_MatProp_GlazingSpectralData_Trans_1_800(self, *args)
def MatProp_GlazingSpectralData_FrontReflect_1_800(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_MatProp_GlazingSpectralData_FrontReflect_1_800(self, *args)
def MatProp_GlazingSpectralData_BackReflect_1_800(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_MatProp_GlazingSpectralData_BackReflect_1_800(self, *args)
def __init__(self, *args):
this = _SimMaterial_GlazingMaterial_Glazing.new_SimMaterial_GlazingMaterial_Glazing(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing__clone(self, f, c)
__swig_destroy__ = _SimMaterial_GlazingMaterial_Glazing.delete_SimMaterial_GlazingMaterial_Glazing
__del__ = lambda self: None
SimMaterial_GlazingMaterial_Glazing_swigregister = _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_swigregister
SimMaterial_GlazingMaterial_Glazing_swigregister(SimMaterial_GlazingMaterial_Glazing)
class SimMaterial_GlazingMaterial_Glazing_sequence(base.sequence_common):
__swig_setmethods__ = {}
for _s in [base.sequence_common]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimMaterial_GlazingMaterial_Glazing_sequence, name, value)
__swig_getmethods__ = {}
for _s in [base.sequence_common]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimMaterial_GlazingMaterial_Glazing_sequence, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimMaterial_GlazingMaterial_Glazing.new_SimMaterial_GlazingMaterial_Glazing_sequence(*args)
try:
self.this.append(this)
except:
self.this = this
def assign(self, n, x):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_assign(self, n, x)
def begin(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_begin(self, *args)
def end(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_end(self, *args)
def rbegin(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_rbegin(self, *args)
def rend(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_rend(self, *args)
def at(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_at(self, *args)
def front(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_front(self, *args)
def back(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_back(self, *args)
def push_back(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_push_back(self, *args)
def pop_back(self):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_pop_back(self)
def detach_back(self, pop=True):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_detach_back(self, pop)
def insert(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_insert(self, *args)
def erase(self, *args):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_erase(self, *args)
def detach(self, position, r, erase=True):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_detach(self, position, r, erase)
def swap(self, x):
return _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_swap(self, x)
__swig_destroy__ = _SimMaterial_GlazingMaterial_Glazing.delete_SimMaterial_GlazingMaterial_Glazing_sequence
__del__ = lambda self: None
SimMaterial_GlazingMaterial_Glazing_sequence_swigregister = _SimMaterial_GlazingMaterial_Glazing.SimMaterial_GlazingMaterial_Glazing_sequence_swigregister
SimMaterial_GlazingMaterial_Glazing_sequence_swigregister(SimMaterial_GlazingMaterial_Glazing_sequence)
# This file is compatible with both classic and new-style classes.
| 45.970037
| 154
| 0.786704
| 1,251
| 12,274
| 7.183054
| 0.135891
| 0.33274
| 0.396617
| 0.29379
| 0.676052
| 0.643
| 0.640775
| 0.553639
| 0.517805
| 0.443134
| 0
| 0.004492
| 0.147629
| 12,274
| 266
| 155
| 46.142857
| 0.854426
| 0.023953
| 0
| 0.255102
| 1
| 0
| 0.019215
| 0.006015
| 0
| 0
| 0
| 0
| 0
| 1
| 0.239796
| false
| 0.010204
| 0.066327
| 0.209184
| 0.647959
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
7691b0973118054b83bedab1ece63281bd28fefe
| 49
|
py
|
Python
|
usage_demo/my_other_program.py
|
aroberge/importhooks
|
57483ce24d265d391587f6321954f2ed60f04afd
|
[
"MIT"
] | null | null | null |
usage_demo/my_other_program.py
|
aroberge/importhooks
|
57483ce24d265d391587f6321954f2ed60f04afd
|
[
"MIT"
] | null | null | null |
usage_demo/my_other_program.py
|
aroberge/importhooks
|
57483ce24d265d391587f6321954f2ed60f04afd
|
[
"MIT"
] | null | null | null |
# my_other_program.py
import my_program # noqa
| 12.25
| 25
| 0.77551
| 8
| 49
| 4.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 49
| 3
| 26
| 16.333333
| 0.853659
| 0.489796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
76c50d7ff0229d1e0e8ecd450f289bf586b425fa
| 372
|
py
|
Python
|
fabrik_full_body/output_writer.py
|
Atiehmerikh/FABRIK_Full_Body
|
ae31fd059e65db18cbcf5fcdb43cfad3e015a359
|
[
"MIT"
] | 7
|
2020-09-29T23:49:05.000Z
|
2022-02-07T12:51:11.000Z
|
fabrik_full_body/output_writer.py
|
Atiehmerikh/FABRIK_Full_Body
|
ae31fd059e65db18cbcf5fcdb43cfad3e015a359
|
[
"MIT"
] | 2
|
2020-07-09T18:46:31.000Z
|
2022-03-17T06:42:41.000Z
|
fabrik_full_body/output_writer.py
|
Atiehmerikh/FABRIK_python
|
ae31fd059e65db18cbcf5fcdb43cfad3e015a359
|
[
"MIT"
] | 5
|
2021-03-15T12:50:16.000Z
|
2022-02-11T11:10:36.000Z
|
from fabrik_full_body.singleton import Singleton
class OutputWriter(metaclass=Singleton):
def __init__(self, base_address="./outputs/", angles_file_address = "angles.txt"):
self.base_address = base_address
self.angles_file_address = base_address + angles_file_address
def angle_writer(self):
return open(self.angles_file_address, "w")
| 41.333333
| 86
| 0.741935
| 47
| 372
| 5.468085
| 0.489362
| 0.171206
| 0.264591
| 0.163424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 372
| 8
| 87
| 46.5
| 0.829032
| 0
| 0
| 0
| 0
| 0
| 0.056452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.142857
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
76c5b0599de622291a540c654c4951f237f6e8b0
| 42
|
py
|
Python
|
chrome_handler/__init__.py
|
kaito1002/chrome_handler
|
d9ec0229a57e370e05cd2eef51c2dc18abf5c463
|
[
"MIT"
] | null | null | null |
chrome_handler/__init__.py
|
kaito1002/chrome_handler
|
d9ec0229a57e370e05cd2eef51c2dc18abf5c463
|
[
"MIT"
] | null | null | null |
chrome_handler/__init__.py
|
kaito1002/chrome_handler
|
d9ec0229a57e370e05cd2eef51c2dc18abf5c463
|
[
"MIT"
] | null | null | null |
from .chrome_handler import ChromeHandler
| 21
| 41
| 0.880952
| 5
| 42
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
76c9b903c76be1afe3aa574aaab0c0f54fe035ec
| 59,578
|
py
|
Python
|
tests/test_wsman.py
|
Samathy/pypsrp
|
196b4279581319930f2133860bce2f901a8bb127
|
[
"MIT"
] | null | null | null |
tests/test_wsman.py
|
Samathy/pypsrp
|
196b4279581319930f2133860bce2f901a8bb127
|
[
"MIT"
] | null | null | null |
tests/test_wsman.py
|
Samathy/pypsrp
|
196b4279581319930f2133860bce2f901a8bb127
|
[
"MIT"
] | null | null | null |
import os
import requests
import sys
import uuid
import pytest
import pypsrp.wsman as pypsrp_wsman
from pypsrp.encryption import WinRMEncryption
from pypsrp.exceptions import AuthenticationError, WinRMError, \
WinRMTransportError, WSManFaultError
from pypsrp.negotiate import HTTPNegotiateAuth
from pypsrp.wsman import OptionSet, SelectorSet, WSMan, WSManAction, \
NAMESPACES, _TransportHTTP
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
try:
import requests_credssp
except ImportError:
requests_credssp = None
if sys.version_info[0] == 2 and sys.version_info[1] < 7: # pragma: no cover
# ElementTree in Python 2.6 does not support namespaces so we need to use
# lxml instead for this version
from lxml import etree as ET
else: # pragma: no cover
import xml.etree.ElementTree as ET
class _TransportTest(object):
    """Stand-in for the HTTP transport used by the WSMan tests.

    When ``expected_action`` is set, a request whose ``wsa:Action`` header
    matches it gets a canned success envelope back; any other action raises
    a WinRMTransportError whose body is a WSManFault envelope.  When
    ``expected_action`` is None the stub always raises a 401 with a non-XML
    body so the fallback error path can be exercised.
    """
    def __init__(self, expected_action=None):
        # expected_action: the WSManAction URI this stub accepts, or None to
        # simulate a transport-level failure with a non-XML response body.
        self.endpoint = "testendpoint"
        self.expected_action = expected_action
    def send(self, xml):
        """Pretend to send *xml* and return or raise a canned response."""
        # ensure wsman is always sending a byte string
        assert isinstance(xml, bytes)
        if self.expected_action is None:
            # see what happens if the text is XML but not a WSManFault message
            raise WinRMTransportError("http", 401, "not an XML response")
        req = ET.fromstring(xml)
        action = req.find("s:Header/wsa:Action", NAMESPACES).text
        if action == self.expected_action:
            # minimal success envelope; RelatesTo matches the uuid4 value the
            # tests pin via monkeypatch so WSMan's id check passes
            return '''<s:Envelope xml:lang="en-US" xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing">
    <s:Header>
        <wsa:RelatesTo>uuid:00000000-0000-0000-0000-000000000000</wsa:RelatesTo>
    </s:Header>
    <s:Body>body</s:Body>
</s:Envelope>'''
        else:
            # we want to set a non XML message as the response text to verify
            # the parsing failure is checked and the original exception is
            # raised
            error_msg = '''<s:Envelope xml:lang="en-US" xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:a="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:x="http://schemas.xmlsoap.org/ws/2004/09/transfer" xmlns:e="http://schemas.xmlsoap.org/ws/2004/08/eventing" xmlns:n="http://schemas.xmlsoap.org/ws/2004/09/enumeration" xmlns:w="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" xmlns:rsp="http://schemas.microsoft.com/wbem/wsman/1/windows/shell" xmlns:p="http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd"><s:Header><a:Action>http://schemas.xmlsoap.org/ws/2004/08/addressing/fault</a:Action><a:MessageID>uuid:4DB571F9-F8DE-48FD-872C-2AF08D996249</a:MessageID><a:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</a:To><a:RelatesTo>uuid:eaa98952-3188-458f-b265-b03ace115f20</a:RelatesTo><s:NotUnderstood qname="wsman:ResourceUri" xmlns:wsman="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" />
        </s:Header>
        <s:Body>
            <s:Fault>
                <s:Code>
                    <s:Value>IllegalAction</s:Value>
                </s:Code>
                <s:Reason>
                    <s:Text xml:lang="">Illegal action '%s', expecting '%s'</s:Text>
                </s:Reason>
            </s:Fault>
        </s:Body>
        </s:Envelope>''' % (action, self.expected_action)
            raise WinRMTransportError("http", 500, error_msg)
class TestWSMan(object):
    """Tests for the WSMan message builder/parser using the stub transport.

    The thirteen ``test_invoke_*`` cases and the locale header cases were
    previously copy-pasted; they now share the private helpers
    ``_assert_invoke`` and ``_get_header_locales``.  Every public test name,
    fixture signature and assertion is unchanged.
    """
    def _assert_invoke(self, monkeypatch, action, method_name):
        # Shared driver for the test_invoke_* cases: pin uuid.uuid4 so the
        # sent MessageID matches the stub's canned RelatesTo, run the named
        # WSMan operation against the stub transport and verify the parsed
        # SOAP Body element is returned.
        def mockuuid():
            return uuid.UUID("00000000-0000-0000-0000-000000000000")
        monkeypatch.setattr(uuid, 'uuid4', mockuuid)
        wsman = WSMan("")
        wsman.transport = _TransportTest(action)
        actual = getattr(wsman, method_name)("", None)
        assert actual.tag == "{http://www.w3.org/2003/05/soap-envelope}Body"
        assert actual.text == "body"
    def _get_header_locales(self, wsman):
        # Returns the (wsman:Locale, wsmv:DataLocale) xml:lang values of a
        # header generated by the given WSMan instance.
        header = wsman._create_header("action", "resource", None, None, None)
        xml = NAMESPACES['xml']
        locale = header.find("wsman:Locale", NAMESPACES)
        data_locale = header.find("wsmv:DataLocale", NAMESPACES)
        return (locale.attrib["{%s}lang" % xml],
                data_locale.attrib["{%s}lang" % xml])
    def test_wsman_defaults(self):
        """A bare WSMan has the documented defaults and a unique session."""
        actual = WSMan("")
        assert actual.max_envelope_size == 153600
        assert actual.max_payload_size < actual.max_envelope_size
        assert actual.operation_timeout == 20
        assert isinstance(actual.session_id, str)
        # verify we get a unique session id each time this is initialised
        new_wsman = WSMan("")
        assert actual.session_id != new_wsman.session_id
    def test_override_default(self):
        """Envelope size and operation timeout are constructor overrides."""
        actual = WSMan("", 8192, 30)
        assert actual.max_envelope_size == 8192
        assert actual.max_payload_size < actual.max_envelope_size
        assert actual.operation_timeout == 30
    def test_invoke_command(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.COMMAND, "command")
    def test_invoke_connect(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.CONNECT, "connect")
    def test_invoke_create(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.CREATE, "create")
    def test_invoke_disconnect(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.DISCONNECT, "disconnect")
    def test_invoke_enumerate(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.ENUMERATE, "enumerate")
    def test_invoke_delete(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.DELETE, "delete")
    def test_invoke_get(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.GET, "get")
    def test_invoke_pull(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.PULL, "pull")
    def test_invoke_put(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.PUT, "put")
    def test_invoke_receive(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.RECEIVE, "receive")
    def test_invoke_reconnect(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.RECONNECT, "reconnect")
    def test_invoke_send(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.SEND, "send")
    def test_invoke_signal(self, monkeypatch):
        self._assert_invoke(monkeypatch, WSManAction.SIGNAL, "signal")
    def test_get_header_no_locale(self):
        """Both Locale and DataLocale default to en-US."""
        assert self._get_header_locales(WSMan("")) == ("en-US", "en-US")
    def test_get_header_explicit_locale(self):
        """locale= sets both Locale and DataLocale."""
        wsman = WSMan("", locale="en-GB")
        assert self._get_header_locales(wsman) == ("en-GB", "en-GB")
    def test_get_header_explicit_data_locale(self):
        """data_locale= overrides only DataLocale; Locale stays en-US."""
        wsman = WSMan("", data_locale="en-GB")
        assert self._get_header_locales(wsman) == ("en-US", "en-GB")
    def test_get_header_explicit_both_locale(self):
        """locale= and data_locale= are independent when both are given."""
        wsman = WSMan("", locale="en-AU", data_locale="en-GB")
        assert self._get_header_locales(wsman) == ("en-AU", "en-GB")
    def test_invoke_mismatch_id(self, monkeypatch):
        """A response RelatesTo differing from the sent id raises."""
        def mockuuid():
            # send with ...0001 while the stub always answers with ...0000
            return uuid.UUID("00000000-0000-0000-0000-000000000001")
        monkeypatch.setattr(uuid, 'uuid4', mockuuid)
        wsman = WSMan("")
        wsman.transport = _TransportTest(WSManAction.SEND)
        with pytest.raises(WinRMError) as exc:
            wsman.send("", None)
        assert str(exc.value) == \
            "Received related id does not match related expected message " \
            "id: Sent: uuid:00000000-0000-0000-0000-000000000001, Received: " \
            "uuid:00000000-0000-0000-0000-000000000000"
    def test_invoke_transport_error(self):
        """A non-XML error body re-raises the original transport error."""
        wsman = WSMan("")
        wsman.transport = _TransportTest()
        with pytest.raises(WinRMTransportError) as exc:
            wsman.send("", None)
        error_msg = "Bad HTTP response returned from the server. Code: 401, " \
                    "Content: 'not an XML response'"
        assert str(exc.value) == error_msg
        assert exc.value.code == 401
        assert exc.value.protocol == "http"
        assert exc.value.message == error_msg
        assert exc.value.response_text == "not an XML response"
    def test_invoke_wsman_fault(self):
        """A WSManFault error body is parsed into WSManFaultError."""
        # we set Create and send Send to cause the test transport to fire the
        # error we want
        wsman = WSMan("")
        wsman.transport = _TransportTest(WSManAction.CREATE)
        with pytest.raises(WSManFaultError) as exc:
            wsman.send("", None)
        error_msg = \
            "Received a WSManFault message. (Code: IllegalAction, Reason: " \
            "Illegal action '%s', expecting '%s')" \
            % (WSManAction.SEND, WSManAction.CREATE)
        assert str(exc.value) == error_msg
        assert exc.value.code == "IllegalAction"
        assert exc.value.machine is None
        assert exc.value.message == error_msg
        assert exc.value.provider is None
        assert exc.value.provider_fault is None
        assert exc.value.reason == "Illegal action '%s', expecting '%s'" \
            % (WSManAction.SEND, WSManAction.CREATE)
    def test_raise_native_wsman_fault(self):
        """A plain SOAP fault (no f:WSManFault detail) keeps the SOAP code."""
        xml_text = '''
        <s:Envelope xml:lang="en-US" xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:a="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:x="http://schemas.xmlsoap.org/ws/2004/09/transfer" xmlns:e="http://schemas.xmlsoap.org/ws/2004/08/eventing" xmlns:n="http://schemas.xmlsoap.org/ws/2004/09/enumeration" xmlns:w="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" xmlns:rsp="http://schemas.microsoft.com/wbem/wsman/1/windows/shell" xmlns:p="http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd"><s:Header><a:Action>http://schemas.xmlsoap.org/ws/2004/08/addressing/fault</a:Action><a:MessageID>uuid:4DB571F9-F8DE-48FD-872C-2AF08D996249</a:MessageID><a:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</a:To><a:RelatesTo>uuid:eaa98952-3188-458f-b265-b03ace115f20</a:RelatesTo><s:NotUnderstood qname="wsman:ResourceUri" xmlns:wsman="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" />
        </s:Header>
        <s:Body>
            <s:Fault>
                <s:Code>
                    <s:Value>s:MustUnderstand</s:Value>
                </s:Code>
                <s:Reason>
                    <s:Text xml:lang="">The WS-Management service cannot process a SOAP header in the request that is marked as mustUnderstand by the client. This could be caused by the use of a version of the protocol which is not supported, or may be an incompatibility between the client and server implementations. </s:Text>
                </s:Reason>
            </s:Fault>
        </s:Body>
        </s:Envelope>'''
        with pytest.raises(WSManFaultError) as exc:
            raise WSMan._parse_wsman_fault(xml_text)
        assert exc.value.code == "s:MustUnderstand"
        assert exc.value.machine is None
        assert exc.value.message == \
            "Received a WSManFault message. (Code: s:MustUnderstand, " \
            "Reason: The WS-Management service cannot process a SOAP header " \
            "in the request that is marked as mustUnderstand by the client. " \
            " This could be caused by the use of a version of the protocol " \
            "which is not supported, or may be an incompatibility between " \
            "the client and server implementations.)"
        assert exc.value.provider is None
        assert exc.value.provider_fault is None
        assert exc.value.provider_path is None
        assert exc.value.reason == \
            "The WS-Management service cannot process a SOAP header in the " \
            "request that is marked as mustUnderstand by the client. This " \
            "could be caused by the use of a version of the protocol which " \
            "is not supported, or may be an incompatibility between the " \
            "client and server implementations."
    def test_raise_native_wsman_fault_no_reason(self):
        """A SOAP fault with no Reason element yields reason is None."""
        xml_text = '''
        <s:Envelope xml:lang="en-US" xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:a="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:x="http://schemas.xmlsoap.org/ws/2004/09/transfer" xmlns:e="http://schemas.xmlsoap.org/ws/2004/08/eventing" xmlns:n="http://schemas.xmlsoap.org/ws/2004/09/enumeration" xmlns:w="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" xmlns:rsp="http://schemas.microsoft.com/wbem/wsman/1/windows/shell" xmlns:p="http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd"><s:Header><a:Action>http://schemas.xmlsoap.org/ws/2004/08/addressing/fault</a:Action><a:MessageID>uuid:4DB571F9-F8DE-48FD-872C-2AF08D996249</a:MessageID><a:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</a:To><a:RelatesTo>uuid:eaa98952-3188-458f-b265-b03ace115f20</a:RelatesTo><s:NotUnderstood qname="wsman:ResourceUri" xmlns:wsman="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" />
        </s:Header>
        <s:Body>
            <s:Fault>
                <s:Code>
                    <s:Value>s:Unknown</s:Value>
                </s:Code>
            </s:Fault>
        </s:Body>
        </s:Envelope>'''
        with pytest.raises(WSManFaultError) as exc:
            raise WSMan._parse_wsman_fault(xml_text)
        assert exc.value.code == "s:Unknown"
        assert exc.value.machine is None
        assert exc.value.message == "Received a WSManFault message. " \
            "(Code: s:Unknown)"
        assert exc.value.provider is None
        assert exc.value.provider_fault is None
        assert exc.value.provider_path is None
        assert exc.value.reason is None
    def test_raise_wsman_fault_with_wsman_fault(self):
        """An f:WSManFault detail with provider info wins over SOAP code."""
        xml_text = r'''
        <s:Envelope xml:lang="en-US" xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:a="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:x="http://schemas.xmlsoap.org/ws/2004/09/transfer" xmlns:e="http://schemas.xmlsoap.org/ws/2004/08/eventing" xmlns:n="http://schemas.xmlsoap.org/ws/2004/09/enumeration" xmlns:w="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" xmlns:p="http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd">
            <s:Header>
                <a:Action>http://schemas.dmtf.org/wbem/wsman/1/wsman/fault</a:Action>
                <a:MessageID>uuid:348D9DCE-B99B-4EBD-A90B-624854B032BB</a:MessageID>
                <a:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</a:To>
                <a:RelatesTo>uuid:a82b5f24-7a6c-4170-8cd1-d2031b1203fd</a:RelatesTo>
            </s:Header>
            <s:Body>
                <s:Fault>
                    <s:Code>
                        <s:Value>s:Sender</s:Value>
                        <s:Subcode>
                            <s:Value>w:InvalidParameter</s:Value>
                        </s:Subcode>
                    </s:Code>
                    <s:Reason>
                        <s:Text xml:lang="">The parameter is incorrect. </s:Text>
                    </s:Reason>
                    <s:Detail>
                        <w:FaultDetail>http://schemas.dmtf.org/wbem/wsman/1/wsman/faultDetail/InvalidValue</w:FaultDetail>
                        <f:WSManFault xmlns:f="http://schemas.microsoft.com/wbem/wsman/1/wsmanfault" Code="87" Machine="SERVER2016.domain.local">
                            <f:Message><f:ProviderFault provider="Shell cmd plugin" path="%systemroot%\system32\winrscmd.dll">The parameter is incorrect. </f:ProviderFault></f:Message>
                        </f:WSManFault>
                    </s:Detail>
                </s:Fault>
            </s:Body>
        </s:Envelope>'''
        with pytest.raises(WSManFaultError) as exc:
            raise WSMan._parse_wsman_fault(xml_text)
        assert exc.value.code == 87
        assert exc.value.machine == "SERVER2016.domain.local"
        assert exc.value.message == \
            "Received a WSManFault message. (Code: 87, Machine: " \
            "SERVER2016.domain.local, Reason: The parameter is incorrect., " \
            "Provider: Shell cmd plugin, Provider Path: %systemroot%\\" \
            "system32\\winrscmd.dll, Provider Fault: The parameter is " \
            "incorrect.)"
        assert exc.value.provider == "Shell cmd plugin"
        assert exc.value.provider_fault == "The parameter is incorrect."
        assert exc.value.provider_path == \
            "%systemroot%\\system32\\winrscmd.dll"
        assert exc.value.reason == "The parameter is incorrect."
    def test_raise_wsman_fault_without_provider(self):
        """An f:WSManFault detail without provider info leaves it None."""
        xml_text = r'''
        <s:Envelope xml:lang="en-US" xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:a="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:x="http://schemas.xmlsoap.org/ws/2004/09/transfer" xmlns:e="http://schemas.xmlsoap.org/ws/2004/08/eventing" xmlns:n="http://schemas.xmlsoap.org/ws/2004/09/enumeration" xmlns:w="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" xmlns:p="http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd">
            <s:Header>
                <a:Action>http://schemas.dmtf.org/wbem/wsman/1/wsman/fault</a:Action>
                <a:MessageID>uuid:EE71C444-1658-4B3F-916D-54CE43B68BC9</a:MessageID>
                <a:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</a:To>
                <a:RelatesTo>uuid.761ca906-0bf0-41bb-a9d9-4cbbca986aeb</a:RelatesTo>
            </s:Header>
            <s:Body>
                <s:Fault>
                    <s:Code>
                        <s:Value>s:Sender</s:Value>
                        <s:Subcode>
                            <s:Value>w:SchemaValidationError</s:Value>
                        </s:Subcode>
                    </s:Code>
                    <s:Reason>
                        <s:Text xml:lang="">The SOAP XML in the message does not match the corresponding XML schema definition. Change the XML and retry. </s:Text>
                    </s:Reason>
                    <s:Detail>
                        <f:WSManFault xmlns:f="http://schemas.microsoft.com/wbem/wsman/1/wsmanfault" Code="2150858817" Machine="SERVER2008.domain.local">
                            <f:Message>The Windows Remote Shell cannot process the request. The SOAP packet contains an element Argument that is invalid. Retry the request with the correct XML element. </f:Message>
                        </f:WSManFault>
                    </s:Detail>
                </s:Fault>
            </s:Body>
        </s:Envelope>'''
        with pytest.raises(WSManFaultError) as exc:
            raise WSMan._parse_wsman_fault(xml_text)
        assert exc.value.code == 2150858817
        assert exc.value.machine == "SERVER2008.domain.local"
        assert exc.value.message == \
            "Received a WSManFault message. (Code: 2150858817, Machine: " \
            "SERVER2008.domain.local, Reason: The Windows Remote Shell " \
            "cannot process the request. The SOAP packet contains an " \
            "element Argument that is invalid. Retry the request with the " \
            "correct XML element.)"
        assert exc.value.provider is None
        assert exc.value.provider_fault is None
        assert exc.value.provider_path is None
        assert exc.value.reason == \
            "The Windows Remote Shell cannot process the request. The SOAP " \
            "packet contains an element Argument that is invalid. Retry the " \
            "request with the correct XML element."
    def test_wsman_update_envelope_size_explicit(self):
        """An explicit size sets the envelope and derives the payload cap."""
        wsman = WSMan("")
        wsman.update_max_payload_size(4096)
        assert wsman.max_envelope_size == 4096
        # this next value is dependent on a lot of things such as python
        # version and rounding differences, we will just assert against a range
        assert 1450 <= wsman.max_payload_size <= 1835
    @pytest.mark.parametrize('wsman_conn',
                             # we just want to validate against different env
                             # set on a server
                             [[False, 'test_wsman_update_envelope_size_150']],
                             indirect=True)
    def test_wsman_update_envelope_size_150(self, wsman_conn):
        """Server-advertised 150KB envelope size is picked up."""
        wsman_conn.update_max_payload_size()
        assert wsman_conn.max_envelope_size == 153600
        # this next value is dependent on a lot of things such as python
        # version and rounding differences, we will just assert against a range
        assert 113574 <= wsman_conn.max_payload_size <= 113952
    @pytest.mark.parametrize('wsman_conn',
                             # we just want to validate against different env
                             # set on a server
                             [[False, 'test_wsman_update_envelope_size_500']],
                             indirect=True)
    def test_wsman_update_envelope_size_500(self, wsman_conn):
        """Server-advertised 500KB envelope size is picked up."""
        wsman_conn.update_max_payload_size()
        assert wsman_conn.max_envelope_size == 512000
        # this next value is dependent on a lot of things such as python
        # version and rounding differences, we will just assert against a range
        assert 382374 <= wsman_conn.max_payload_size <= 382752
    @pytest.mark.parametrize('wsman_conn',
                             # we just want to validate against different env
                             # set on a server
                             [[False, 'test_wsman_update_envelope_size_4096']],
                             indirect=True)
    def test_wsman_update_envelope_size_4096(self, wsman_conn):
        """Server-advertised 4MB envelope size is picked up."""
        wsman_conn.update_max_payload_size()
        assert wsman_conn.max_envelope_size == 4194304
        # this next value is dependent on a lot of things such as python
        # version and rounding differences, we will just assert against a range
        assert 3144102 <= wsman_conn.max_payload_size <= 3144480
class TestOptionSet(object):
    """Tests for OptionSet XML packing and its string representation."""
    def test_set_no_options(self):
        """An empty OptionSet packs to a bare mustUnderstand element."""
        opts = OptionSet()
        packed = opts.pack()
        ns = "http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd"
        must = "{http://www.w3.org/2003/05/soap-envelope}" "mustUnderstand"
        assert len(packed.attrib.keys()) == 1
        assert packed.attrib[must] == "true"
        assert packed.tag == "{%s}OptionSet" % ns
        assert packed.text is None
        assert list(packed) == []
        assert str(opts) == "{}"
    def test_set_one_option(self):
        """A single option becomes one wsman:Option child element."""
        opts = OptionSet()
        opts.add_option("key", "value")
        packed = opts.pack()
        ns = "http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd"
        must = "{http://www.w3.org/2003/05/soap-envelope}" "mustUnderstand"
        assert len(packed.attrib.keys()) == 1
        assert packed.attrib[must] == "true"
        assert packed.tag == "{%s}OptionSet" % ns
        assert packed.text is None
        children = list(packed)
        assert len(children) == 1
        child = children[0]
        assert len(child.attrib.keys()) == 1
        assert child.attrib["Name"] == "key"
        assert child.tag == "{%s}Option" % ns
        assert child.text == "value"
        assert str(opts) == "{'key': 'value'}"
    def test_set_one_option_with_attributes(self):
        """Extra attributes are carried onto the Option element itself."""
        opts = OptionSet()
        opts.add_option("key", "value",
                        {"attrib1": "value1", "attrib2": "value2"})
        packed = opts.pack()
        ns = "http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd"
        must = "{http://www.w3.org/2003/05/soap-envelope}" "mustUnderstand"
        assert len(packed.attrib.keys()) == 1
        assert packed.attrib[must] == "true"
        assert packed.tag == "{%s}OptionSet" % ns
        assert packed.text is None
        children = list(packed)
        assert len(children) == 1
        child = children[0]
        assert len(child.attrib.keys()) == 3
        assert child.attrib["Name"] == "key"
        assert child.attrib["attrib1"] == "value1"
        assert child.attrib["attrib2"] == "value2"
        assert child.tag == "{%s}Option" % ns
        assert child.text == "value"
        assert str(opts) == "{'key': 'value'}"
    def test_set_multiple_options(self):
        """Options keep insertion order and all appear in str()."""
        opts = OptionSet()
        opts.add_option("key1", "value1")
        opts.add_option("key2", "value2")
        packed = opts.pack()
        ns = "http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd"
        must = "{http://www.w3.org/2003/05/soap-envelope}" "mustUnderstand"
        assert len(packed.attrib.keys()) == 1
        assert packed.attrib[must] == "true"
        assert packed.tag == "{%s}OptionSet" % ns
        assert packed.text is None
        children = list(packed)
        assert len(children) == 2
        expected = [("key1", "value1"), ("key2", "value2")]
        for child, (name, value) in zip(children, expected):
            assert len(child.attrib.keys()) == 1
            assert child.attrib["Name"] == name
            assert child.tag == "{%s}Option" % ns
            assert child.text == value
        assert str(opts) == "{'key1': 'value1', 'key2': 'value2'}"
class TestSelectorSet(object):
    """Tests for SelectorSet XML packing and its string representation."""
    def test_set_no_options(self):
        """An empty SelectorSet packs to a bare, attribute-free element."""
        selectors = SelectorSet()
        packed = selectors.pack()
        ns = "http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd"
        assert len(packed.attrib.keys()) == 0
        assert packed.tag == "{%s}SelectorSet" % ns
        assert packed.text is None
        assert list(packed) == []
        assert str(selectors) == "{}"
    def test_set_one_option(self):
        """A single selector becomes one wsman:Selector child element."""
        selectors = SelectorSet()
        selectors.add_option("key", "value")
        packed = selectors.pack()
        ns = "http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd"
        assert len(packed.attrib.keys()) == 0
        assert packed.tag == "{%s}SelectorSet" % ns
        assert packed.text is None
        children = list(packed)
        assert len(children) == 1
        child = children[0]
        assert len(child.attrib.keys()) == 1
        assert child.attrib["Name"] == "key"
        assert child.tag == "{%s}Selector" % ns
        assert child.text == "value"
        assert str(selectors) == "{'key': 'value'}"
    def test_set_one_option_with_attributes(self):
        """Extra attributes are carried onto the Selector element itself."""
        selectors = SelectorSet()
        selectors.add_option("key", "value",
                             {"attrib1": "value1", "attrib2": "value2"})
        packed = selectors.pack()
        ns = "http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd"
        assert len(packed.attrib.keys()) == 0
        assert packed.tag == "{%s}SelectorSet" % ns
        assert packed.text is None
        children = list(packed)
        assert len(children) == 1
        child = children[0]
        assert len(child.attrib.keys()) == 3
        assert child.attrib["Name"] == "key"
        assert child.attrib["attrib1"] == "value1"
        assert child.attrib["attrib2"] == "value2"
        assert child.tag == "{%s}Selector" % ns
        assert child.text == "value"
        assert str(selectors) == "{'key': 'value'}"
    def test_set_multiple_options(self):
        """Selectors keep insertion order and all appear in str()."""
        selectors = SelectorSet()
        selectors.add_option("key1", "value1")
        selectors.add_option("key2", "value2")
        packed = selectors.pack()
        ns = "http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd"
        assert len(packed.attrib.keys()) == 0
        assert packed.tag == "{%s}SelectorSet" % ns
        assert packed.text is None
        children = list(packed)
        assert len(children) == 2
        expected = [("key1", "value1"), ("key2", "value2")]
        for child, (name, value) in zip(children, expected):
            assert len(child.attrib.keys()) == 1
            assert child.attrib["Name"] == name
            assert child.tag == "{%s}Selector" % ns
            assert child.text == value
        assert str(selectors) == "{'key1': 'value1', 'key2': 'value2'}"
class TestTransportHTTP(object):
    def test_not_supported_auth(self):
        """An unrecognised auth provider name is rejected up front."""
        with pytest.raises(ValueError) as err:
            _TransportHTTP("", "", auth="fake")
        assert str(err.value) == \
            "The specified auth 'fake' is not supported, please select one " \
            "of 'basic, certificate, credssp, kerberos, negotiate, ntlm'"
    def test_invalid_encryption_value(self):
        """encryption must be one of auto, always, or never."""
        with pytest.raises(ValueError) as err:
            _TransportHTTP("", "", encryption="fake")
        assert str(err.value) == \
            "The encryption value 'fake' must be auto, always, or never"
    def test_encryption_always_not_valid_auth_ssl(self):
        """encryption='always' needs an auth provider that can wrap."""
        with pytest.raises(ValueError) as err:
            _TransportHTTP("", "", auth="basic", encryption="always", ssl=True)
        assert str(err.value) == \
            "Cannot use message encryption with auth 'basic', either set " \
            "encryption='auto' or use one of the following auth providers: " \
            "credssp, kerberos, negotiate, ntlm"
    def test_encryption_auto_not_valid_auth_no_ssl(self):
        """encryption='auto' without SSL also needs a wrapping auth."""
        with pytest.raises(ValueError) as err:
            _TransportHTTP("", "", auth="basic", encryption="auto", ssl=False)
        assert str(err.value) == \
            "Cannot use message encryption with auth 'basic', either set " \
            "encryption='never', use ssl=True or use one of the following " \
            "auth providers: credssp, kerberos, negotiate, ntlm"
    def test_build_basic_no_username(self):
        """Basic auth requires a username."""
        transport = _TransportHTTP("")
        with pytest.raises(ValueError) as err:
            transport._build_auth_basic(None)
        assert str(err.value) == \
            "For basic auth, the username must be specified"
    def test_build_basic_no_password(self):
        """Basic auth requires a password."""
        transport = _TransportHTTP("", username="user")
        with pytest.raises(ValueError) as err:
            transport._build_auth_basic(None)
        assert str(err.value) == \
            "For basic auth, the password must be specified"
    def test_build_basic(self):
        """Basic auth populates requests' HTTPBasicAuth on the session."""
        transport = _TransportHTTP("", username="user", password="pass",
                                   auth="basic")
        session = transport._build_session()
        assert transport.encryption is None
        assert isinstance(session.auth, requests.auth.HTTPBasicAuth)
        assert session.auth.username == "user"
        assert session.auth.password == "pass"
    def test_build_certificate_no_key_pem(self):
        """Certificate auth requires certificate_key_pem."""
        transport = _TransportHTTP("")
        with pytest.raises(ValueError) as err:
            transport._build_auth_certificate(None)
        assert str(err.value) == \
            "For certificate auth, the path to the certificate key pem file " \
            "must be specified with certificate_key_pem"
    def test_build_certificate_no_pem(self):
        """Certificate auth requires certificate_pem as well."""
        transport = _TransportHTTP("", certificate_key_pem="path")
        with pytest.raises(ValueError) as err:
            transport._build_auth_certificate(None)
        assert str(err.value) == \
            "For certificate auth, the path to the certificate pem file " \
            "must be specified with certificate_pem"
    def test_build_certificate_not_ssl(self):
        """Certificate auth is only valid over SSL."""
        transport = _TransportHTTP("", certificate_key_pem="path",
                                   certificate_pem="path", ssl=False)
        with pytest.raises(ValueError) as err:
            transport._build_auth_certificate(None)
        assert str(err.value) == "For certificate auth, SSL must be used"
    def test_build_certificate(self):
        """Certificate auth sets session.cert and the mutual-auth header."""
        transport = _TransportHTTP("", auth="certificate",
                                   certificate_key_pem="key_pem",
                                   certificate_pem="pem")
        session = transport._build_session()
        assert transport.encryption is None
        assert session.auth is None
        assert session.cert == ("pem", "key_pem")
        assert session.headers['Authorization'] == \
            "http://schemas.dmtf.org/wbem/wsman/1/wsman/secprofile/" \
            "https/mutual"
    @pytest.mark.skipif(
        requests_credssp,
        reason="only raises if requests-credssp is not installed",
    )
    def test_build_credssp_not_imported(self):
        """Selecting credssp without requests-credssp raises ImportError."""
        transport = _TransportHTTP("", username="user", password="password")
        with pytest.raises(
            ImportError,
            match=(
                r"Cannot use CredSSP auth as requests-credssp is not "
                r"installed: No module named '?requests_credssp'?"
            ),
        ):
            transport._build_auth_credssp(None)
    def test_build_credssp_no_username(self):
        """CredSSP auth requires a username."""
        transport = _TransportHTTP("")
        with pytest.raises(ValueError) as err:
            transport._build_auth_credssp(None)
        assert str(err.value) == \
            "For credssp auth, the username must be specified"
    def test_build_credssp_no_password(self):
        """CredSSP auth requires a password."""
        transport = _TransportHTTP("", username="user")
        with pytest.raises(ValueError) as err:
            transport._build_auth_credssp(None)
        assert str(err.value) == \
            "For credssp auth, the password must be specified"
    def test_build_credssp_no_kwargs(self):
        """CredSSP defaults: TLSv1.2 enabled, minimum protocol version 2."""
        credssp = pytest.importorskip("requests_credssp")
        transport = _TransportHTTP("", username="user", password="pass",
                                   auth="credssp")
        session = transport._build_session()
        assert isinstance(session.auth, credssp.HttpCredSSPAuth)
        assert session.auth.disable_tlsv1_2 is False
        assert session.auth.minimum_version == 2
        assert session.auth.password == 'pass'
        assert session.auth.username == 'user'
    def test_build_credssp_with_kwargs(self):
        """credssp_* kwargs are forwarded to HttpCredSSPAuth."""
        credssp = pytest.importorskip("requests_credssp")
        transport = _TransportHTTP("", username="user", password="pass",
                                   auth="credssp",
                                   credssp_auth_mechanism="kerberos",
                                   credssp_disable_tlsv1_2=True,
                                   credssp_minimum_version=5)
        session = transport._build_session()
        assert isinstance(session.auth, credssp.HttpCredSSPAuth)
        assert session.auth.disable_tlsv1_2 is True
        assert session.auth.minimum_version == 5
        assert session.auth.password == 'pass'
        assert session.auth.username == 'user'
    def test_build_kerberos(self):
        """Kerberos auth defaults on HTTPNegotiateAuth."""
        transport = _TransportHTTP("", auth="kerberos")
        session = transport._build_session()
        assert isinstance(session.auth, HTTPNegotiateAuth)
        assert session.auth.auth_provider == "kerberos"
        assert session.auth.delegate is False
        assert session.auth.hostname_override is None
        assert session.auth.password is None
        assert session.auth.send_cbt is True
        assert session.auth.service == 'WSMAN'
        assert session.auth.username is None
        assert session.auth.wrap_required is False
def test_build_kerberos_with_kwargs(self):
    """negotiate_* kwargs are forwarded; ssl=False makes wrap_required True."""
    transport = _TransportHTTP("", auth="kerberos", username="user",
                               ssl=False, password="pass",
                               negotiate_delegate=True,
                               negotiate_hostname_override="host",
                               negotiate_send_cbt=False,
                               negotiate_service="HTTP")
    session = transport._build_session()
    assert isinstance(session.auth, HTTPNegotiateAuth)
    assert session.auth.auth_provider == "kerberos"
    assert session.auth.delegate is True
    assert session.auth.hostname_override == "host"
    assert session.auth.password == "pass"
    assert session.auth.send_cbt is False
    assert session.auth.service == 'HTTP'
    assert session.auth.username == "user"
    # wrap_required is True here — presumably because ssl=False forces
    # message-level encryption; confirm against _TransportHTTP.
    assert session.auth.wrap_required is True
def test_build_negotiate(self):
    """With no auth kwarg the session defaults to negotiate HTTPNegotiateAuth."""
    transport = _TransportHTTP("")
    session = transport._build_session()
    assert isinstance(session.auth, HTTPNegotiateAuth)
    assert session.auth.auth_provider == "negotiate"
    assert session.auth.delegate is False
    assert session.auth.hostname_override is None
    assert session.auth.password is None
    assert session.auth.send_cbt is True
    assert session.auth.service == 'WSMAN'
    assert session.auth.username is None
    assert session.auth.wrap_required is False
def test_build_negotiate_with_kwargs(self):
    """negotiate_* kwargs are forwarded for explicit negotiate auth."""
    transport = _TransportHTTP("", auth="negotiate", username="user",
                               ssl=False, password="pass",
                               negotiate_delegate=True,
                               negotiate_hostname_override="host",
                               negotiate_send_cbt=False,
                               negotiate_service="HTTP")
    session = transport._build_session()
    assert isinstance(session.auth, HTTPNegotiateAuth)
    assert session.auth.auth_provider == "negotiate"
    assert session.auth.delegate is True
    assert session.auth.hostname_override == "host"
    assert session.auth.password == "pass"
    assert session.auth.send_cbt is False
    assert session.auth.service == 'HTTP'
    assert session.auth.username == "user"
    assert session.auth.wrap_required is True
def test_build_ntlm(self):
    """auth="ntlm" builds HTTPNegotiateAuth with ntlm provider and defaults."""
    transport = _TransportHTTP("", auth="ntlm")
    session = transport._build_session()
    assert isinstance(session.auth, HTTPNegotiateAuth)
    assert session.auth.auth_provider == "ntlm"
    assert session.auth.delegate is False
    assert session.auth.hostname_override is None
    assert session.auth.password is None
    assert session.auth.send_cbt is True
    assert session.auth.service == 'WSMAN'
    assert session.auth.username is None
    assert session.auth.wrap_required is False
def test_build_ntlm_with_kwargs(self):
    """negotiate_* kwargs are forwarded for ntlm auth; cert_validation is ignored here."""
    transport = _TransportHTTP("", auth="ntlm", username="user",
                               ssl=False, password="pass",
                               negotiate_delegate=True,
                               negotiate_hostname_override="host",
                               negotiate_send_cbt=False,
                               negotiate_service="HTTP",
                               cert_validation=False)
    session = transport._build_session()
    assert isinstance(session.auth, HTTPNegotiateAuth)
    assert session.auth.auth_provider == "ntlm"
    assert session.auth.delegate is True
    assert session.auth.hostname_override == "host"
    assert session.auth.password == "pass"
    assert session.auth.send_cbt is False
    assert session.auth.service == 'HTTP'
    assert session.auth.username == "user"
    assert session.auth.wrap_required is True
def test_build_session_default(self):
    """Default session: PSRP User-Agent, env trust, negotiate auth, no proxies, TLS verify on."""
    transport = _TransportHTTP("")
    session = transport._build_session()
    assert session.headers['User-Agent'] == "Python PSRP Client"
    assert session.trust_env is True
    assert isinstance(session.auth, HTTPNegotiateAuth)
    assert 'http' not in session.proxies
    assert 'https' not in session.proxies
    assert session.verify is True
def test_build_session_cert_validate(self):
    """cert_validation=True maps to session.verify True."""
    transport = _TransportHTTP("", cert_validation=True)
    session = transport._build_session()
    assert session.verify is True
def test_build_session_cert_validate_env(self):
    """REQUESTS_CA_BUNDLE env var becomes the session verify path."""
    transport = _TransportHTTP("", cert_validation=True)
    os.environ['REQUESTS_CA_BUNDLE'] = 'path_to_REQUESTS_CA_CERT'
    try:
        session = transport._build_session()
    finally:
        # Always restore the environment so other tests are unaffected.
        del os.environ['REQUESTS_CA_BUNDLE']
    assert session.verify == 'path_to_REQUESTS_CA_CERT'
def test_build_session_cert_validate_path_override_env(self):
    """A cert_validation path kwarg wins over REQUESTS_CA_BUNDLE."""
    transport = _TransportHTTP("", cert_validation="kwarg_path")
    os.environ['REQUESTS_CA_BUNDLE'] = 'path_to_REQUESTS_CA_CERT'
    try:
        session = transport._build_session()
    finally:
        del os.environ['REQUESTS_CA_BUNDLE']
    assert session.verify == 'kwarg_path'
def test_build_session_cert_no_validate(self):
    """cert_validation=False maps to session.verify False."""
    transport = _TransportHTTP("", cert_validation=False)
    session = transport._build_session()
    assert session.verify is False
def test_build_session_cert_no_validate_override_env(self):
    """cert_validation=False wins even when REQUESTS_CA_BUNDLE is set."""
    transport = _TransportHTTP("", cert_validation=False)
    os.environ['REQUESTS_CA_BUNDLE'] = 'path_to_REQUESTS_CA_CERT'
    try:
        session = transport._build_session()
    finally:
        del os.environ['REQUESTS_CA_BUNDLE']
    assert session.verify is False
def test_build_session_proxies_default(self):
    """No proxy kwargs and no env vars leaves session.proxies empty."""
    transport = _TransportHTTP("server")
    session = transport._build_session()
    assert 'http' not in session.proxies
    assert 'https' not in session.proxies
def test_build_session_proxies_env(self):
    """https_proxy env var populates the https proxy only."""
    transport = _TransportHTTP("server")
    os.environ['https_proxy'] = "https://envproxy"
    try:
        session = transport._build_session()
    finally:
        del os.environ['https_proxy']
    assert 'http' not in session.proxies
    assert session.proxies["https"] == "https://envproxy"
def test_build_session_proxies_kwarg(self):
    """proxy kwarg sets the https proxy (default ssl=True endpoint)."""
    transport = _TransportHTTP("server", proxy="https://kwargproxy")
    session = transport._build_session()
    assert 'http' not in session.proxies
    assert session.proxies["https"] == "https://kwargproxy"
def test_build_session_proxies_kwarg_non_ssl(self):
    """With ssl=False the proxy kwarg sets the http proxy instead of https."""
    transport = _TransportHTTP("server", proxy="http://kwargproxy",
                               ssl=False)
    session = transport._build_session()
    assert session.proxies["http"] == "http://kwargproxy"
    assert 'https' not in session.proxies
def test_build_session_proxies_env_kwarg_override(self):
    """proxy kwarg overrides the https_proxy environment variable."""
    transport = _TransportHTTP("server", proxy="https://kwargproxy")
    os.environ['https_proxy'] = "https://envproxy"
    try:
        session = transport._build_session()
    finally:
        del os.environ['https_proxy']
    assert 'http' not in session.proxies
    assert session.proxies['https'] == "https://kwargproxy"
def test_build_session_proxies_env_no_proxy_override(self):
    """no_proxy=True blanks out proxies even when https_proxy is set."""
    transport = _TransportHTTP("server", no_proxy=True)
    os.environ['https_proxy'] = "https://envproxy"
    try:
        session = transport._build_session()
    finally:
        del os.environ['https_proxy']
    assert 'http' not in session.proxies
    assert 'https' not in session.proxies
def test_build_session_proxies_kwarg_ignore_no_proxy(self):
    """An explicit proxy kwarg still applies when no_proxy=True."""
    transport = _TransportHTTP("server", proxy="https://kwargproxy",
                               no_proxy=True)
    session = transport._build_session()
    assert 'http' not in session.proxies
    assert session.proxies['https'] == "https://kwargproxy"
def test_send_without_encryption(self, monkeypatch):
    """Over HTTPS send() issues one plain SOAP request with the raw message body."""
    send_mock = MagicMock()
    monkeypatch.setattr(_TransportHTTP, "_send_request", send_mock)
    transport = _TransportHTTP("server")
    transport.send(b"message")
    assert send_mock.call_count == 1
    actual_request = send_mock.call_args[0][0]
    assert actual_request.body == b"message"
    assert actual_request.url == "https://server:5986/wsman"
    assert actual_request.headers['content-type'] == "application/soap+xml;charset=UTF-8"
def test_send_with_encryption(self, monkeypatch):
    """Over plain HTTP the payload is wrapped by WinRMEncryption before sending.

    Three sends are expected for two messages: the first request carries no
    body (presumably the auth handshake — confirm in _TransportHTTP.send),
    the next two carry the wrapped payloads.
    """
    send_mock = MagicMock()

    def send_request(self, *args, **kwargs):
        # Simulate a completed auth exchange by seeding a security context,
        # then delegate to the recording mock.
        self.session.auth.contexts['server'] = MagicMock()
        return send_mock(*args, **kwargs)

    wrap_mock = MagicMock()
    wrap_mock.return_value = "multipart/encrypted", b"wrapped"
    monkeypatch.setattr(_TransportHTTP, "_send_request", send_request)
    monkeypatch.setattr(WinRMEncryption, "wrap_message", wrap_mock)
    transport = _TransportHTTP("server", ssl=False)
    transport.send(b"message")
    transport.send(b"message 2")
    assert send_mock.call_count == 3
    actual_request1 = send_mock.call_args_list[0][0][0]
    actual_request2 = send_mock.call_args_list[1][0][0]
    actual_request3 = send_mock.call_args_list[2][0][0]
    assert actual_request1.body is None
    assert actual_request1.url == "http://server:5985/wsman"
    assert actual_request2.body == b"wrapped"
    assert actual_request2.headers['content-type'] == \
        'multipart/encrypted;protocol="application/' \
        'HTTP-SPNEGO-session-encrypted";boundary="Encrypted Boundary"'
    assert actual_request2.url == "http://server:5985/wsman"
    assert actual_request3.body == b"wrapped"
    assert actual_request3.headers['content-type'] == \
        'multipart/encrypted;protocol="application/' \
        'HTTP-SPNEGO-session-encrypted";boundary="Encrypted Boundary"'
    assert actual_request3.url == "http://server:5985/wsman"
    assert wrap_mock.call_count == 2
    assert wrap_mock.call_args_list[0][0][0] == b"message"
    assert wrap_mock.call_args_list[1][0][0] == b"message 2"
def test_send_default(self, monkeypatch):
    """_send_request returns the response content and uses the default (30, 30) timeout."""
    response = requests.Response()
    response.status_code = 200
    response._content = b"content"
    response.headers['content-type'] = "application/soap+xml;charset=UTF-8"
    send_mock = MagicMock()
    send_mock.return_value = response
    monkeypatch.setattr(requests.Session, "send", send_mock)
    transport = _TransportHTTP("server", ssl=True)
    session = transport._build_session()
    transport.session = session
    request = requests.Request('POST', transport.endpoint, data=b"data")
    prep_request = session.prepare_request(request)
    actual = transport._send_request(prep_request)
    assert actual == b"content"
    assert send_mock.call_count == 1
    assert send_mock.call_args[0] == (prep_request,)
    # (connect, read) timeout tuple passed through to requests.
    assert send_mock.call_args[1]['timeout'] == (30, 30)
def test_send_timeout_kwargs(self, monkeypatch):
    """connection_timeout/read_timeout kwargs become the requests timeout tuple."""
    response = requests.Response()
    response.status_code = 200
    response._content = b"content"
    response.headers['content-type'] = "application/soap+xml;charset=UTF-8"
    send_mock = MagicMock()
    send_mock.return_value = response
    monkeypatch.setattr(requests.Session, "send", send_mock)
    transport = _TransportHTTP("server", ssl=True, connection_timeout=20, read_timeout=25)
    session = transport._build_session()
    transport.session = session
    request = requests.Request('POST', transport.endpoint, data=b"data")
    prep_request = session.prepare_request(request)
    actual = transport._send_request(prep_request)
    assert actual == b"content"
    assert send_mock.call_count == 1
    assert send_mock.call_args[0] == (prep_request,)
    assert send_mock.call_args[1]['timeout'] == (20, 25)
def test_send_auth_error(self, monkeypatch):
    """A 401 response raises AuthenticationError naming user and auth provider."""
    response = requests.Response()
    response.status_code = 401
    send_mock = MagicMock()
    send_mock.return_value = response
    monkeypatch.setattr(requests.Session, "send", send_mock)
    transport = _TransportHTTP("server", ssl=True)
    session = transport._build_session()
    transport.session = session
    request = requests.Request('POST', transport.endpoint, data=b"data")
    prep_request = session.prepare_request(request)
    with pytest.raises(AuthenticationError) as err:
        transport._send_request(prep_request)
    assert str(err.value) == "Failed to authenticate the user None with " \
                             "negotiate"
def test_send_winrm_error_blank(self, monkeypatch):
    """A 500 response with empty body raises WinRMTransportError with empty response_text."""
    response = requests.Response()
    response.status_code = 500
    response._content = b""
    send_mock = MagicMock()
    send_mock.return_value = response
    monkeypatch.setattr(requests.Session, "send", send_mock)
    transport = _TransportHTTP("server", ssl=True)
    session = transport._build_session()
    transport.session = session
    request = requests.Request('POST', transport.endpoint, data=b"data")
    prep_request = session.prepare_request(request)
    with pytest.raises(WinRMTransportError) as err:
        transport._send_request(prep_request)
    assert str(err.value) == "Bad HTTP response returned from the " \
                             "server. Code: 500, Content: ''"
    assert err.value.code == 500
    assert err.value.protocol == 'http'
    assert err.value.response_text == ''
def test_send_winrm_error_content(self, monkeypatch):
    """A 500 response with a body surfaces that body in the WinRMTransportError."""
    response = requests.Response()
    response.status_code = 500
    response._content = b"error msg"
    send_mock = MagicMock()
    send_mock.return_value = response
    monkeypatch.setattr(requests.Session, "send", send_mock)
    transport = _TransportHTTP("server", ssl=True)
    session = transport._build_session()
    transport.session = session
    request = requests.Request('POST', transport.endpoint, data=b"data")
    prep_request = session.prepare_request(request)
    with pytest.raises(WinRMTransportError) as err:
        transport._send_request(prep_request)
    assert str(err.value) == "Bad HTTP response returned from the " \
                             "server. Code: 500, Content: 'error msg'"
    assert err.value.code == 500
    assert err.value.protocol == 'http'
    assert err.value.response_text == 'error msg'
def test_send_winrm_encrypted_single(self, monkeypatch):
    """A multipart/encrypted (SPNEGO) response is unwrapped with the boundary name."""
    response = requests.Response()
    response.status_code = 200
    response._content = b"content"
    response.headers['content-type'] = \
        'multipart/encrypted;protocol="application/HTTP-SPNEGO-session-' \
        'encrypted";boundary="Encrypted Boundary"'
    send_mock = MagicMock()
    send_mock.return_value = response
    unwrap_mock = MagicMock()
    unwrap_mock.return_value = b"unwrapped content"
    monkeypatch.setattr(requests.Session, "send", send_mock)
    monkeypatch.setattr(WinRMEncryption, "unwrap_message", unwrap_mock)
    transport = _TransportHTTP("server", ssl=False)
    transport.encryption = WinRMEncryption(None, None)
    session = transport._build_session()
    transport.session = session
    request = requests.Request('POST', transport.endpoint, data=b"data")
    prep_request = session.prepare_request(request)
    actual = transport._send_request(prep_request)
    assert actual == b"unwrapped content"
    assert send_mock.call_count == 1
    assert send_mock.call_args[0] == (prep_request,)
    assert send_mock.call_args[1]['timeout'] == (30, 30)
    assert unwrap_mock.call_count == 1
    assert unwrap_mock.call_args[0] == (b"content", "Encrypted Boundary")
    assert unwrap_mock.call_args[1] == {}
def test_send_winrm_encrypted_multiple(self, monkeypatch):
    """A multipart/x-multi-encrypted (CredSSP) response is unwrapped the same way."""
    response = requests.Response()
    response.status_code = 200
    response._content = b"content"
    response.headers['content-type'] = \
        'multipart/x-multi-encrypted;protocol="application/HTTP-CredSSP-' \
        'session-encrypted";boundary="Encrypted Boundary"'
    send_mock = MagicMock()
    send_mock.return_value = response
    unwrap_mock = MagicMock()
    unwrap_mock.return_value = b"unwrapped content"
    monkeypatch.setattr(requests.Session, "send", send_mock)
    monkeypatch.setattr(WinRMEncryption, "unwrap_message", unwrap_mock)
    transport = _TransportHTTP("server", ssl=False)
    transport.encryption = WinRMEncryption(None, None)
    session = transport._build_session()
    transport.session = session
    request = requests.Request('POST', transport.endpoint, data=b"data")
    prep_request = session.prepare_request(request)
    actual = transport._send_request(prep_request)
    assert actual == b"unwrapped content"
    assert send_mock.call_count == 1
    assert send_mock.call_args[0] == (prep_request,)
    assert send_mock.call_args[1]['timeout'] == (30, 30)
    assert unwrap_mock.call_count == 1
    assert unwrap_mock.call_args[0] == (b"content", "Encrypted Boundary")
    assert unwrap_mock.call_args[1] == {}
@pytest.mark.parametrize('ssl, server, port, path, expected', [
    [True, 'server', 5986, 'wsman', 'https://server:5986/wsman'],
    [False, 'server', 5985, 'wsman', 'http://server:5985/wsman'],
    [False, 'server', 5985, 'iis-wsman', 'http://server:5985/iis-wsman'],
    [True, '127.0.0.1', 443, 'wsman', 'https://127.0.0.1:443/wsman'],
    # Bare IPv6 addresses are compressed and bracketed in the URL.
    [False, '2001:0db8:0a0b:12f0:0000:0000:0000:0001', 80, 'path',
     'http://[2001:db8:a0b:12f0::1]:80/path'],
    [False, '2001:db8:a0b:12f0::1', 80, 'path',
     'http://[2001:db8:a0b:12f0::1]:80/path'],
    [False, '2001:0db8:0a0b:12f0:0001:0001:0001:0001', 5985, 'wsman',
     'http://[2001:db8:a0b:12f0:1:1:1:1]:5985/wsman'],
    [False, 'FE80::0202:B3FF:FE1E:8329', 5985, 'wsman',
     'http://[fe80::202:b3ff:fe1e:8329]:5985/wsman'],
    # Already-bracketed addresses are used verbatim (no compression).
    [True, '[2001:0db8:0a0b:12f0:0000:0000:0000:0001]', 5986, 'wsman',
     'https://[2001:0db8:0a0b:12f0:0000:0000:0000:0001]:5986/wsman'],
])
def test_endpoint_forms(self, ssl, server, port, path, expected):
    """_create_endpoint builds the WSMan URL for hostnames, IPv4 and IPv6 servers."""
    actual = _TransportHTTP._create_endpoint(ssl, server, port, path)
    assert actual == expected
| 46.472699
| 929
| 0.623838
| 6,969
| 59,578
| 5.194863
| 0.073755
| 0.016048
| 0.027705
| 0.013673
| 0.845869
| 0.809077
| 0.768693
| 0.739939
| 0.71983
| 0.708809
| 0
| 0.040773
| 0.255715
| 59,578
| 1,281
| 930
| 46.508977
| 0.775658
| 0.02088
| 0
| 0.642923
| 0
| 0.023127
| 0.302348
| 0.041213
| 0
| 0
| 0
| 0
| 0.303423
| 1
| 0.092507
| false
| 0.018501
| 0.019426
| 0.012951
| 0.13136
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4f29288604459253d7cb9e3d68a91d869abee8de
| 201
|
py
|
Python
|
src/c3tools/lib/__init__.py
|
conao3/python-c3tools
|
2ad0259d2036ef37a67ae6774efb47e3ab0fface
|
[
"Apache-2.0"
] | null | null | null |
src/c3tools/lib/__init__.py
|
conao3/python-c3tools
|
2ad0259d2036ef37a67ae6774efb47e3ab0fface
|
[
"Apache-2.0"
] | null | null | null |
src/c3tools/lib/__init__.py
|
conao3/python-c3tools
|
2ad0259d2036ef37a67ae6774efb47e3ab0fface
|
[
"Apache-2.0"
] | null | null | null |
from . import openapi # noqa
from . import pydantic # noqa
from . import random # noqa
from . import stdin # noqa
from . import string # noqa
from . import subr # noqa
from . import yaml # noqa
| 25.125
| 30
| 0.686567
| 28
| 201
| 4.928571
| 0.357143
| 0.507246
| 0.608696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.243781
| 201
| 7
| 31
| 28.714286
| 0.907895
| 0.169154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4f58a870209c7214a98dfab5efb00fcaf42d5382
| 332
|
py
|
Python
|
clpy/statistics/__init__.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 142
|
2018-06-07T07:43:10.000Z
|
2021-10-30T21:06:32.000Z
|
clpy/statistics/__init__.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 282
|
2018-06-07T08:35:03.000Z
|
2021-03-31T03:14:32.000Z
|
clpy/statistics/__init__.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 19
|
2018-06-19T11:07:53.000Z
|
2021-05-13T20:57:04.000Z
|
# Functions from the following NumPy document
# http://docs.scipy.org/doc/numpy/reference/routines.statistics.html
# "NOQA" to suppress flake8 warning
from clpy.statistics import correlation # NOQA
from clpy.statistics import histogram # NOQA
from clpy.statistics import meanvar # NOQA
from clpy.statistics import order # NOQA
| 36.888889
| 68
| 0.795181
| 45
| 332
| 5.866667
| 0.577778
| 0.121212
| 0.272727
| 0.363636
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003472
| 0.13253
| 332
| 8
| 69
| 41.5
| 0.913194
| 0.493976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4f69378eb6da1f4b15ae8c5cef08139c57633129
| 33
|
py
|
Python
|
deepstack/intelligencelayer/shared/scene/__init__.py
|
mayop/DeepStack
|
8b05c0a69dce65513638def0a8a21c87fd8409f1
|
[
"Apache-2.0"
] | 1
|
2021-01-03T05:47:42.000Z
|
2021-01-03T05:47:42.000Z
|
deepstack/intelligencelayer/shared/scene/__init__.py
|
robmarkcole/DeepStack
|
8b05c0a69dce65513638def0a8a21c87fd8409f1
|
[
"Apache-2.0"
] | null | null | null |
deepstack/intelligencelayer/shared/scene/__init__.py
|
robmarkcole/DeepStack
|
8b05c0a69dce65513638def0a8a21c87fd8409f1
|
[
"Apache-2.0"
] | null | null | null |
from .process import SceneModel
| 16.5
| 32
| 0.818182
| 4
| 33
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 33
| 1
| 33
| 33
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4fac9249a4507ce0b7812ed76dd2b4ea6b84810e
| 127
|
py
|
Python
|
model/__init__.py
|
Eren-Corn0712/CV_DL-Contrastive-Learning
|
c59ba5e2ae31c14ef4e175c79e3575e2cc7c439c
|
[
"MIT"
] | null | null | null |
model/__init__.py
|
Eren-Corn0712/CV_DL-Contrastive-Learning
|
c59ba5e2ae31c14ef4e175c79e3575e2cc7c439c
|
[
"MIT"
] | null | null | null |
model/__init__.py
|
Eren-Corn0712/CV_DL-Contrastive-Learning
|
c59ba5e2ae31c14ef4e175c79e3575e2cc7c439c
|
[
"MIT"
] | null | null | null |
from .clrnet import CLRBackbone, CLRLinearClassifier, CLRClassifier
# TODO: If you want to use more model, can add on this
| 31.75
| 68
| 0.771654
| 18
| 127
| 5.444444
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181102
| 127
| 3
| 69
| 42.333333
| 0.942308
| 0.409449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
96d69a530e823000fd1676f01d9052bc2fbcbbf2
| 13
|
py
|
Python
|
models/__init__.py
|
gezekun/End-to-end-Lane-Detection-with-Convolution-and-Transformer
|
db243ad33a80069f4223598945d8c45c0af3c335
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
gezekun/End-to-end-Lane-Detection-with-Convolution-and-Transformer
|
db243ad33a80069f4223598945d8c45c0af3c335
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
gezekun/End-to-end-Lane-Detection-with-Convolution-and-Transformer
|
db243ad33a80069f4223598945d8c45c0af3c335
|
[
"MIT"
] | null | null | null |
# 2022 03 01
| 6.5
| 12
| 0.615385
| 3
| 13
| 2.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.888889
| 0.307692
| 13
| 1
| 13
| 13
| 0
| 0.769231
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96e9b2c4ce5fb14474fd6f0b1a0934bcfb5b92cf
| 919
|
py
|
Python
|
tests/threatbutt_test.py
|
ivanlei/threatbutt
|
faff507a4bebfa585d3044427111418c257c34ec
|
[
"Apache-2.0"
] | 55
|
2015-04-25T07:22:18.000Z
|
2021-05-23T15:04:52.000Z
|
tests/threatbutt_test.py
|
ivanlei/threatbutt
|
faff507a4bebfa585d3044427111418c257c34ec
|
[
"Apache-2.0"
] | null | null | null |
tests/threatbutt_test.py
|
ivanlei/threatbutt
|
faff507a4bebfa585d3044427111418c257c34ec
|
[
"Apache-2.0"
] | 8
|
2015-04-27T03:51:49.000Z
|
2021-04-28T22:17:18.000Z
|
# -*- coding: utf-8 -*-
from io import StringIO
from mock import patch
from threatbutt import ThreatButt
def test_ioc():
    """clown_strike_ioc prints non-empty output for an IP IOC."""
    with patch('sys.stdout', new=StringIO()) as fake_out:
        tb = ThreatButt()
        tb.clown_strike_ioc('127.0.0.1')
        assert len(fake_out.getvalue())
def test_md5():
    """bespoke_md5 prints non-empty output for an MD5 digest (here: MD5 of '')."""
    with patch('sys.stdout', new=StringIO()) as fake_out:
        tb = ThreatButt()
        tb.bespoke_md5('d41d8cd98f00b204e9800998ecf8427e')
        assert len(fake_out.getvalue())
def test_ioc_maltego():
    """clown_strike_ioc also prints output when Maltego formatting is enabled."""
    with patch('sys.stdout', new=StringIO()) as fake_out:
        tb = ThreatButt(maltegofy=True)
        tb.clown_strike_ioc('127.0.0.1')
        assert len(fake_out.getvalue())
def test_md5_maltego():
    """bespoke_md5 also prints output when Maltego formatting is enabled."""
    with patch('sys.stdout', new=StringIO()) as fake_out:
        tb = ThreatButt(maltegofy=True)
        tb.bespoke_md5('d41d8cd98f00b204e9800998ecf8427e')
        assert len(fake_out.getvalue())
| 26.257143
| 58
| 0.660501
| 121
| 919
| 4.85124
| 0.289256
| 0.0954
| 0.081772
| 0.122658
| 0.843271
| 0.843271
| 0.843271
| 0.831346
| 0.831346
| 0.606474
| 0
| 0.081044
| 0.207835
| 919
| 34
| 59
| 27.029412
| 0.725275
| 0.022851
| 0
| 0.695652
| 0
| 0
| 0.136161
| 0.071429
| 0
| 0
| 0
| 0
| 0.173913
| 1
| 0.173913
| false
| 0
| 0.130435
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96f42af5481a62e2bde02c9e2613ca7b569a6324
| 34
|
py
|
Python
|
tests/unittests/durable_functions/activity_trigger_no_anno/main.py
|
vrdmr/azure-functions-python-worker
|
7c9bcc4cc647f4b80a606a1e039d7cf9f3db9624
|
[
"MIT"
] | null | null | null |
tests/unittests/durable_functions/activity_trigger_no_anno/main.py
|
vrdmr/azure-functions-python-worker
|
7c9bcc4cc647f4b80a606a1e039d7cf9f3db9624
|
[
"MIT"
] | null | null | null |
tests/unittests/durable_functions/activity_trigger_no_anno/main.py
|
vrdmr/azure-functions-python-worker
|
7c9bcc4cc647f4b80a606a1e039d7cf9f3db9624
|
[
"MIT"
] | null | null | null |
def main(input):
    """Echo the activity-trigger payload back unchanged.

    NOTE(review): the parameter shadows the builtin ``input``; the name is
    presumably required by the Azure Functions binding (bound by name) —
    confirm before renaming.
    """
    return input
| 11.333333
| 16
| 0.676471
| 5
| 34
| 4.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 34
| 2
| 17
| 17
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
96f4e56b165e717caa6240e5ba13778f3a0000b3
| 66
|
py
|
Python
|
sample/sample.py
|
eaybek/utilio
|
5299558adce907ac40ef46da3e2ce8d4b5e16324
|
[
"MIT"
] | null | null | null |
sample/sample.py
|
eaybek/utilio
|
5299558adce907ac40ef46da3e2ce8d4b5e16324
|
[
"MIT"
] | null | null | null |
sample/sample.py
|
eaybek/utilio
|
5299558adce907ac40ef46da3e2ce8d4b5e16324
|
[
"MIT"
] | null | null | null |
from utilio.utilio import Utilio
class Utilio(object):
    """Empty sample class.

    NOTE(review): this definition shadows the ``Utilio`` imported on the
    line above, making that import effectively unused — confirm intent.
    """
    pass
| 11
| 32
| 0.742424
| 9
| 66
| 5.444444
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19697
| 66
| 5
| 33
| 13.2
| 0.924528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
8c6246e872f4ef25f0b1c211a1f937d90135e2ea
| 25
|
py
|
Python
|
fun.py
|
omribrand/MyNewRepo
|
4315095715451944ec5d8ea02a6b5136a93fc5e7
|
[
"MIT"
] | null | null | null |
fun.py
|
omribrand/MyNewRepo
|
4315095715451944ec5d8ea02a6b5136a93fc5e7
|
[
"MIT"
] | null | null | null |
fun.py
|
omribrand/MyNewRepo
|
4315095715451944ec5d8ea02a6b5136a93fc5e7
|
[
"MIT"
] | null | null | null |
def bka(a):
    """Return twice the given value."""
    factor = 2
    return factor * a
| 8.333333
| 12
| 0.6
| 6
| 25
| 2.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.24
| 25
| 2
| 13
| 12.5
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
4fb2bfc1cd80130fe83c4c692e0d3f8c0c33c061
| 28
|
py
|
Python
|
tcopy/__init__.py
|
Bogdanp/tcopy
|
bfb072d47317a22e133e7ca8e400cafcf304f85e
|
[
"MIT"
] | 4
|
2015-04-06T14:48:25.000Z
|
2020-08-15T00:09:23.000Z
|
tcopy/__init__.py
|
Bogdanp/tcopy
|
bfb072d47317a22e133e7ca8e400cafcf304f85e
|
[
"MIT"
] | null | null | null |
tcopy/__init__.py
|
Bogdanp/tcopy
|
bfb072d47317a22e133e7ca8e400cafcf304f85e
|
[
"MIT"
] | null | null | null |
from tco import tco # noqa
| 14
| 27
| 0.714286
| 5
| 28
| 4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 28
| 1
| 28
| 28
| 0.952381
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8b0189a40e9f73ddfc01fa6402b45f76ffdefef4
| 4,056
|
py
|
Python
|
Source/WebCore/Modules/webgpu/WHLSL/WHLSLBuildStandardLibraryFunctionMap.py
|
jacadcaps/webkitty
|
9aebd2081349f9a7b5d168673c6f676a1450a66d
|
[
"BSD-2-Clause"
] | 6
|
2021-07-05T16:09:39.000Z
|
2022-03-06T22:44:42.000Z
|
Source/WebCore/Modules/webgpu/WHLSL/WHLSLBuildStandardLibraryFunctionMap.py
|
jacadcaps/webkitty
|
9aebd2081349f9a7b5d168673c6f676a1450a66d
|
[
"BSD-2-Clause"
] | 7
|
2022-03-15T13:25:39.000Z
|
2022-03-15T13:25:44.000Z
|
Source/WebCore/Modules/webgpu/WHLSL/WHLSLBuildStandardLibraryFunctionMap.py
|
jacadcaps/webkitty
|
9aebd2081349f9a7b5d168673c6f676a1450a66d
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (C) 2014 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import sys
import re
# Matches delimiter comments like "/* Functions named foo */" in the WHLSL
# standard library source; group 1 captures the function name. A raw string
# is used so "\*" is a literal regex escape, not an invalid Python string
# escape (which raises a warning on modern Python).
regularExpression = re.compile(r"/\* Functions named (.*) \*/")

# Read the whole standard library source (argv[1]) so byte offsets of each
# named-function section can be recorded.
with open(sys.argv[1], "r") as infile:
    contents = infile.read()

# Generate the C++ map (argv[2]) from function name to the [start, end)
# substring location of its section in the standard library source.
with open(sys.argv[2], "w") as outfile:
    outfile.write("""
/*
 * Copyright (C) 2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "WHLSLStandardLibraryFunctionMap.h"
#if ENABLE(WEBGPU)
namespace WebCore {
namespace WHLSL {
HashMap<String, SubstringLocation> computeStandardLibraryFunctionMap()
{
    HashMap<String, SubstringLocation> result;
""")
    # FIXME: Compute StandardLibraryFunctionMap at build-time https://bugs.webkit.org/show_bug.cgi?id=199448
    previous = 0
    previousName = ""
    boundary = 0
    for match in regularExpression.finditer(contents):
        if previous == 0:
            # First marker: remember where the function sections begin, but
            # emit nothing yet — a section ends only at the next marker.
            previous = match.start()
            boundary = previous
            previousName = match.group(1)
            continue
        outfile.write("    result.add(\"" + previousName + "\"_str, SubstringLocation { " + str(previous) + ", " + str(match.start()) + " });\n")
        previous = match.start()
        previousName = match.group(1)
    # The final section extends to the end of the source file.
    outfile.write("    result.add(\"" + previousName + "\"_str, SubstringLocation { " + str(previous) + ", " + str(len(contents)) + " });\n")
    outfile.write("""
    return result;
}
unsigned firstFunctionOffsetInStandardLibrary()
{
""")
    outfile.write("    return " + str(boundary) + ";\n")
    outfile.write("""
}
}
}
#endif
""")
| 36.540541
| 141
| 0.735207
| 530
| 4,056
| 5.620755
| 0.332075
| 0.016113
| 0.022826
| 0.030883
| 0.731118
| 0.731118
| 0.731118
| 0.731118
| 0.731118
| 0.731118
| 0
| 0.007454
| 0.173077
| 4,056
| 110
| 142
| 36.872727
| 0.880739
| 0.3464
| 0
| 0.147059
| 0
| 0
| 0.713252
| 0.041127
| 0
| 0
| 0
| 0.009091
| 0
| 1
| 0
| false
| 0
| 0.029412
| 0
| 0.044118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8ca57d60888fa3136e119e2392629b7982bdb971
| 33
|
py
|
Python
|
highcharts/highcharts/__init__.py
|
Jbrunn/python-highcharts
|
a4c488ae5c2e125616efad5a722f3dfd8a9bc450
|
[
"MIT"
] | 370
|
2015-10-07T20:13:10.000Z
|
2022-03-31T03:43:17.000Z
|
highcharts/highcharts/__init__.py
|
Jbrunn/python-highcharts
|
a4c488ae5c2e125616efad5a722f3dfd8a9bc450
|
[
"MIT"
] | 67
|
2016-03-14T12:18:44.000Z
|
2022-02-24T09:24:31.000Z
|
highcharts/highcharts/__init__.py
|
Jbrunn/python-highcharts
|
a4c488ae5c2e125616efad5a722f3dfd8a9bc450
|
[
"MIT"
] | 159
|
2016-02-25T15:07:52.000Z
|
2022-03-12T13:04:14.000Z
|
from .highcharts import Highchart
| 33
| 33
| 0.878788
| 4
| 33
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 33
| 1
| 33
| 33
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8cbb25d411bc5b5ca7f91784d5c0f70ff00ac433
| 6,324
|
py
|
Python
|
nets/unet.py
|
mymsimple/table-detect
|
99a659346449734b905f7be165d30ff667b6cf93
|
[
"MIT"
] | null | null | null |
nets/unet.py
|
mymsimple/table-detect
|
99a659346449734b905f7be165d30ff667b6cf93
|
[
"MIT"
] | null | null | null |
nets/unet.py
|
mymsimple/table-detect
|
99a659346449734b905f7be165d30ff667b6cf93
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 9 23:11:51 2020
table line detect
@author: chineseocr
"""
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D
from tensorflow.keras.layers import LeakyReLU
def unet(input_shape=(512, 512, 3),num_classes=1):
inputs = Input(shape=input_shape)
# 512
use_bias=False
down0a = Conv2D(16, (3, 3), padding='same',use_bias=use_bias)(inputs)
down0a = BatchNormalization()(down0a)
down0a = LeakyReLU(alpha=0.1)(down0a)
down0a = Conv2D(16, (3, 3), padding='same',use_bias=use_bias)(down0a)
down0a = BatchNormalization()(down0a)
down0a = LeakyReLU(alpha=0.1)(down0a)
down0a_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0a)
# 256
down0 = Conv2D(32, (3, 3), padding='same',use_bias=use_bias)(down0a_pool)
down0 = BatchNormalization()(down0)
down0 = LeakyReLU(alpha=0.1)(down0)
down0 = Conv2D(32, (3, 3), padding='same',use_bias=use_bias)(down0)
down0 = BatchNormalization()(down0)
down0 =LeakyReLU(alpha=0.1)(down0)
down0_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0)
# 128
down1 = Conv2D(64, (3, 3), padding='same',use_bias=use_bias)(down0_pool)
down1 = BatchNormalization()(down1)
down1 = LeakyReLU(alpha=0.1)(down1)
down1 = Conv2D(64, (3, 3), padding='same',use_bias=use_bias)(down1)
down1 = BatchNormalization()(down1)
down1 = LeakyReLU(alpha=0.1)(down1)
down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
# 64
down2 = Conv2D(128, (3, 3), padding='same',use_bias=use_bias)(down1_pool)
down2 = BatchNormalization()(down2)
down2 = LeakyReLU(alpha=0.1)(down2)
down2 = Conv2D(128, (3, 3), padding='same',use_bias=use_bias)(down2)
down2 = BatchNormalization()(down2)
down2 = LeakyReLU(alpha=0.1)(down2)
down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
# 32
down3 = Conv2D(256, (3, 3), padding='same',use_bias=use_bias)(down2_pool)
down3 = BatchNormalization()(down3)
down3 = LeakyReLU(alpha=0.1)(down3)
down3 = Conv2D(256, (3, 3), padding='same',use_bias=use_bias)(down3)
down3 = BatchNormalization()(down3)
down3 = LeakyReLU(alpha=0.1)(down3)
down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)
# 16
down4 = Conv2D(512, (3, 3), padding='same',use_bias=use_bias)(down3_pool)
down4 = BatchNormalization()(down4)
down4 = LeakyReLU(alpha=0.1)(down4)
down4 = Conv2D(512, (3, 3), padding='same',use_bias=use_bias)(down4)
down4 = BatchNormalization()(down4)
down4 = LeakyReLU(alpha=0.1)(down4)
down4_pool = MaxPooling2D((2, 2), strides=(2, 2))(down4)
# 8
center = Conv2D(1024, (3, 3), padding='same',use_bias=use_bias)(down4_pool)
center = BatchNormalization()(center)
center = LeakyReLU(alpha=0.1)(center)
center = Conv2D(1024, (3, 3), padding='same',use_bias=use_bias)(center)
center = BatchNormalization()(center)
center = LeakyReLU(alpha=0.1)(center)
# center
up4 = UpSampling2D((2, 2))(center)
up4 = concatenate([down4, up4], axis=3)
up4 = Conv2D(512, (3, 3), padding='same',use_bias=use_bias)(up4)
up4 = BatchNormalization()(up4)
up4 = LeakyReLU(alpha=0.1)(up4)
up4 = Conv2D(512, (3, 3), padding='same',use_bias=use_bias)(up4)
up4 = BatchNormalization()(up4)
up4 = LeakyReLU(alpha=0.1)(up4)
up4 = Conv2D(512, (3, 3), padding='same',use_bias=use_bias)(up4)
up4 = BatchNormalization()(up4)
up4 = LeakyReLU(alpha=0.1)(up4)
# 16
up3 = UpSampling2D((2, 2))(up4)
up3 = concatenate([down3, up3], axis=3)
up3 = Conv2D(256, (3, 3), padding='same',use_bias=use_bias)(up3)
up3 = BatchNormalization()(up3)
up3 = LeakyReLU(alpha=0.1)(up3)
up3 = Conv2D(256, (3, 3), padding='same',use_bias=use_bias)(up3)
up3 = BatchNormalization()(up3)
up3 = LeakyReLU(alpha=0.1)(up3)
up3 = Conv2D(256, (3, 3), padding='same',use_bias=use_bias)(up3)
up3 = BatchNormalization()(up3)
up3 = LeakyReLU(alpha=0.1)(up3)
# 32
up2 = UpSampling2D((2, 2))(up3)
up2 = concatenate([down2, up2], axis=3)
up2 = Conv2D(128, (3, 3), padding='same',use_bias=use_bias)(up2)
up2 = BatchNormalization()(up2)
up2 = LeakyReLU(alpha=0.1)(up2)
up2 = Conv2D(128, (3, 3), padding='same',use_bias=use_bias)(up2)
up2 = BatchNormalization()(up2)
up2 = LeakyReLU(alpha=0.1)(up2)
up2 = Conv2D(128, (3, 3), padding='same',use_bias=use_bias)(up2)
up2 = BatchNormalization()(up2)
up2 = LeakyReLU(alpha=0.1)(up2)
# 64
up1 = UpSampling2D((2, 2))(up2)
up1 = concatenate([down1, up1], axis=3)
up1 = Conv2D(64, (3, 3), padding='same',use_bias=use_bias)(up1)
up1 = BatchNormalization()(up1)
up1 = LeakyReLU(alpha=0.1)(up1)
up1 = Conv2D(64, (3, 3), padding='same',use_bias=use_bias)(up1)
up1 = BatchNormalization()(up1)
up1 = LeakyReLU(alpha=0.1)(up1)
up1 = Conv2D(64, (3, 3), padding='same',use_bias=use_bias)(up1)
up1 = BatchNormalization()(up1)
up1 = LeakyReLU(alpha=0.1)(up1)
# 128
up0 = UpSampling2D((2, 2))(up1)
up0 = concatenate([down0, up0], axis=3)
up0 = Conv2D(32, (3, 3), padding='same',use_bias=use_bias)(up0)
up0 = BatchNormalization()(up0)
up0 = LeakyReLU(alpha=0.1)(up0)
up0 = Conv2D(32, (3, 3), padding='same',use_bias=use_bias)(up0)
up0 = BatchNormalization()(up0)
up0 = LeakyReLU(alpha=0.1)(up0)
up0 = Conv2D(32, (3, 3), padding='same',use_bias=use_bias)(up0)
up0 = BatchNormalization()(up0)
up0 = LeakyReLU(alpha=0.1)(up0)
# 256
up0a = UpSampling2D((2, 2))(up0)
up0a = concatenate([down0a, up0a], axis=3)
up0a = Conv2D(16, (3, 3), padding='same',use_bias=use_bias)(up0a)
up0a = BatchNormalization()(up0a)
up0a = LeakyReLU(alpha=0.1)(up0a)
up0a = Conv2D(16, (3, 3), padding='same',use_bias=use_bias)(up0a)
up0a = BatchNormalization()(up0a)
up0a = LeakyReLU(alpha=0.1)(up0a)
up0a = Conv2D(16, (3, 3), padding='same',use_bias=use_bias)(up0a)
up0a = BatchNormalization()(up0a)
up0a = LeakyReLU(alpha=0.1)(up0a)
# 512
classify = Conv2D(num_classes, (1, 1), activation='sigmoid')(up0a)
model = Model(inputs=inputs, outputs=classify)
return model
| 38.797546
| 110
| 0.646743
| 891
| 6,324
| 4.499439
| 0.088664
| 0.113495
| 0.071838
| 0.103767
| 0.813669
| 0.798204
| 0.798204
| 0.757795
| 0.757795
| 0.74233
| 0
| 0.110727
| 0.175996
| 6,324
| 162
| 111
| 39.037037
| 0.658607
| 0.026407
| 0
| 0.666667
| 0
| 0
| 0.022008
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00813
| false
| 0
| 0.02439
| 0
| 0.04065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5069c9abbef88f46faee541c6e023d234368e699
| 125
|
py
|
Python
|
tests/setup-transaction_generator/test_setup.py
|
metronotes-testing/transaction-generator
|
98f00e7041ce01f80d0df2f3fc58c0157552ad97
|
[
"MIT"
] | 1
|
2020-01-30T05:25:36.000Z
|
2020-01-30T05:25:36.000Z
|
tests/setup-transaction_generator/test_setup.py
|
harmony-one/transaction-generator
|
98f00e7041ce01f80d0df2f3fc58c0157552ad97
|
[
"MIT"
] | null | null | null |
tests/setup-transaction_generator/test_setup.py
|
harmony-one/transaction-generator
|
98f00e7041ce01f80d0df2f3fc58c0157552ad97
|
[
"MIT"
] | null | null | null |
import pytest
import transaction_generator as tx_gen
@pytest.fixture(scope="session", autouse=True)
def setup():
pass
| 13.888889
| 46
| 0.76
| 17
| 125
| 5.470588
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144
| 125
| 8
| 47
| 15.625
| 0.869159
| 0
| 0
| 0
| 0
| 0
| 0.056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
50c9782d4a6b4a7570201b5f44808e9f18869182
| 132
|
py
|
Python
|
pose_format/tensorflow/masked/__init__.py
|
yairc2223/pose-format
|
6556433193582f8a7ed80d58d19ec11749e8606b
|
[
"MIT"
] | 11
|
2020-09-02T02:58:23.000Z
|
2022-01-20T09:17:26.000Z
|
pose_format/tensorflow/masked/__init__.py
|
yairc2223/pose-format
|
6556433193582f8a7ed80d58d19ec11749e8606b
|
[
"MIT"
] | 5
|
2021-12-10T15:48:59.000Z
|
2022-02-21T15:53:20.000Z
|
pose_format/tensorflow/masked/__init__.py
|
yairc2223/pose-format
|
6556433193582f8a7ed80d58d19ec11749e8606b
|
[
"MIT"
] | 6
|
2020-09-21T02:21:26.000Z
|
2022-02-05T17:18:44.000Z
|
from pose_format.tensorflow.masked.tensor import MaskedTensor
from pose_format.tensorflow.masked.tensorflow import MaskedTensorflow
| 44
| 69
| 0.893939
| 16
| 132
| 7.25
| 0.5625
| 0.137931
| 0.241379
| 0.413793
| 0.517241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 132
| 2
| 70
| 66
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
50d050397171e66f93442e98d2a2871b8d0c4f08
| 2,376
|
py
|
Python
|
xndtools/kernel_generator/tests/test_test_mixed.py
|
xnd-project/xndtools
|
9478f31954091d861ce538ba278f7f888e23d19b
|
[
"BSD-3-Clause"
] | 3
|
2019-11-12T16:01:26.000Z
|
2020-06-27T19:27:27.000Z
|
xndtools/kernel_generator/tests/test_test_mixed.py
|
xnd-project/xndtools
|
9478f31954091d861ce538ba278f7f888e23d19b
|
[
"BSD-3-Clause"
] | 4
|
2018-04-25T17:12:43.000Z
|
2018-08-23T18:17:24.000Z
|
xndtools/kernel_generator/tests/test_test_mixed.py
|
xnd-project/xndtools
|
9478f31954091d861ce538ba278f7f888e23d19b
|
[
"BSD-3-Clause"
] | 6
|
2018-05-04T08:10:40.000Z
|
2019-03-19T10:00:21.000Z
|
import pytest
from xndtools.kernel_generator.utils import NormalizedTypeMap
from xnd import xnd
m = pytest.importorskip("test_mixed")
long_t = NormalizedTypeMap()('long')
def assert_equal(x, y):
assert x == y and x.dtype == y.dtype
def test_mixed_matrices_CF_inout():
a = xnd([[10, 20],
[30, 40]], type=f'2 * 2 * {long_t}')
b = xnd([[5, 6],
[7, 8]], type=f'!2 * 2 * {long_t}')
r = m.test_mixed_matrices_inout_CF(a, b)
assert_equal(r, xnd(26, type=long_t))
a = xnd([[10, 20],
[30, 40]], type=f'!2 * 2 * {long_t}')
b = xnd([[5, 6],
[7, 8]], type=f'2 * 2 * {long_t}')
with pytest.raises(ValueError, match=r'.* must be C-contiguous .*'):
r = m.test_mixed_matrices_inout_CF(a, b)
a = xnd([[10, 20],
[30, 40]], type=f'2 * 2 * {long_t}')
b = xnd([[5, 6],
[7, 8]], type=f'2 * 2 * {long_t}')
with pytest.raises(ValueError, match=r'.* must be F-contiguous .*'):
r = m.test_mixed_matrices_inout_CF(a, b)
def test_mixed_matrices_FC_inout():
a = xnd([[10, 20],
[30, 40]], type=f'!2 * 2 * {long_t}')
b = xnd([[5, 6],
[7, 8]], type=f'2 * 2 * {long_t}')
r = m.test_mixed_matrices_inout_FC(a, b)
assert_equal(r, xnd(37, type=long_t))
def test_mixed_matrices_CC_inout():
a = xnd([[10, 20],
[30, 40]], type=f'2 * 2 * {long_t}')
b = xnd([[5, 6],
[7, 8]], type=f'2 * 2 * {long_t}')
r = m.test_mixed_matrices_inout_CC(a, b)
assert_equal(r, xnd(27, type=long_t))
a = xnd([[10, 20],
[30, 40]], type=f'!2 * 2 * {long_t}')
b = xnd([[5, 6],
[7, 8]], type=f'!2 * 2 * {long_t}')
with pytest.raises(ValueError, match=r'.* must be C-contiguous .*'):
r = m.test_mixed_matrices_inout_CC(a, b)
def test_mixed_matrices_FF_inout():
a = xnd([[10, 20],
[30, 40]], type=f'!2 * 2 * {long_t}')
b = xnd([[5, 6],
[7, 8]], type=f'!2 * 2 * {long_t}')
r = m.test_mixed_matrices_inout_FF(a, b)
assert_equal(r, xnd(36, type=long_t))
a = xnd([[10, 20],
[30, 40]], type=f'2 * 2 * {long_t}')
b = xnd([[5, 6],
[7, 8]], type=f'2 * 2 * {long_t}')
with pytest.raises(ValueError, match=r'.* must be F-contiguous .*'):
r = m.test_mixed_matrices_inout_FF(a, b)
| 30.857143
| 72
| 0.520202
| 395
| 2,376
| 2.936709
| 0.136709
| 0.090517
| 0.082759
| 0.096552
| 0.790517
| 0.790517
| 0.702586
| 0.702586
| 0.702586
| 0.685345
| 0
| 0.07907
| 0.276094
| 2,376
| 76
| 73
| 31.263158
| 0.595349
| 0
| 0
| 0.728814
| 0
| 0
| 0.160354
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 1
| 0.084746
| false
| 0
| 0.067797
| 0
| 0.152542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ff32fc3b840e09f50cb50e169fb701aecb71f60
| 23,947
|
py
|
Python
|
core/app.py
|
Volentix/EZEOS
|
611048ace245e0b5776fe23b0eb32352a01300ae
|
[
"MIT"
] | 12
|
2018-07-06T01:49:45.000Z
|
2019-05-15T20:54:07.000Z
|
core/app.py
|
Volentix/EZEOS
|
611048ace245e0b5776fe23b0eb32352a01300ae
|
[
"MIT"
] | 1
|
2018-11-02T14:31:48.000Z
|
2018-11-02T14:31:48.000Z
|
core/app.py
|
Volentix/EZEOS
|
611048ace245e0b5776fe23b0eb32352a01300ae
|
[
"MIT"
] | 4
|
2018-07-17T21:07:37.000Z
|
2019-05-15T06:32:42.000Z
|
#!/usr/bin/env python
# coding: utf-8
from tkinter import Tk
from gui.ui import UI
from core import DOCKER_CONTAINER_NAME
from core import TIMEOUT
from core import btc, bch, dash, eth, ltc, neo, xmr
import subprocess
import os
import signal
def run():
global app
root = Tk()
app = UI(root)
# Application
getCleosCommand()
# print(app.tabPanel.producer.get())
root.lift()
root.attributes('-topmost', True)
root.attributes('-topmost', False)
root.mainloop()
def handler(signum, frame):
raise RuntimeError("End of time")
def getCleosCommand():
# TODO The docker has to be removed since was deprecated.
global DOCKER_COMMAND
DOCKER_COMMAND = ['docker', 'exec', DOCKER_CONTAINER_NAME]
CLEOS_COMMAND = ['/opt/eosio/bin/cleos', '-h']
global cleos
# try:
# subprocess.check_output(DOCKER_COMMAND+CLEOS_COMMAND)
# except OSError as e:
# cleos = ['cleos']
# except Exception as e:
# cleos = ['cleos']
# else:
# cleos = ['docker', 'exec', DOCKER_CONTAINER_NAME, '/opt/eosio/bin/cleos']
try:
subprocess.check_output(['cleos', '-h'])
except OSError as e:
app.outputPanel.logger('Can not find the cleos command.\n' + str(e))
except Exception as e:
app.outputPanel.logger('Something went wrong \n' + str(e))
else:
cleos = ['cleos']
# Logic functions
def getProducerInfo():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'get', 'info'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Producer is not available\n' + str(e)
except Exception as e:
print(e)
out = 'Could not get info.\n' + str(e)
finally:
app.outputPanel.logger(out)
def getBlockInfo():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(),
'get', 'block', app.tabPanel.blockNumber.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get block info\n' + str(e)
except Exception as e:
print(e)
out = 'Could not get block info.\n' + str(e)
finally:
app.outputPanel.logger(out)
def getBlockProducers():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(),
'system', 'listproducers'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get producer list\n' + str(e)
except Exception as e:
print(e)
out = "Could not get producer list.\n" + str(e)
finally:
app.outputPanel.logger(out)
def getWalletList():
try:
out = subprocess.run(cleos + ['wallet', 'list'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get wallet list\n' + str(e)
except Exception as e:
print(e)
out = "Could not get wallet list. \n" + str(e)
finally:
app.outputPanel.logger(out)
def getWalletListFilesystem():
if 'docker' in cleos:
# docker exec eos ls /root/eosio-wallet | egrep '\.wallet$'
out = b"Found wallets in filesystem inside docker container:\n> /root/eosio-wallet\n\n"
com = " ".join(DOCKER_COMMAND + ['ls', '/root/eosio-wallet', '|', 'egrep', '\.wallet$'])
out += subprocess.check_output(com, shell=True)
else:
# ls ~/eosio-wallet | egrep '\.wallet$'
out = b"Found wallets in filesystem:\n> ~/eosio-wallet\n\n"
com = " ".join(['ls', '~/eosio-wallet', '|', 'egrep', '\.wallet$'])
out += subprocess.check_output(com, shell=True)
app.outputPanel.logger(out)
def createWallet():
toConsole = app.tabPanel.toConsole.get()
if 'docker' in cleos:
# docker - cleos wallet create -n twal --file /root/twal saved indide docker /root/
try:
if toConsole == '--to-console':
out = subprocess.run(cleos + ['wallet', 'create', '-n', app.tabPanel.walletName.get(),
'--to-console'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
elif toConsole == '--file':
out = subprocess.run(cleos + ['wallet', 'create', '-n', app.tabPanel.walletName.get(),
'--file', "/root/" + app.tabPanel.walletName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not create wallet\n' + str(e)
except Exception as e:
print(e)
out = "Could not create wallet.\n" + str(e)
finally:
app.tabPanel.openWalletName.insert(0, app.tabPanel.walletName.get())
app.outputPanel.logger(out)
else:
walletDir = os.environ['HOME'] + '/eosio-wallet'
if not os.path.exists(walletDir):
os.makedirs(walletDir)
try:
if toConsole == '--to-console':
out = subprocess.run(cleos + ['wallet', 'create', '-n', app.tabPanel.walletName.get(),
'--to-console'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
elif toConsole == '--file':
out = subprocess.run(cleos + ['wallet', 'create', '-n', app.tabPanel.walletName.get(),
'--file', walletDir + "/" + app.tabPanel.walletName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not create wallet\n' + str(e)
except Exception as e:
print(e)
out = "Could not create wallet.\n" + str(e)
finally:
app.tabPanel.openWalletName.insert(0, app.tabPanel.walletName.get())
app.outputPanel.logger(out)
def openWallet():
try:
out = subprocess.run(cleos + ['wallet', 'open', '-n', app.tabPanel.openWalletName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not open the wallet\n' + str(e)
except Exception as e:
print(e)
out = 'Could not open the wallet.\n' + str(e)
finally:
if 'Opened' in out:
out += "\nRemember this wallet as default for this core session!"
app.outputPanel.logger(out)
def unlockWallet(password):
try:
out = subprocess.run(cleos + ['wallet', 'unlock', '-n', app.tabPanel.openWalletName.get(), '--password', password],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Unlock the wallet\n' + str(e)
except Exception as e:
print(e)
out = 'Could not unlock the wallet.\n' + str(e)
finally:
app.outputPanel.logger(out)
def showKeys():
try:
out = subprocess.run(cleos + ['wallet', 'keys'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not show keys\n' + str(e)
except Exception as e:
print(e)
out = 'Could not show keys.\n' + str(e)
finally:
app.outputPanel.logger(out)
def showPrivateKeys(password):
try:
out = subprocess.run(cleos + ['wallet', 'private_keys', '-n', app.tabPanel.openWalletName.get(), '--password', password],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not show private keys\n' + str(e)
except Exception as e:
print(e)
out = 'Could not show private keys.\n' + str(e)
finally:
app.outputPanel.logger(out)
def importKey(key):
try:
out = subprocess.run(cleos + ['wallet', 'import', '-n', app.tabPanel.openWalletName.get(), '--private-key', key],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not import the key\n' + str(e)
except Exception as e:
print(e)
out = 'Could not import the key.\n' + str(e)
finally:
app.outputPanel.logger(out)
def createKeys():
# TODO add --tofile feature
try:
out = subprocess.run(cleos + ['create', 'key', '--to-console'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not create keys\n' + str(e)
except Exception as e:
print(e)
out = 'Could not create keys.\n' + str(e)
finally:
app.outputPanel.logger(out)
def compileContract():
cpp = app.tabPanel.contractFileCPP.get()
wasm = app.tabPanel.contractFileWASM.get()
wast = app.tabPanel.contractFileWAST.get()
abi = app.tabPanel.contractFileABI.get()
try:
out = subprocess.run(['eosio-cpp', '-o', wasm, cpp, '--abigen'],
timeout=TIMEOUT+60, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not compile contract\n' + str(e)
except Exception as e:
print(e)
out = 'Could not compile contract.\n' + str(e)
finally:
if 'error' in out:
app.outputPanel.logger(out)
else:
app.outputPanel.logger("Compile successful\n\n" + out)
try:
out = subprocess.run(['eosio-cpp', '-o', wast, cpp, '--abigen'],
timeout=TIMEOUT+60, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not compile contract\n' + str(e)
except Exception as e:
print(e)
out = 'Could not compile contract.\n' + str(e)
finally:
if 'error' in out:
app.outputPanel.logger(out)
else:
app.outputPanel.logger("Compile successful\n\n" + out)
def setContract():
cpp = app.tabPanel.contractFileCPP.get()
wasm = app.tabPanel.contractFileWASM.get()
wast = app.tabPanel.contractFileWAST.get()
abi = app.tabPanel.contractFileABI.get()
try:
out_code = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'set', 'code', app.tabPanel.accountName.get(), wasm],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out_abi = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'set', 'abi', app.tabPanel.accountName.get(), abi],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out_code = out_code.stdout.decode('utf-8')
out_abi = out_abi.stdout.decode('utf-8')
out = str(out_code) + str(out_abi)
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not set contract\n' + str(e)
except Exception as e:
print(e)
out = 'Could not set contract.\n' + str(e)
finally:
app.outputPanel.logger("Contract successfully pished to the net.\n\n" + out)
def getAccountBalance():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get() ,'get', 'currency', 'balance', 'eosio.token', app.tabPanel.accountName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get account balance\n' + str(e)
except Exception as e:
print(e)
out = "Could not get account balance. \n" + str(e)
finally:
app.outputPanel.logger(out)
def getAccountDetails():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get() ,'get', 'account', app.tabPanel.accountName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get account details\n' + str(e)
except Exception as e:
print(e)
out = "Could not get account details. \n" + str(e)
finally:
app.outputPanel.logger(out)
def getAccountActions():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'get', 'actions', app.tabPanel.accountName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get account actions\n' + str(e)
except Exception as e:
print(e)
out = "Could not get account actions. \n" + str(e)
finally:
app.outputPanel.logger(out)
def getAccountCode():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'get', 'code', app.tabPanel.accountName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get account code\n' + str(e)
except Exception as e:
print(e)
out = "Could not get account code. \n" + str(e)
finally:
app.outputPanel.logger(out)
def getAccountAbi():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'get', 'abi', app.tabPanel.accountName.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get account abi\n' + str(e)
except Exception as e:
print(e)
out = "Could not get account abi. \n" + str(e)
finally:
app.outputPanel.logger(out)
def getAccountTable():
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'get', 'table', app.tabPanel.accountName.get(), app.tabPanel.accountScope.get(), app.tabPanel.accountTable.get(), '-L', app.tabPanel.accountLower.get(), '-l', app.tabPanel.accountLimit.get()],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not get account table\n' + str(e)
except Exception as e:
print(e)
out = "Could not get account table. \n" + str(e)
finally:
app.outputPanel.logger(out)
def buyRam():
creator = app.tabPanel.accountCreator.get()
owner = app.tabPanel.accountOwner.get()
ram = app.tabPanel.ram.get()
# #buy ram for yourself
# cleos system buyram someaccount1 someaccount1 "10 EOS"
#
# #buy ram for someone else
# cleos system buyram someaccount1 someaccount2 "1 EOS"
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'system', 'buyram', creator, owner, ram],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not buy RAM\n' + str(e)
except Exception as e:
print(e)
out = "Could not get but RAM. \n" + str(e)
finally:
app.outputPanel.logger(out)
def stakeNet():
creator = app.tabPanel.accountCreator.get()
owner = app.tabPanel.accountOwner.get()
net = app.tabPanel.net.get()
cpu = app.tabPanel.cpu.get()
# cleos system delegatebw accountname1 accountname2 "1 SYS" "1 SYS"
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'system', 'delegatebw', creator, owner, net, cpu],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not stake NET\n' + str(e)
except Exception as e:
print(e)
out = "Could not get stake NET. \n" + str(e)
finally:
app.outputPanel.logger(out)
def createAccount():
creator = app.tabPanel.accountCreator.get()
owner = app.tabPanel.accountOwner.get()
activeKey = app.tabPanel.accountActiveKey.get()
ownerKey = app.tabPanel.accountOwnerKey.get()
cpu = app.tabPanel.cpu.get()
net = app.tabPanel.net.get()
ram = app.tabPanel.ram.get()
permission = creator + '@active'
# cleos -u http://IP-HERE:8888 system newaccount --stake-net "0.1000 EOS" --stake-cpu "0.1000 EOS" --buy-ram-kbytes 8 eosio myDesiredAccountName Public key Public key
try:
out = subprocess.run(cleos + ['--url', app.tabPanel.producer.get(), 'system', 'newaccount', creator, owner, ownerKey, activeKey, '--stake-net', net, '--stake-cpu', cpu, '--buy-ram', ram, '--transfer', '-p', permission],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not stake NET\n' + str(e)
except Exception as e:
print(e)
out = "Could not get stake NET. \n" + str(e)
finally:
app.outputPanel.logger(out)
def setWalletDir():
stop = stopKeosd(False)
run = runKeosd(False)
app.outputPanel.logger(stop + '\n' + run)
def stopKeosd(flag):
if flag:
try:
out = subprocess.run(cleos + ['wallet', 'stop'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not stop keosd\n' + str(e)
except Exception as e:
print(e)
out = "Could not stop keosd. \n" + str(e)
finally:
app.outputPanel.logger(out)
else:
try:
out = subprocess.run(cleos + ['wallet', 'stop'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = 'Timeout. Can not stop keosd\n' + str(e)
except Exception as e:
print(e)
out = "Could not stop keosd. \n" + str(e)
finally:
return out
def runKeosd(flag):
# TODO rewrite function
if flag:
try:
out = os.spawnl(os.P_NOWAIT, 'keosd', '--wallet-dir', '~/eosio-wallet')
except Exception as e:
print('Could not run keosd by default path: ' + str(e))
out = "Could not run keosd by default path: " + str(e)
finally:
app.outputPanel.logger(str(out))
else:
try:
out = os.spawnl(os.P_NOWAIT, 'keosd', '--wallet-dir', app.tabPanel.walletDir.get())
except Exception as e:
print('Could not run keosd ' + str(e))
out = "Could not run keosd " + str(e)
finally:
return str(out)
# Currency operations
def getBtcBalance(address):
signal.signal(signal.SIGALRM, handler)
signal.alarm(TIMEOUT)
try:
out = btc.getBalance(address)
except RuntimeError as e:
print(e)
out = 'Can not get BTC balance. Timeout error.\n' + str(e)
except Exception as e:
print(e)
out = 'Can not get BTC balance.\n' + str(e)
finally:
signal.alarm(0)
app.outputPanel.logger(out)
def getEthBalance(address):
signal.signal(signal.SIGALRM, handler)
signal.alarm(TIMEOUT)
try:
out = eth.getBalance(address)
except RuntimeError as e:
print(e)
out = 'Can not get ETH balance. Timeout error.\n' + str(e)
except Exception as e:
print(e)
out = 'Can not get ETH balance.\n' + str(e)
finally:
signal.alarm(0)
app.outputPanel.logger(out)
def getXmrBalance(address):
    """Look up the XMR balance of *address* and log it to the output panel.

    A SIGALRM watchdog aborts the lookup after TIMEOUT seconds; the alarm
    is always cleared before logging.
    """
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(TIMEOUT)
    try:
        result = xmr.getBalance(address)
    except RuntimeError as e:
        print(e)
        result = 'Can not get XMR balance. Timeout error.\n' + str(e)
    except Exception as e:
        print(e)
        result = 'Can not get XMR balance.\n' + str(e)
    finally:
        signal.alarm(0)
        app.outputPanel.logger(result)
def getNeoBalance(address):
    """Look up the NEO balance of *address* and log it to the output panel.

    A SIGALRM watchdog aborts the lookup after TIMEOUT seconds; the alarm
    is always cleared before logging.
    """
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(TIMEOUT)
    try:
        result = neo.getBalance(address)
    except RuntimeError as e:
        print(e)
        result = 'Can not get NEO balance. Timeout error.\n' + str(e)
    except Exception as e:
        print(e)
        result = 'Can not get NEO balance.\n' + str(e)
    finally:
        signal.alarm(0)
        app.outputPanel.logger(result)
def getLtcBalance(address):
    """Look up the LTC balance of *address* and log it to the output panel.

    A SIGALRM watchdog aborts the lookup after TIMEOUT seconds; the alarm
    is always cleared before logging.
    """
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(TIMEOUT)
    try:
        result = ltc.getBalance(address)
    except RuntimeError as e:
        print(e)
        result = 'Can not get LTC balance. Timeout error.\n' + str(e)
    except Exception as e:
        print(e)
        result = 'Can not get LTC balance.\n' + str(e)
    finally:
        signal.alarm(0)
        app.outputPanel.logger(result)
def getBchBalance(address):
    """Look up the BCH balance of *address* and log it to the output panel.

    A SIGALRM watchdog aborts the lookup after TIMEOUT seconds; the alarm
    is always cleared before logging.
    """
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(TIMEOUT)
    try:
        result = bch.getBalance(address)
    except RuntimeError as e:
        print(e)
        result = 'Can not get BCH balance. Timeout error.\n' + str(e)
    except Exception as e:
        print(e)
        result = 'Can not get BCH balance.\n' + str(e)
    finally:
        signal.alarm(0)
        app.outputPanel.logger(result)
def getDashBalance(address):
    """Look up the DASH balance of *address* and log it to the output panel.

    A SIGALRM watchdog aborts the lookup after TIMEOUT seconds; the alarm
    is always cleared before logging.
    """
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(TIMEOUT)
    try:
        result = dash.getBalance(address)
    except RuntimeError as e:
        print(e)
        result = 'Can not get DASH balance. Timeout error.\n' + str(e)
    except Exception as e:
        print(e)
        result = 'Can not get DASH balance.\n' + str(e)
    finally:
        signal.alarm(0)
        app.outputPanel.logger(result)
| 35.902549
| 268
| 0.587798
| 2,886
| 23,947
| 4.867983
| 0.094248
| 0.015375
| 0.024201
| 0.042281
| 0.813652
| 0.781835
| 0.755783
| 0.728237
| 0.7266
| 0.678198
| 0
| 0.004007
| 0.280954
| 23,947
| 666
| 269
| 35.956456
| 0.811894
| 0.042594
| 0
| 0.689408
| 0
| 0
| 0.15176
| 0.000961
| 0
| 0
| 0
| 0.001502
| 0
| 1
| 0.064632
| false
| 0.007181
| 0.021544
| 0
| 0.089767
| 0.122083
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ff925beaf5ef2567704c3c1920f77502dc58969
| 4,747
|
py
|
Python
|
pyflux/gas/tests/gas_llev_tests_skewt.py
|
ThomasHoppe/pyflux
|
297f2afc2095acd97c12e827dd500e8ea5da0c0f
|
[
"BSD-3-Clause"
] | 2,091
|
2016-04-01T02:52:10.000Z
|
2022-03-29T11:38:15.000Z
|
pyflux/gas/tests/gas_llev_tests_skewt.py
|
EricSchles/pyflux
|
297f2afc2095acd97c12e827dd500e8ea5da0c0f
|
[
"BSD-3-Clause"
] | 160
|
2016-04-26T14:52:18.000Z
|
2022-03-15T02:09:07.000Z
|
pyflux/gas/tests/gas_llev_tests_skewt.py
|
EricSchles/pyflux
|
297f2afc2095acd97c12e827dd500e8ea5da0c0f
|
[
"BSD-3-Clause"
] | 264
|
2016-05-02T14:03:31.000Z
|
2022-03-29T07:48:20.000Z
|
import numpy as np
import pyflux as pf
# Unseeded Gaussian white-noise innovations for the simulated series.
noise = np.random.normal(0,1,200)
data = np.zeros(200)
# Build a random walk (AR(1) with unit coefficient), starting from 0:
# each point is the previous one plus fresh noise.
for i in range(1,len(data)):
    data[i] = 1.0*data[i-1] + noise[i]
# Poisson count data.  NOTE(review): appears unused by the tests in this
# chunk — confirm before removing.
countdata = np.random.poisson(3,200)
def test_skewt_couple_terms():
    """Fit a Skew-t GAS local-level model and check it exposes exactly 4
    latent variables, none of which is NaN."""
    model = pf.GASLLEV(data=data, family=pf.Skewt())
    model.fit()
    z_list = model.latent_variables.z_list
    assert len(z_list) == 4
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test_skewt_couple_terms_integ():
    """Same as the plain fit test, but with one order of differencing
    (integ=1): 4 latent variables, none NaN."""
    model = pf.GASLLEV(data=data, integ=1, family=pf.Skewt())
    model.fit()
    z_list = model.latent_variables.z_list
    assert len(z_list) == 4
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test_skewt_bbvi():
    """Fit via BBVI and check the latent-variable list has length 4 with no
    NaN estimates."""
    model = pf.GASLLEV(data=data, family=pf.Skewt())
    model.fit('BBVI',iterations=100)
    z_list = model.latent_variables.z_list
    assert len(z_list) == 4
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test_skewt_bbvi_mini_batch():
    """
    Tests a GAS model estimated with mini-batch BBVI and that the length of
    the latent variable list is correct, and that the estimated latent
    variables are not nan
    """
    model = pf.GASLLEV(data=data, family=pf.Skewt())
    # mini_batch=32 enables stochastic (mini-batch) BBVI updates.
    x = model.fit('BBVI',iterations=100, mini_batch=32)
    assert(len(model.latent_variables.z_list) == 4)
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert(len(lvs[np.isnan(lvs)]) == 0)
def test_skewt_bbvi_elbo():
    """Check that the recorded ELBO improves from the first to the last
    BBVI iteration."""
    model = pf.GASLLEV(data=data, family=pf.Skewt())
    result = model.fit('BBVI',iterations=100, record_elbo=True)
    assert result.elbo_records[-1] > result.elbo_records[0]
def test_skewt_bbvi_mini_batch_elbo():
    """Check that the recorded ELBO improves when fitting with mini-batch
    BBVI."""
    model = pf.GASLLEV(data=data, family=pf.Skewt())
    result = model.fit('BBVI',iterations=100, mini_batch=32, record_elbo=True)
    assert result.elbo_records[-1] > result.elbo_records[0]
def test_skewt_mh():
    """Fit via Metropolis-Hastings and check 4 latent variables with no NaN
    estimates."""
    model = pf.GASLLEV(data=data, family=pf.Skewt())
    model.fit('M-H',nsims=300)
    z_list = model.latent_variables.z_list
    assert len(z_list) == 4
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
""" Uncomment in future if Skewt becomes more robust
def test_skewt_laplace():
Tests an GAS model estimated with Laplace approximation and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit('Laplace')
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
"""
def test_skewt_pml():
    """Fit via PML (penalized maximum likelihood) and check 4 latent
    variables with no NaN estimates."""
    model = pf.GASLLEV(data=data, family=pf.Skewt())
    model.fit('PML')
    z_list = model.latent_variables.z_list
    assert len(z_list) == 4
    estimates = np.array([z.value for z in z_list])
    assert not np.isnan(estimates).any()
def test_skewt_predict_length():
    """The prediction dataframe should contain exactly h rows."""
    model = pf.GASLLEV(data=data, family=pf.Skewt())
    fit_result = model.fit()
    fit_result.summary()
    assert model.predict(h=5).shape[0] == 5
def test_skewt_predict_is_length():
    """The in-sample (IS) prediction dataframe should contain exactly h
    rows."""
    model = pf.GASLLEV(data=data, family=pf.Skewt())
    model.fit()
    assert model.predict_is(h=5).shape[0] == 5
def test_skewt_predict_nans():
    """
    Tests that the predictions are not nans
    """
    model = pf.GASLLEV(data=data, family=pf.Skewt())
    x = model.fit()
    x.summary()
    # Predict once and reuse the values (the original called predict(h=5)
    # twice); also removes a stray code line that had leaked into the
    # docstring above.
    predictions = model.predict(h=5).values
    assert(len(predictions[np.isnan(predictions)]) == 0)
"""
def test_skewt_predict_is_nans():
Tests that the in-sample predictions are not nans
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit()
x.summary()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
"""
| 33.195804
| 87
| 0.709711
| 793
| 4,747
| 4.147541
| 0.128625
| 0.095774
| 0.059593
| 0.076619
| 0.901186
| 0.857708
| 0.839465
| 0.816358
| 0.816358
| 0.799331
| 0
| 0.016316
| 0.147883
| 4,747
| 142
| 88
| 33.429577
| 0.796786
| 0.240152
| 0
| 0.569231
| 0
| 0
| 0.00805
| 0
| 0
| 0
| 0
| 0
| 0.261538
| 1
| 0.169231
| false
| 0
| 0.030769
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ba06bd7bbbbf073ea68c4060dc53dffd638beb95
| 62
|
py
|
Python
|
libs/query_set.py
|
jessamynsmith/quotations
|
b2a9b70190756fa261840faea181860b166e253f
|
[
"MIT"
] | 2
|
2015-05-01T19:44:41.000Z
|
2015-07-17T13:52:46.000Z
|
libs/query_set.py
|
jessamynsmith/quotations
|
b2a9b70190756fa261840faea181860b166e253f
|
[
"MIT"
] | 13
|
2019-10-18T17:06:52.000Z
|
2022-02-10T07:37:30.000Z
|
libs/query_set.py
|
jessamynsmith/quotations
|
b2a9b70190756fa261840faea181860b166e253f
|
[
"MIT"
] | 3
|
2015-05-06T15:38:30.000Z
|
2015-07-26T21:12:32.000Z
|
def get_random(query_set):
    """Return *query_set* re-ordered randomly (database-side '?' ordering)."""
    shuffled = query_set.order_by('?')
    return shuffled
| 20.666667
| 34
| 0.725806
| 10
| 62
| 4.1
| 0.8
| 0.390244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 62
| 2
| 35
| 31
| 0.759259
| 0
| 0
| 0
| 0
| 0
| 0.016129
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
e83bd0da7cea890b9cffd07d8619934a628dd1e1
| 34,860
|
py
|
Python
|
tests/unit/test_property_wizard.py
|
rnag/dataclass-wizard
|
5bccf63daea217aff6e77fd00ed8f7bed87f0377
|
[
"Apache-2.0"
] | 19
|
2021-11-05T20:29:56.000Z
|
2022-03-31T02:51:25.000Z
|
tests/unit/test_property_wizard.py
|
rnag/dataclass-wizard
|
5bccf63daea217aff6e77fd00ed8f7bed87f0377
|
[
"Apache-2.0"
] | 6
|
2021-10-20T23:24:04.000Z
|
2022-03-01T18:49:14.000Z
|
tests/unit/test_property_wizard.py
|
rnag/dataclass-wizard
|
5bccf63daea217aff6e77fd00ed8f7bed87f0377
|
[
"Apache-2.0"
] | null | null | null |
import logging
from collections import defaultdict
from dataclasses import dataclass, field
from datetime import datetime
from typing import Union, List, ClassVar, DefaultDict, Set
import pytest
from dataclass_wizard import property_wizard
from ..conftest import Literal, Annotated, PY39_OR_ABOVE, PY310_OR_ABOVE
log = logging.getLogger(__name__)
def test_property_wizard_does_not_affect_normal_properties():
    """
    The `property_wizard` should not otherwise affect normal properties (i.e. ones
    that don't have their property names (or underscored names) annotated as a
    dataclass field.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # No field annotations for `wheels` / `_my_prop`, so they remain
        # plain properties and must be initialized manually here.
        def __post_init__(self):
            self.wheels = 4
            self._my_prop = 0
        @property
        def wheels(self) -> int:
            return self._wheels
        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
        @property
        def _my_prop(self) -> int:
            return self.my_prop
        @_my_prop.setter
        def _my_prop(self, my_prop: Union[int, str]):
            self.my_prop = int(my_prop) + 5
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 4
    # 0 passed through the setter becomes int(0) + 5 == 5.
    assert v._my_prop == 5
    # These should all result in a `TypeError`, as neither `wheels` nor
    # `_my_prop` are valid arguments to the constructor, as they are just
    # normal properties.
    with pytest.raises(TypeError):
        _ = Vehicle(wheels=3)
    with pytest.raises(TypeError):
        _ = Vehicle('6')
    with pytest.raises(TypeError):
        _ = Vehicle(_my_prop=2)
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
    v._my_prop = '5'
    assert v._my_prop == 10, 'Expected assignment to use the setter method'
def test_property_wizard_does_not_affect_read_only_properties():
    """
    The `property_wizard` should not otherwise affect properties which are
    read-only (i.e. ones which don't define a `setter` method)
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        list_of_wheels: list = field(default_factory=list)
        # Read-only: computed from the backing list; no setter is defined.
        @property
        def wheels(self) -> int:
            return len(self.list_of_wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 0
    # AttributeError: can't set attribute
    with pytest.raises(AttributeError):
        v.wheels = 3
    v = Vehicle(list_of_wheels=[1, 2, 1])
    assert v.wheels == 3
    v.list_of_wheels = [0]
    assert v.wheels == 1
def test_property_wizard_does_not_error_when_forward_refs_are_declared():
    """
    Using `property_wizard` when the dataclass has a forward reference
    defined in a type annotation.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # 'Truck' and 'Car' are defined *after* this class; handling these
        # forward references must not raise.
        fire_truck: 'Truck'
        cars: List['Car'] = field(default_factory=list)
        _wheels: Union[int, str] = 4
        @property
        def wheels(self) -> int:
            return self._wheels
        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    @dataclass
    class Car:
        tires: int
    @dataclass
    class Truck:
        color: str
    truck = Truck('red')
    v = Vehicle(fire_truck=truck)
    log.debug(v)
    assert v.wheels == 4
    # The `_wheels` field is exposed as the public `wheels` ctor argument.
    v = Vehicle(fire_truck=truck, wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle(truck, [Car(4)], '6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_public_property_and_underscored_field():
    """
    Using `property_wizard` when the dataclass has a public property and an
    underscored field name.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # The `_wheels` field is surfaced as the public `wheels` ctor arg.
        _wheels: Union[int, str] = 4
        @property
        def wheels(self) -> int:
            return self._wheels
        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 4
    # Note that my IDE complains here, and suggests `_wheels` as a possible
    # keyword argument to the constructor method; however, that's wrong and
    # will error if you try it way.
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_public_property_and_field():
    """
    Using `property_wizard` when the dataclass has both a property and field
    name *without* a leading underscore.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # The value of `wheels` here will be ignored, since `wheels` is simply
        # re-assigned on the following property definition.
        wheels: Union[int, str] = 4
        @property
        def wheels(self) -> int:
            return self._wheels
        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    # With the annotated default discarded, the effective default is 0.
    assert v.wheels == 0
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
@pytest.mark.skipif(not PY310_OR_ABOVE, reason='requires Python 3.10 or higher')
def test_property_wizard_with_public_property_and_field_with_or():
    """
    Using `property_wizard` when the dataclass has both a property and field
    name *without* a leading underscore, and using the OR ("|") operator in
    Python 3.10+, instead of the `typing.Union` usage.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # The value of `wheels` here will be ignored, since `wheels` is simply
        # re-assigned on the following property definition.
        # PEP 604 union syntax — hence the 3.10 skip marker above.
        wheels: int | str = 4
        @property
        def wheels(self) -> int:
            return self._wheels
        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 0
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_underscored_property_and_public_field():
    """
    Using `property_wizard` when the dataclass has an underscored property and
    a public field name.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # Public field keeps its annotated default of 4 (see assert below).
        wheels: Union[int, str] = 4
        @property
        def _wheels(self) -> int:
            return self._wheels
        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 4
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_underscored_property_and_field():
    """
    Using `property_wizard` when the dataclass has both a property and field
    name with a leading underscore.
    Note: this approach is generally *not* recommended, because the IDE won't
    know that the property or field name will be transformed to a public field
    name without the leading underscore, so it won't offer the desired type
    hints and auto-completion here.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # The value of `_wheels` here will be ignored, since `_wheels` is
        # simply re-assigned on the following property definition.
        _wheels: Union[int, str] = 4
        @property
        def _wheels(self) -> int:
            return self._wheels
        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    # Annotated default is discarded, so the effective default is 0.
    assert v.wheels == 0
    # Note that my IDE complains here, and suggests `_wheels` as a possible
    # keyword argument to the constructor method; however, that's wrong and
    # will error if you try it way.
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_public_property_and_annotated_field():
    """
    Using `property_wizard` when the dataclass has both a property and field
    name *without* a leading underscore, and the field is a
    :class:`typing.Annotated` type.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # The value of `wheels` here will be ignored, since `wheels` is simply
        # re-assigned on the following property definition.
        # The default is taken from the `field(default=4)` extra inside
        # Annotated, not from the `= None` (see assert below).
        wheels: Annotated[Union[int, str], field(default=4)] = None
        @property
        def wheels(self) -> int:
            return self._wheels
        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 4
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_private_property_and_annotated_field_with_no_useful_extras():
    """
    Using `property_wizard` when the dataclass has both a property and field
    name with a leading underscore, and the field is a
    :class:`typing.Annotated` type without any extras that are a
    :class:`dataclasses.Field` type.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # The value of `wheels` here will be ignored, since `wheels` is simply
        # re-assigned on the following property definition.
        # The Annotated extras are not `field(...)` objects, so no default is
        # taken from them (effective default is 0, see assert below).
        _wheels: Annotated[Union[int, str], 'Hello world!', 123] = None
        @property
        def _wheels(self) -> int:
            return self._wheels
        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 0
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_multiple_inheritance():
    """
    When using multiple inheritance or when extending from more than one
    class, and if any of the super classes define properties that should also
    be `dataclass` fields, then the recommended approach is to define the
    `property_wizard` metaclass on each class that has such properties. Note
    that the last class in the below example (Car) doesn't need to use this
    metaclass, as it doesn't have any properties that meet this condition.
    """
    @dataclass
    class VehicleWithWheels(metaclass=property_wizard):
        _wheels: Union[int, str] = field(default=4)
        @property
        def wheels(self) -> int:
            return self._wheels
        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    @dataclass
    class Vehicle(VehicleWithWheels, metaclass=property_wizard):
        # Each level of the hierarchy with field-backed properties declares
        # the metaclass itself.
        _windows: Union[int, str] = field(default=6)
        @property
        def windows(self) -> int:
            return self._windows
        @windows.setter
        def windows(self, windows: Union[int, str]):
            self._windows = int(windows)
    @dataclass
    class Car(Vehicle):
        my_list: List[str] = field(default_factory=list)
    v = Car()
    log.debug(v)
    assert v.wheels == 4
    assert v.windows == 6
    assert v.my_list == []
    # Note that my IDE complains here, and suggests `_wheels` as a possible
    # keyword argument to the constructor method; however, that's wrong and
    # will error if you try it way.
    v = Car(wheels=3, windows=5, my_list=['hello', 'world'])
    log.debug(v)
    assert v.wheels == 3
    assert v.windows == 5
    assert v.my_list == ['hello', 'world']
    v = Car('6', '7', ['testing'])
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    assert v.windows == 7, 'The constructor should use our setter method'
    assert v.my_list == ['testing']
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
    v.windows = '321'
    assert v.windows == 321, 'Expected assignment to use the setter method'
# NOTE: the below test cases are added for coverage purposes
def test_property_wizard_with_public_property_and_underscored_field_without_default_value():
    """
    Using `property_wizard` when the dataclass has a public property, and an
    underscored field *without* a default value explicitly set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # No explicit default: effective default is int() == 0 (see below).
        _wheels: Union[int, str]
        @property
        def wheels(self) -> int:
            return self._wheels
        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 0
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_public_property_and_underscored_field_with_default_factory():
    """
    Using `property_wizard` when the dataclass has a public property, and an
    underscored field has only `default_factory` set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # default_factory=str yields '' — which int('') rejects in the setter.
        _wheels: Union[int, str] = field(default_factory=str)
        @property
        def wheels(self) -> int:
            return self._wheels
        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    with pytest.raises(ValueError):
        # Setter raises ValueError, as `wheels` will be a string by default
        _ = Vehicle()
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_public_property_and_underscored_field_without_default_or_default_factory():
    """
    Using `property_wizard` when the dataclass has a public property, and an
    underscored field has neither `default` or `default_factory` set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # Bare field() provides no default; effective default is 0 (below).
        _wheels: Union[int, str] = field()
        @property
        def wheels(self) -> int:
            return self._wheels
        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 0
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_underscored_property_and_public_field_without_default_value():
    """
    Using `property_wizard` when the dataclass has an underscored property,
    and a public field *without* a default value explicitly set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # No explicit default: effective default is int() == 0 (see below).
        wheels: Union[int, str]
        @property
        def _wheels(self) -> int:
            return self._wheels
        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 0
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_public_property_and_public_field_is_property():
    """
    Using `property_wizard` when the dataclass has an underscored property,
    and a public field is also defined as a property.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # The value of `wheels` here will be ignored, since `wheels` is simply
        # re-assigned on the following property definition.
        wheels = property
        # Defines the default value for `wheels`, since it won't work if we
        # define it above. The `init=False` is needed since otherwise IDEs
        # seem to suggest `_wheels` as a parameter to the constructor method,
        # which shouldn't be the case.
        #
        # Note: if are *ok* with the default value for the type (0 in this
        # case), then you can remove the below line and annotate the above
        # line instead as `wheels: Union[int, str] = property`
        _wheels: Union[int, str] = field(default=4, init=False)
        # `@wheels` works here because `wheels` currently holds the builtin
        # `property`, making this equivalent to `@property`.
        @wheels
        def wheels(self) -> int:
            return self._wheels
        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 4
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_underscored_property_and_public_field_with_default():
    """
    Using `property_wizard` when the dataclass has an underscored property,
    and the public field has `default` set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # field(default=2) is honored as the default (see assert below).
        wheels: Union[int, str] = field(default=2)
        @property
        def _wheels(self) -> int:
            return self._wheels
        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 2
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_underscored_property_and_public_field_with_default_factory():
    """
    Using `property_wizard` when the dataclass has an underscored property,
    and the public field has only `default_factory` set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # default_factory=str yields '' — which int('') rejects in the setter.
        wheels: Union[int, str] = field(default_factory=str)
        @property
        def _wheels(self) -> int:
            return self._wheels
        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    with pytest.raises(ValueError):
        # Setter raises ValueError, as `wheels` will be a string by default
        _ = Vehicle()
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_underscored_property_and_public_field_without_default_or_default_factory():
    """
    Using `property_wizard` when the dataclass has an underscored property,
    and the public field has neither `default` or `default_factory` set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # Bare field() provides no default; effective default is 0 (below).
        wheels: Union[int, str] = field()
        @property
        def _wheels(self) -> int:
            return self._wheels
        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 0
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_where_annotated_type_contains_none():
    """
    Using `property_wizard` when the annotated type for the dataclass field
    associated with a property is here a :class:`Union` type that contains
    `None`. As such, the field is technically an `Optional` so the default
    value will be `None` if no value is specified via the constructor.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # Union includes None -> field is Optional with default None, and
        # int(None) in the setter raises TypeError (see below).
        wheels: Union[int, str, None]
        @property
        def _wheels(self) -> int:
            return self._wheels
        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    # TypeError: int() argument is `None`
    with pytest.raises(TypeError):
        _ = Vehicle()
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_literal_type():
    """
    Using `property_wizard` when the dataclass field associated with a
    property is annotated with a :class:`Literal` type.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # Annotate `wheels` as a literal that should only be set to 1 or 0
        # (similar to how the binary numeral system works, for example)
        #
        # Note: we can assign a default value for `wheels` explicitly, so that
        # the IDE doesn't complain when we omit the argument to the
        # constructor method, but it's technically not required.
        wheels: Literal[1, '1', 0, '0']
        @property
        def _wheels(self) -> int:
            return self._wheels
        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    # The default appears to be the first Literal option (1) — confirmed by
    # this assert.
    assert v.wheels == 1
    # The IDE should display a warning (`wheels` only accepts [0, 1]), however
    # it won't prevent the assignment here.
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    # The IDE should display no warning here, as this is an acceptable value
    v = Vehicle('1')
    log.debug(v)
    assert v.wheels == 1, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_concrete_type():
    """
    Using `property_wizard` when the dataclass field associated with a
    property is annotated with a non-generic type, such as a `str` or `int`.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # Concrete `int` annotation: effective default is int() == 0 (below).
        wheels: int
        @property
        def _wheels(self) -> int:
            return self._wheels
        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 0
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('1')
    log.debug(v)
    assert v.wheels == 1, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_concrete_type_and_default_factory_raises_type_error():
    """
    Using `property_wizard` when the dataclass field associated with a
    property is annotated with a non-generic type, such as a `datetime`, which
    doesn't have a no-args constructor. Since `property_wizard` is not able to
    instantiate a new `datetime`, the default value should be ``None``.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # Date when the vehicle was sold
        sold_dt: datetime
        @property
        def _sold_dt(self) -> int:
            return self._sold_dt
        @_sold_dt.setter
        def _sold_dt(self, sold_dt: datetime):
            """Save the datetime with the year set to `2010`"""
            self._sold_dt = sold_dt.replace(year=2010)
    # AttributeError: 'NoneType' object has no attribute 'replace'
    with pytest.raises(AttributeError):
        _ = Vehicle()
    dt = datetime(2020, 1, 1, 12, 0, 0)    # Jan. 1 2020 12:00 PM
    expected_dt = datetime(2010, 1, 1, 12, 0, 0)    # Jan. 1 2010 12:00 PM
    v = Vehicle(sold_dt=dt)
    log.debug(v)
    # The setter rewrites the year, so the stored value differs from `dt`.
    assert v.sold_dt != dt
    assert v.sold_dt == expected_dt, 'The constructor should use our setter ' \
                                     'method'
    dt = datetime.min
    expected_dt = datetime.min.replace(year=2010)
    v.sold_dt = dt
    assert v.sold_dt == expected_dt, 'Expected assignment to use the setter ' \
                                     'method'
def test_property_wizard_with_generic_type_which_is_not_supported():
    """
    Using `property_wizard` when the dataclass field associated with a
    property is annotated with a generic type other than one of the supported
    types (e.g. Literal and Union).
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # Date when the vehicle was sold
        sold_dt: ClassVar[datetime]

        @property
        def _sold_dt(self) -> int:
            # NOTE(review): annotated `int` but stores a `datetime`; inert here.
            return self._sold_dt

        @_sold_dt.setter
        def _sold_dt(self, sold_dt: datetime):
            """Save the datetime with the year set to `2010`"""
            self._sold_dt = sold_dt.replace(year=2010)

    # Construction with no args succeeds: a ClassVar field is excluded from
    # __init__, so no default is pushed through the setter here.
    v = Vehicle()
    log.debug(v)

    dt = datetime(2020, 1, 1, 12, 0, 0)           # Jan. 1 2020 12:00 PM
    expected_dt = datetime(2010, 1, 1, 12, 0, 0)  # Jan. 1 2010 12:00 PM

    # TypeError: __init__() got an unexpected keyword argument 'sold_dt'
    # Note: This is expected because the field for the property is a
    # `ClassVar`, and even `dataclasses` excludes this annotated type
    # from the constructor.
    with pytest.raises(TypeError):
        _ = Vehicle(sold_dt=dt)

    # Our property should still work as expected, however
    v.sold_dt = dt
    assert v.sold_dt == expected_dt, 'Expected assignment to use the setter ' \
                                     'method'
def test_property_wizard_with_mutable_types_v1():
    """
    The `property_wizard` handles mutable collections (e.g. subclasses of list,
    dict, and set) as expected. The defaults for these mutable types should
    use a `default_factory` so we can observe the expected behavior.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        wheels: List[Union[int, str]]
        # _wheels: List[Union[int, str]] = field(init=False)
        inverse_bool_set: Set[bool]
        # Not needed, but we can also define this as below if we want to
        # inverse_bool_set: Annotated[Set[bool], field(default_factory=set)]

        # We'll need the `field(default_factory=...)` syntax here, because
        # otherwise the default_factory will be `defaultdict()`, which is not what
        # we want.
        wheels_dict: Annotated[
            DefaultDict[str, List[str]],
            field(default_factory=lambda: defaultdict(list))
        ]

        @property
        def wheels(self) -> List[int]:
            return self._wheels

        @wheels.setter
        def wheels(self, wheels: List[Union[int, str]]):
            # Coerce every element to int on assignment.
            self._wheels = [int(w) for w in wheels]

        @property
        def inverse_bool_set(self) -> Set[bool]:
            return self._inverse_bool_set

        @inverse_bool_set.setter
        def inverse_bool_set(self, bool_set: Set[bool]):
            # Confirm that we're passed in the right type when no value is set via
            # the constructor (i.e. from the `property_wizard` metaclass)
            assert isinstance(bool_set, set)
            self._inverse_bool_set = {not b for b in bool_set}

        @property
        def wheels_dict(self) -> int:
            # NOTE(review): `int` return annotation looks wrong for a dict —
            # likely copy/paste; the test only checks stored values.
            return self._wheels_dict

        @wheels_dict.setter
        def wheels_dict(self, wheels: Union[int, str]):
            self._wheels_dict = wheels

    v1 = Vehicle(wheels=['1', '2', '3'],
                 inverse_bool_set={True, False},
                 wheels_dict=defaultdict(list, key=['value']))
    v1.wheels_dict['key2'].append('another value')
    log.debug(v1)

    # v2 and v3 are built with no args: each must receive a *fresh* mutable
    # default, so mutations on one instance must not leak into another.
    v2 = Vehicle()
    v2.wheels.append(4)
    v2.wheels_dict['a'].append('5')
    v2.inverse_bool_set.add(True)
    log.debug(v2)

    v3 = Vehicle()
    v3.wheels.append(1)
    v3.wheels_dict['b'].append('2')
    v3.inverse_bool_set.add(False)
    log.debug(v3)

    assert v1.wheels == [1, 2, 3]
    assert v1.inverse_bool_set == {False, True}
    assert v1.wheels_dict == {'key': ['value'], 'key2': ['another value']}

    assert v2.wheels == [4]
    assert v2.inverse_bool_set == {True}
    assert v2.wheels_dict == {'a': ['5']}

    assert v3.wheels == [1]
    assert v3.inverse_bool_set == {False}
    assert v3.wheels_dict == {'b': ['2']}
def test_property_wizard_with_mutable_types_v2():
    """
    The `property_wizard` handles mutable collections (e.g. subclasses of list,
    dict, and set) as expected. The defaults for these mutable types should
    use a `default_factory` so we can observe the expected behavior.
    In this version, we explicitly pass in the `field(default_factory=...)`
    syntax for all field properties, though it's technically not needed.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        wheels: Annotated[List[int], field(default_factory=list)]
        # Leading-underscore field: the metaclass should still associate it
        # with the `wheels_list` property below.
        _wheels_list: list = field(default_factory=list)

        @property
        def wheels_list(self) -> list:
            return self._wheels_list

        @wheels_list.setter
        def wheels_list(self, wheels):
            self._wheels_list = wheels

        @property
        def wheels(self) -> list:
            return self._wheels

        @wheels.setter
        def wheels(self, wheels):
            self._wheels = wheels

    v1 = Vehicle(wheels=[1, 2], wheels_list=[2, 1])
    v1.wheels.append(3)
    v1.wheels_list.insert(0, 3)
    log.debug(v1)

    # Instances created with no args must each receive fresh (unshared) lists.
    v2 = Vehicle()
    log.debug(v2)
    v2.wheels.append(2)
    v2.wheels.append(1)
    v2.wheels_list.append(1)
    v2.wheels_list.append(2)

    v3 = Vehicle()
    log.debug(v3)
    v3.wheels.append(1)
    v3.wheels.append(1)
    v3.wheels_list.append(5)
    v3.wheels_list.append(5)

    assert v1.wheels == [1, 2, 3]
    assert v1.wheels_list == [3, 2, 1]
    assert v2.wheels == [2, 1]
    assert v2.wheels_list == [1, 2]
    assert v3.wheels == [1, 1]
    assert v3.wheels_list == [5, 5]
@pytest.mark.skipif(not PY39_OR_ABOVE, reason='requires Python 3.9 or higher')
def test_property_wizard_with_mutable_types_with_parameterized_standard_collections():
    """
    Test case for mutable types with a Python 3.9 specific feature:
    parameterized standard collections. As such, this test case is only
    expected to pass for Python 3.9+.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # Same fields as the `mutable_types_v1` case, but annotated with
        # built-in generics (PEP 585) instead of `typing` aliases.
        wheels: list[Union[int, str]]
        # _wheels: List[Union[int, str]] = field(init=False)
        inverse_bool_set: set[bool]
        # Not needed, but we can also define this as below if we want to
        # inverse_bool_set: Annotated[Set[bool], field(default_factory=set)]

        # We'll need the `field(default_factory=...)` syntax here, because
        # otherwise the default_factory will be `defaultdict()`, which is not what
        # we want.
        wheels_dict: Annotated[
            defaultdict[str, List[str]],
            field(default_factory=lambda: defaultdict(list))
        ]

        @property
        def wheels(self) -> List[int]:
            return self._wheels

        @wheels.setter
        def wheels(self, wheels: List[Union[int, str]]):
            # Coerce every element to int on assignment.
            self._wheels = [int(w) for w in wheels]

        @property
        def inverse_bool_set(self) -> Set[bool]:
            return self._inverse_bool_set

        @inverse_bool_set.setter
        def inverse_bool_set(self, bool_set: Set[bool]):
            # Confirm that we're passed in the right type when no value is set via
            # the constructor (i.e. from the `property_wizard` metaclass)
            assert isinstance(bool_set, set)
            self._inverse_bool_set = {not b for b in bool_set}

        @property
        def wheels_dict(self) -> int:
            # NOTE(review): `int` return annotation looks wrong for a dict —
            # likely copy/paste; inert for this test.
            return self._wheels_dict

        @wheels_dict.setter
        def wheels_dict(self, wheels: Union[int, str]):
            self._wheels_dict = wheels

    v1 = Vehicle(wheels=['1', '2', '3'],
                 inverse_bool_set={True, False},
                 wheels_dict=defaultdict(list, key=['value']))
    v1.wheels_dict['key2'].append('another value')
    log.debug(v1)

    # v2 and v3 are built with no args: each must receive a *fresh* mutable
    # default, so mutations on one instance must not leak into another.
    v2 = Vehicle()
    v2.wheels.append(4)
    v2.wheels_dict['a'].append('5')
    v2.inverse_bool_set.add(True)
    log.debug(v2)

    v3 = Vehicle()
    v3.wheels.append(1)
    v3.wheels_dict['b'].append('2')
    v3.inverse_bool_set.add(False)
    log.debug(v3)

    assert v1.wheels == [1, 2, 3]
    assert v1.inverse_bool_set == {False, True}
    assert v1.wheels_dict == {'key': ['value'], 'key2': ['another value']}

    assert v2.wheels == [4]
    assert v2.inverse_bool_set == {True}
    assert v2.wheels_dict == {'a': ['5']}

    assert v3.wheels == [1]
    assert v3.inverse_bool_set == {False}
    assert v3.wheels_dict == {'b': ['2']}
| 29.392917
| 105
| 0.635657
| 4,686
| 34,860
| 4.59347
| 0.070849
| 0.033821
| 0.049524
| 0.041812
| 0.797538
| 0.770035
| 0.75252
| 0.732729
| 0.718792
| 0.703136
| 0
| 0.020404
| 0.26331
| 34,860
| 1,185
| 106
| 29.417722
| 0.817764
| 0.254016
| 0
| 0.791908
| 0
| 0
| 0.094476
| 0
| 0
| 0
| 0
| 0
| 0.174855
| 1
| 0.137283
| false
| 0
| 0.011561
| 0.049133
| 0.297688
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e86175919abeb092ddf35854c597c02131ebd4bd
| 23
|
py
|
Python
|
stat_tools/__init__.py
|
tilmantroester/simple_bootstrap
|
4cecd2a789adad7700842de02cb051c46af3647b
|
[
"MIT"
] | null | null | null |
stat_tools/__init__.py
|
tilmantroester/simple_bootstrap
|
4cecd2a789adad7700842de02cb051c46af3647b
|
[
"MIT"
] | null | null | null |
stat_tools/__init__.py
|
tilmantroester/simple_bootstrap
|
4cecd2a789adad7700842de02cb051c46af3647b
|
[
"MIT"
] | null | null | null |
from . import bootstrap
| 23
| 23
| 0.826087
| 3
| 23
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 23
| 1
| 23
| 23
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e86ab204920d37c7269fd123ac7a7323a5856aec
| 37
|
py
|
Python
|
download.py
|
easygittool/EasyGitTool
|
55ce8aaa6756715e864afdfb3b420d62eef84437
|
[
"Apache-2.0"
] | 1
|
2019-02-09T11:18:29.000Z
|
2019-02-09T11:18:29.000Z
|
download.py
|
easygittool/EasyGitTool
|
55ce8aaa6756715e864afdfb3b420d62eef84437
|
[
"Apache-2.0"
] | null | null | null |
download.py
|
easygittool/EasyGitTool
|
55ce8aaa6756715e864afdfb3b420d62eef84437
|
[
"Apache-2.0"
] | null | null | null |
import os
import
os.rename(src, dst)
| 12.333333
| 19
| 0.756757
| 7
| 37
| 4
| 0.714286
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 37
| 3
| 19
| 12.333333
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.666667
| null | null | 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
2cd9d28ece47e42057e4d6218cb1fc0b40c58037
| 46,346
|
py
|
Python
|
surveyequivalence/scoring_functions.py
|
DavidXu999/surveyequivalence
|
83fc7f3095f7d58a32d5624afa8ec83895073277
|
[
"MIT"
] | null | null | null |
surveyequivalence/scoring_functions.py
|
DavidXu999/surveyequivalence
|
83fc7f3095f7d58a32d5624afa8ec83895073277
|
[
"MIT"
] | 3
|
2021-11-30T22:57:59.000Z
|
2022-02-03T04:44:45.000Z
|
surveyequivalence/scoring_functions.py
|
DavidXu999/surveyequivalence
|
83fc7f3095f7d58a32d5624afa8ec83895073277
|
[
"MIT"
] | 1
|
2021-07-28T14:27:18.000Z
|
2021-07-28T14:27:18.000Z
|
import random
from abc import ABC, abstractmethod
from math import log2
from typing import Sequence
import numpy as np
import pandas as pd
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score
from .combiners import DiscreteDistributionPrediction, NumericPrediction, DiscretePrediction
# Memo table for frac() (factorial); filled contiguously from 1 upward.
frac_cache = dict()


def frac(n: int) -> int:
    """
    Calculate the frac: n! (the factorial of ``n``).

    Results are memoized in the module-level ``frac_cache``. For ``n <= 0``
    the function returns 1, matching the original behavior (negative inputs
    are treated like 0).

    Parameters
    ----------
    n: the number whose factorial is wanted

    Returns
    -------
    n! as an exact int (1 when n <= 0)
    """
    if n <= 0:
        return 1
    if n in frac_cache:
        return frac_cache[n]
    # Fill the cache iteratively, resuming from the largest contiguous
    # cached prefix. The original recursive implementation could hit
    # RecursionError for large n; this version cannot.
    acc = 1
    start = 1
    while start in frac_cache:
        acc = frac_cache[start]
        start += 1
    for m in range(start, n + 1):
        acc *= m
        frac_cache[m] = acc
    return acc
def comb(m: int, n: int):
    """
    Calculate the combination number: pick m items from n items, i.e. C(n, m).

    Returns 0 when m > n (more picks than items). Unlike the original
    float-division implementation, this uses the exact integer multiplicative
    formula, so results carry no floating-point error even for large inputs.

    Parameters
    ----------
    m: number of items picked
    n: number of items available

    Returns
    -------
    The binomial coefficient C(n, m) as an exact int (0 if m > n; 1 if m <= 0).
    """
    if m > n:
        return 0
    # Multiplicative formula: C(n, m) = prod_{k=0..m-1} (n - k) / (k + 1).
    # Each prefix product is itself a binomial coefficient, so the floor
    # division below is exact at every step.
    result = 1
    for k in range(m):
        result = result * (n - k) // (k + 1)
    return result
def mode(data):
    """
    Return the most frequent value in ``data``.

    Ties are broken uniformly at random among all values that share the
    highest frequency.
    """
    # Tally occurrences of each distinct value.
    counts = {}
    for value in data:
        counts[value] = counts.get(value, 0) + 1
    top = max(counts.values())
    # Every value reaching the top frequency is a candidate mode.
    candidates = [value for value, n in counts.items() if n == top]
    return np.random.choice(candidates, 1, replace=False)[0]
class Scorer(ABC):
    """
    Scorer Class. An abstract class.

    Concrete subclasses provide a static `score` method; this base class
    provides the machinery for computing *expected* scores of classifier
    predictions against virtual raters drawn from a rating matrix W.

    Parameters
    ----------
    num_virtual_raters: the number of virtual raters drawn when calculating the score. Higher num_virtual_raters makes the variance of the score lower.
    num_ref_raters_per_virtual_rater: A virtual rater is the combined rating of a randomly selected set of num_ref_raters_per_virtual_rater non-null ratings for each column
    ref_rater_combiner: The way to combine the ref_raters. Default: Combine with majority vote for discrete labels; mean for continuous labels
    verbosity: verbosity value from 1 to 4 indicating increased verbosity.
    """

    @abstractmethod
    def __init__(self,
                 num_virtual_raters=100,
                 num_ref_raters_per_virtual_rater=1,
                 ref_rater_combiner="majority_vote",
                 verbosity=0):
        self.num_virtual_raters = num_virtual_raters
        self.num_ref_raters_per_virtual_rater = num_ref_raters_per_virtual_rater
        self.ref_rater_combiner = ref_rater_combiner
        self.verbosity = verbosity
        pass

    @staticmethod
    @abstractmethod
    def score(classifier_predictions: Sequence,
              rater_labels: Sequence) -> float:
        """Score `classifier_predictions` against one sequence of reference labels."""
        pass

    def expected_score_anonymous_raters(self,
                                        classifier_predictions,
                                        W,
                                        num_virtual_raters=None,
                                        num_ref_raters_per_virtual_rater=None,
                                        ref_rater_combiner=None,
                                        verbosity=None):
        """
        This implementation generates sample virtual raters, scores each, and takes the mean.
        Some scoring functions override this with a closed-form solution for the expectation.

        Parameters
        ----------
        classifier_predictions: Scoring predictions
        W: The item and rating dataset
        num_virtual_raters: (the same as the instance property if None) the number of virtual raters drawn when calculating the score. Higher num_virtual_raters makes the variance of the score lower.
        num_ref_raters_per_virtual_rater: (the same as the instance property if None) A virtual rater is the combined rating of a randomly selected set of num_ref_raters_per_virtual_rater non-null ratings for each column
        ref_rater_combiner: (the same as the instance property if None) The way to combine the ref_raters. Default: Combine with majority vote for discrete labels; mean for continuous labels
        verbosity: (the same as the instance property if None) verbosity value from 1 to 4 indicating increased verbosity.

        Returns
        -------
        A scalar expected score, or None if every virtual rater produced a null score.
        """
        # NOTE(review): these falsy tests mean an explicit 0 argument falls
        # back to the instance default — presumably intentional, but worth
        # confirming for the numeric parameters.
        if not num_virtual_raters:
            num_virtual_raters = self.num_virtual_raters
        if not num_ref_raters_per_virtual_rater:
            num_ref_raters_per_virtual_rater = self.num_ref_raters_per_virtual_rater
        if not ref_rater_combiner:
            ref_rater_combiner = self.ref_rater_combiner
        if not verbosity:
            verbosity = self.verbosity
        # create a bunch of virtual raters (samples)
        # for each virtual rater, pick a random combination randomly selected set of num_ref_raters_per_virtual_rater non-null ratings for each column
        virtual_raters_collection = []
        if ref_rater_combiner == "majority_vote":
            for _, virtual_rater_i in W.iterrows():
                vals = virtual_rater_i.dropna().values
                if len(vals) > 0:
                    ratings_for_i = []
                    # Can't sample more reference raters than the item has ratings.
                    num = min(len(vals), num_ref_raters_per_virtual_rater)
                    for _ in range(num_virtual_raters):
                        # select num_ref_raters_per_virtual_rater reference raters, and combine them to produce virtual rater label
                        ratings_for_i.append(mode(np.random.choice(vals, num, replace=True)))
                    virtual_raters_collection.append(ratings_for_i)
                # NOTE(review): rows with no non-null ratings are silently
                # dropped here, while `classifier_predictions` is passed to
                # `self.score` in full — if any row is all-null, predictions
                # and virtual-rater labels appear misaligned. TODO confirm
                # whether callers guarantee at least one rating per row.
        else:
            raise NotImplementedError()
        # one row for each item; num_virtual_raters columns
        virtual_raters_matrix = np.array(virtual_raters_collection)
        # iterate through the columns (virtual raters) of samples_matrix, scoring each
        scores = [self.score(classifier_predictions, virtual_rater) for virtual_rater in virtual_raters_matrix.T]
        non_null_scores = [score for score in scores if not pd.isna(score)]
        if len(non_null_scores) == 0:
            if verbosity > 2:
                print("\t\t\tNo non-null scores")
            return None
        # take the average score across virtual raters
        retval = sum(non_null_scores) / len(non_null_scores)
        if verbosity > 2:
            print(f"\t\tnon_null_scores = {non_null_scores}; returning mean: {retval}")
        return retval

    def expected_score_non_anonymous_raters(self,
                                            classifier_predictions,
                                            W,
                                            verbosity=None):
        """
        A virtual rater is a column of W.

        Parameters
        ----------
        classifier_predictions: Scoring predictions
        W: The item and rating dataset
        verbosity: (the same as the instance property if None) verbosity value from 1 to 4 indicating increased verbosity.

        Returns
        -------
        A scalar expected score, or None if every column produced a null score.
        """
        if not verbosity:
            verbosity = self.verbosity
        # one sample for each column
        scores = [self.score(classifier_predictions, W[col]) for col in W.columns]
        non_null_scores = [score for score in scores if not pd.isna(score)]
        if len(non_null_scores) == 0:
            if verbosity > 2:
                print("\t\t\tNo non-null scores")
            return None
        retval = sum(non_null_scores) / len(non_null_scores)
        if verbosity > 2:
            print(f"\t\tnon_null_scores = {non_null_scores}; returning mean: {retval}")
        return retval

    def expected_score(self,
                       classifier_predictions: Sequence,
                       raters: Sequence,
                       W,
                       anonymous=False,
                       verbosity=None):
        """
        Computes the expected score of the classifier against a random rater.
        With anonymous flag, compute expected score against a randomly selected label for each item.
        With non-anonymous, compute the expected score against a randomly selected column.

        Parameters
        ----------
        classifier_predictions: Predictions to be scored
        raters: Which columns of W to use as reference raters to score the predictions against
        W: The item and rating dataset.
        anonymous: if False, then a random rater is a column from W; if True, then labels in a column are
            not necessarily from the same rater.
        verbosity: (the same as the instance property if None) verbosity value from 1 to 4 indicating increased printed feedback during execution.

        Returns
        -------
        Expected score of the classifier against a random rater.
        """
        if not verbosity:
            verbosity = self.verbosity
        if verbosity > 2:
            print(f"\t\tScoring predictions = {classifier_predictions} vs. ref raters {raters}")
        if verbosity > 4:
            print(f"ref_ratings = \n{W.loc[:, list(raters)]}")
        # Restrict W to the chosen reference raters, then dispatch on mode.
        if not anonymous:
            return self.expected_score_non_anonymous_raters(classifier_predictions, W[raters], verbosity=verbosity)
        else:
            return self.expected_score_anonymous_raters(classifier_predictions, W[raters], verbosity=verbosity)
class Scorer_for_Hard_Classifier(Scorer):
    """
    Scorer base class for hard classifiers (whose output is a DiscretePrediction).

    Computes a numeric score for a sequence of classifier DiscretePredictions:
    - .score() yields the actual score against a sequence of reference labels
    - .expected_score() yields the expected score against a matrix of reference labels

    Note that the current implementation of survey equivalence centering on c_0
    and plotting both assume that higher scores are better.
    """

    def __init__(self,
                 num_virtual_raters=100,
                 num_ref_raters_per_virtual_rater=1,
                 ref_rater_combiner="majority_vote",
                 verbosity=0):
        super().__init__(num_virtual_raters=num_virtual_raters,
                         num_ref_raters_per_virtual_rater=num_ref_raters_per_virtual_rater,
                         ref_rater_combiner=ref_rater_combiner,
                         verbosity=verbosity)

    @staticmethod
    @abstractmethod
    def score(classifier_predictions: Sequence[DiscretePrediction],
              rater_labels: Sequence) -> float:
        pass

    # The overrides below exist only to narrow the type annotation on
    # `classifier_predictions`; they delegate unchanged to the base class.
    def expected_score_anonymous_raters(self,
                                        classifier_predictions: Sequence[DiscretePrediction],
                                        W,
                                        num_virtual_raters=None,
                                        num_ref_raters_per_virtual_rater=None,
                                        ref_rater_combiner=None,
                                        verbosity=None):
        return super().expected_score_anonymous_raters(
            classifier_predictions, W,
            num_virtual_raters=num_virtual_raters,
            num_ref_raters_per_virtual_rater=num_ref_raters_per_virtual_rater,
            ref_rater_combiner=ref_rater_combiner,
            verbosity=verbosity)

    def expected_score_non_anonymous_raters(self,
                                            classifier_predictions: Sequence[DiscretePrediction],
                                            W,
                                            verbosity=None):
        return super().expected_score_non_anonymous_raters(
            classifier_predictions, W, verbosity=verbosity)

    def expected_score(self,
                       classifier_predictions: Sequence[DiscretePrediction],
                       raters: Sequence,
                       W,
                       anonymous=False,
                       verbosity=None):
        return super().expected_score(
            classifier_predictions, raters, W,
            anonymous=anonymous, verbosity=verbosity)
class Scorer_for_Soft_Classifier(Scorer):
    """
    Scorer base class for soft classifiers (whose output is a DiscreteDistributionPrediction).

    Computes a numeric score for a sequence of classifier DiscreteDistributionPredictions:
    - .score() yields the actual score against a sequence of reference labels
    - .expected_score() yields the expected score against a matrix of reference labels

    Note that the current implementation of survey equivalence centering on c_0
    and plotting both assume that higher scores are better. Currently, this only
    affects the CrossEntropy scorer, which we have negated from the traditional
    definition.
    """

    def __init__(self,
                 num_virtual_raters=100,
                 num_ref_raters_per_virtual_rater=1,
                 ref_rater_combiner="majority_vote",
                 verbosity=0):
        super().__init__(num_virtual_raters=num_virtual_raters,
                         num_ref_raters_per_virtual_rater=num_ref_raters_per_virtual_rater,
                         ref_rater_combiner=ref_rater_combiner,
                         verbosity=verbosity)

    @staticmethod
    @abstractmethod
    def score(classifier_predictions: Sequence[DiscreteDistributionPrediction],
              rater_labels: Sequence) -> float:
        pass

    # The overrides below exist only to narrow the type annotation on
    # `classifier_predictions`; they delegate unchanged to the base class.
    def expected_score_anonymous_raters(self,
                                        classifier_predictions: Sequence[DiscreteDistributionPrediction],
                                        W,
                                        num_virtual_raters=None,
                                        num_ref_raters_per_virtual_rater=None,
                                        ref_rater_combiner=None,
                                        verbosity=None):
        return super().expected_score_anonymous_raters(
            classifier_predictions, W,
            num_virtual_raters=num_virtual_raters,
            num_ref_raters_per_virtual_rater=num_ref_raters_per_virtual_rater,
            ref_rater_combiner=ref_rater_combiner,
            verbosity=verbosity)

    def expected_score_non_anonymous_raters(self,
                                            classifier_predictions: Sequence[DiscreteDistributionPrediction],
                                            W,
                                            verbosity=None):
        return super().expected_score_non_anonymous_raters(
            classifier_predictions, W, verbosity=verbosity)

    def expected_score(self,
                       classifier_predictions: Sequence[DiscreteDistributionPrediction],
                       raters: Sequence,
                       W,
                       anonymous=False,
                       verbosity=None):
        return super().expected_score(
            classifier_predictions, raters, W,
            anonymous=anonymous, verbosity=verbosity)
class Scorer_for_Numeric_Classifier(Scorer):
    """
    Scorer base class for numeric classifiers (whose output is a NumericPrediction).

    Computes a numeric score for a sequence of classifier NumericPredictions:
    - .score() yields the actual score against a sequence of reference labels
    - .expected_score() yields the expected score against a matrix of reference labels

    Note that the current implementation of survey equivalence centering on c_0
    and plotting both assume that higher scores are better.
    """

    def __init__(self,
                 num_virtual_raters=100,
                 num_ref_raters_per_virtual_rater=1,
                 ref_rater_combiner="mean",
                 verbosity=0):
        # Continuous labels default to combining reference raters by mean.
        super().__init__(num_virtual_raters=num_virtual_raters,
                         num_ref_raters_per_virtual_rater=num_ref_raters_per_virtual_rater,
                         ref_rater_combiner=ref_rater_combiner,
                         verbosity=verbosity)

    @staticmethod
    @abstractmethod
    def score(classifier_predictions: Sequence[NumericPrediction],
              rater_labels: Sequence) -> float:
        pass

    # The overrides below exist only to narrow the type annotation on
    # `classifier_predictions`; they delegate unchanged to the base class.
    def expected_score_anonymous_raters(self,
                                        classifier_predictions: Sequence[NumericPrediction],
                                        W,
                                        num_virtual_raters=None,
                                        num_ref_raters_per_virtual_rater=None,
                                        ref_rater_combiner=None,
                                        verbosity=None):
        return super().expected_score_anonymous_raters(
            classifier_predictions, W,
            num_virtual_raters=num_virtual_raters,
            num_ref_raters_per_virtual_rater=num_ref_raters_per_virtual_rater,
            ref_rater_combiner=ref_rater_combiner,
            verbosity=verbosity)

    def expected_score_non_anonymous_raters(self,
                                            classifier_predictions: Sequence[NumericPrediction],
                                            W,
                                            verbosity=None):
        return super().expected_score_non_anonymous_raters(
            classifier_predictions, W, verbosity=verbosity)

    def expected_score(self,
                       classifier_predictions: Sequence[NumericPrediction],
                       raters: Sequence,
                       W,
                       anonymous=False,
                       verbosity=None):
        return super().expected_score(
            classifier_predictions, raters, W,
            anonymous=anonymous, verbosity=verbosity)
class Correlation(Scorer_for_Numeric_Classifier):
    """
    Scores numeric predictions by the Pearson correlation coefficient between
    predicted values and reference labels.
    """

    def __init__(self):
        super().__init__()

    @staticmethod
    def score(classifier_predictions: Sequence[NumericPrediction],
              rater_labels: Sequence[str],
              verbosity=0
              ):
        """
        Compute the Pearson correlation between predictions and labels.

        Parameters
        ----------
        classifier_predictions: numeric values
        rater_labels: sequence of labels, which should be numeric values
        verbosity: verbosity value; higher values print more diagnostics

        Returns
        -------
        Pearson correlation coefficient, or None when the lengths differ or
        no item has both a non-null prediction and a non-null label.
        """
        if verbosity > 3:
            print(f'\t\t\tcorrelation: preds={classifier_predictions}, labels={list(rater_labels)}')
        if len(classifier_predictions) != len(rater_labels):
            print("ALERT: classifier_prediction and rater_labels not of same length; skipping")
            print("")
            return None

        # Keep only items where both the prediction and the label are present.
        paired = [
            (pred.value, label)
            for (pred, label) in zip(classifier_predictions, rater_labels)
            if pred and (not pd.isna(pred.value)) and (not pd.isna(label))
        ]
        if len(paired) == 0:
            if verbosity > 0:
                print("ALERT: no items with both prediction and label; skipping\n")
            return None

        # zip(*pairs) unzips the list of pairs into two parallel tuples.
        non_null_preds, non_null_labels = zip(*paired)
        if verbosity > 3:
            print(f'\t\t\tcorrelation: non null preds={non_null_preds}, non null labels={list(non_null_labels)}')
        retval = np.corrcoef(non_null_preds, non_null_labels)[1, 0]
        if verbosity > 2:
            print(f"\t\t\tcorrelation: returning score = {retval}")
        return retval
class AgreementScore(Scorer_for_Hard_Classifier):
    """
    Agreement Scorer. Discrete labels and predictions.

    The score is the fraction of items on which the classifier's predicted
    label matches the (virtual) reference rater's label.
    """

    def __init__(self, num_virtual_raters=100, num_ref_raters_per_virtual_rater=1,
                 ref_rater_combiner="majority_vote", verbosity=0):
        super().__init__(num_virtual_raters=num_virtual_raters,
                         num_ref_raters_per_virtual_rater=num_ref_raters_per_virtual_rater,
                         ref_rater_combiner=ref_rater_combiner,
                         verbosity=verbosity)

    def expected_score_anonymous_raters(self,
                                        classifier_predictions,
                                        W,
                                        num_virtual_raters=None,
                                        num_ref_raters_per_virtual_rater=None,
                                        ref_rater_combiner=None,
                                        verbosity=None):
        """
        A virtual rater is a majority vote from a group of
        num_ref_raters_per_virtual_rater randomly selected non-null ratings.
        Closed-form solution for the expectation, so we ignore the
        num_virtual_raters parameter (except when falling back to sampling
        for the non-binary, multi-rater case).

        Parameters
        ----------
        classifier_predictions: Scoring predictions
        W: The item and rating dataset
        num_virtual_raters: (instance default if None) used only by the sampling fallback
        num_ref_raters_per_virtual_rater: (instance default if None) group size per virtual rater
        ref_rater_combiner: (instance default if None) how reference raters are combined
        verbosity: (instance default if None) verbosity value from 1 to 4

        Returns
        -------
        A scalar expected score, or None if no item had any non-null rating.
        """
        if not num_virtual_raters:
            num_virtual_raters = self.num_virtual_raters
        if not num_ref_raters_per_virtual_rater:
            num_ref_raters_per_virtual_rater = self.num_ref_raters_per_virtual_rater
        if not ref_rater_combiner:
            ref_rater_combiner = self.ref_rater_combiner
        if not verbosity:
            verbosity = self.verbosity
        # For each item: probability that a virtual rater's combined label
        # agrees with the classifier's prediction.
        tot = 0
        ct = 0
        for (row, pred) in zip([row for _, row in W.iterrows()], classifier_predictions):
            # count frequency of each value
            counts = row.dropna().value_counts()
            tot_counts = np.sum(counts)
            if len(counts) == 0:
                # no non-null labels for this item; skip it
                continue
            # NOTE: the fast combination calculation for the majority vote
            # rule only covers the binary case; fall back to sampling otherwise.
            if len(counts) > 2 and num_ref_raters_per_virtual_rater > 1:
                return super().expected_score_anonymous_raters(
                    classifier_predictions, W,
                    num_virtual_raters=num_virtual_raters,
                    num_ref_raters_per_virtual_rater=num_ref_raters_per_virtual_rater,
                    ref_rater_combiner=ref_rater_combiner,
                    verbosity=verbosity)
            # majority vote of the reference panel for a particular label: freqs[]
            freqs = counts / np.sum(counts)
            for label, count in counts.items():
                # Hypergeometric probability that this label wins the majority
                # vote among num_ref_raters_per_virtual_rater sampled ratings.
                # (Renamed from `sum`, which shadowed the builtin.)
                vote_prob = 0
                for ii in range(int((num_ref_raters_per_virtual_rater) / 2) + 1):
                    i = int((num_ref_raters_per_virtual_rater + 1) / 2) + ii
                    # i is the number of votes for this label; pick i from the
                    # current label's ratings and the rest from other labels.
                    if i * 2 == num_ref_raters_per_virtual_rater:
                        # exact tie: this label wins with probability 1/2
                        vote_prob += comb(i, count) * comb(num_ref_raters_per_virtual_rater - i, tot_counts - count) / 2
                    elif i * 2 > num_ref_raters_per_virtual_rater:
                        vote_prob += comb(i, count) * comb(num_ref_raters_per_virtual_rater - i, tot_counts - count)
                freqs[label] = vote_prob / comb(num_ref_raters_per_virtual_rater, tot_counts)
            ct += 1
            if pred.value in freqs:
                tot += freqs[pred.value]
            else:
                # predicted label never occurred in row, so no agreement
                # probability to add to tot, but the item still counts in ct
                pass
        if ct > 0:
            return tot / ct
        else:
            return None

    @staticmethod
    def score(classifier_predictions: Sequence[DiscretePrediction],
              rater_labels: Sequence[str],
              verbosity=0):
        """
        Agreement score measures the normalized number of times that the
        predictor matched the label. Akin to a typical accuracy score.

        Parameters
        ----------
        classifier_predictions: predictions, each carrying a .value attribute
        rater_labels: sequence of reference labels
        verbosity: unused here; kept for interface uniformity

        Returns
        -------
        Fraction of items where pred.value == label.
        """
        assert len(classifier_predictions) == len(rater_labels)
        tot_score = sum(pred.value == label
                        for (pred, label) in zip(classifier_predictions, rater_labels)) \
                    / len(classifier_predictions)
        return tot_score
class CrossEntropyScore(Scorer_for_Soft_Classifier):
    """
    Cross Entropy Scorer.

    Negated from the traditional definition so that higher is better
    (scores are <= 0), consistent with the other scoring functions.
    """

    def __init__(self, num_virtual_raters=100, num_ref_raters_per_virtual_rater=1,
                 ref_rater_combiner="majority_vote", verbosity=0):
        super().__init__(num_virtual_raters=num_virtual_raters,
                         num_ref_raters_per_virtual_rater=num_ref_raters_per_virtual_rater,
                         ref_rater_combiner=ref_rater_combiner,
                         verbosity=verbosity)

    def expected_score_anonymous_raters(self,
                                        classifier_predictions,
                                        W,
                                        num_virtual_raters=None,
                                        num_ref_raters_per_virtual_rater=None,
                                        ref_rater_combiner=None,
                                        verbosity=None):
        """
        A virtual rater is a majority vote from a group of
        num_ref_raters_per_virtual_rater randomly selected non-null ratings.
        Closed-form solution for the expectation, so we ignore the
        num_virtual_raters parameter (except when falling back to sampling
        for the non-binary, multi-rater case).

        Parameters
        ----------
        classifier_predictions: Scoring predictions
        W: The item and rating dataset
        num_virtual_raters: (instance default if None) used only by the sampling fallback
        num_ref_raters_per_virtual_rater: (instance default if None) group size per virtual rater
        ref_rater_combiner: (instance default if None) how reference raters are combined
        verbosity: (instance default if None) verbosity value from 1 to 4

        Returns
        -------
        A scalar expected score, or None if no item had any non-null rating.
        """
        if not num_virtual_raters:
            num_virtual_raters = self.num_virtual_raters
        if not num_ref_raters_per_virtual_rater:
            num_ref_raters_per_virtual_rater = self.num_ref_raters_per_virtual_rater
        if not ref_rater_combiner:
            ref_rater_combiner = self.ref_rater_combiner
        if not verbosity:
            verbosity = self.verbosity
        # For each item: use each label's probability of winning the majority
        # vote as a weight on the (negated) log-loss of the prediction.
        tot = 0
        ct = 0
        for (row, pred) in zip([row for _, row in W.iterrows()], classifier_predictions):
            # count frequency of each value
            counts = row.dropna().value_counts()
            tot_counts = np.sum(counts)
            if len(counts) == 0:
                # no non-null labels for this item; skip it
                continue
            # NOTE: the fast combination calculation for the majority vote
            # rule only covers the binary case; fall back to sampling otherwise.
            if len(counts) > 2 and num_ref_raters_per_virtual_rater > 1:
                return super().expected_score_anonymous_raters(
                    classifier_predictions, W,
                    num_virtual_raters=num_virtual_raters,
                    num_ref_raters_per_virtual_rater=num_ref_raters_per_virtual_rater,
                    ref_rater_combiner=ref_rater_combiner,
                    verbosity=verbosity)
            # majority vote of the reference panel for a particular label: freqs[]
            freqs = counts / np.sum(counts)
            for label, count in counts.items():
                # Hypergeometric probability that this label wins the majority
                # vote among num_ref_raters_per_virtual_rater sampled ratings.
                # (Renamed from `sum`, which shadowed the builtin.)
                vote_prob = 0
                for ii in range(int((num_ref_raters_per_virtual_rater) / 2) + 1):
                    i = int((num_ref_raters_per_virtual_rater + 1) / 2) + ii
                    # i is the number of votes for this label; pick i from the
                    # current label's ratings and the rest from other labels.
                    if i * 2 == num_ref_raters_per_virtual_rater:
                        # exact tie: this label wins with probability 1/2
                        vote_prob += comb(i, count) * comb(num_ref_raters_per_virtual_rater - i, tot_counts - count) / 2
                    elif i * 2 > num_ref_raters_per_virtual_rater:
                        vote_prob += comb(i, count) * comb(num_ref_raters_per_virtual_rater - i, tot_counts - count)
                freqs[label] = vote_prob / comb(num_ref_raters_per_virtual_rater, tot_counts)
            item_tot = 0
            for label, freq in freqs.items():
                # We use the negated cross-entropy, so that higher scores will
                # be better, which is true of all the other scoring functions.
                # If we used the standard cross-entropy, scores would be
                # positive, and higher scores would be worse. Several things
                # would have to be generalized in equivalence.py to allow for
                # higher scores being worse, including plotting and centering.
                score = freq * log2(pred.label_probability(label))
                item_tot += score
            tot += item_tot
            ct += 1
        if ct > 0:
            return tot / ct
        else:
            return None

    @staticmethod
    def score(classifier_predictions: Sequence[DiscreteDistributionPrediction],
              rater_labels: Sequence[str],
              verbosity=0):
        """
        Calculates the (negated) cross entropy of predictions against labels.

        For example, with predictions over labels ['a', 'b'] of
        [.3, .7], [.4, .6], [.6, .4]:
          - against labels ['b', 'b', 'b'] the score is
            mean(log2(.7), log2(.6), log2(.6)) ~= -0.6628
          - against labels ['a', 'b', 'b'] it is
            mean(log2(.3), log2(.6), log2(.6)) ~= -1.0703
        (The original docstring listed positive example values, which cannot
        occur: log2 of a probability is <= 0.)

        Parameters
        ----------
        classifier_predictions: probability-distribution predictions
        rater_labels: sequence of reference labels
        verbosity: higher values print diagnostics

        Returns
        -------
        Mean of log2(predicted probability of the observed label) over items
        where both prediction and label are present, or None if no such item.
        """
        assert len(classifier_predictions) == len(rater_labels)
        if verbosity > 2:
            print(f'\n-------\n\t\tpredictions: {classifier_predictions[:10]}')
            print(f'\n--------\n\t\tlabels: {rater_labels[:10]}')

        def item_score(pred, label):
            # Skip items with a missing prediction or label.
            if pred is None: return None
            if label is None: return None
            return log2(pred.label_probability(label))

        # compute the mean score over all scorable items
        seq = list()
        for (pred, label) in zip(classifier_predictions, rater_labels):
            score = item_score(pred, label)
            if score is not None:
                seq.append(score)
        if len(seq) == 0: return None
        return np.mean(seq)
class PrecisionScore(Scorer):
    """
    Precision scorer.

    Only implemented for binary labels where one of the labels is "pos"
    and the classifier makes discrete predictions.
    Precision = P(pos rating | pos prediction): true positives divided by
    all predicted positives.
    """

    def __init__(self):
        super().__init__()

    def expected_score_anonymous_raters(self,
                                        classifier_predictions,
                                        W,
                                        num_virtual_raters=None,
                                        verbosity=0):
        """
        Expected precision against a randomly drawn reference rater.

        A virtual rater is a randomly selected non-null rating for each column.
        A closed-form solution exists for the expectation, so the
        num_virtual_raters parameter is ignored.

        Parameters
        ----------
        classifier_predictions: scoring predictions
        W: the item and rating dataset (one row per item)
        num_virtual_raters: ignored (closed-form expectation)
        verbosity: verbosity value from 1 to 4 indicating increased verbosity

        Returns
        -------
        A scalar expected score, or None if no item had a "pos" prediction
        together with at least one non-null rating.
        """
        # For each row: count it only if the prediction is "positive",
        # and accumulate the frequency of "pos" among its ratings.
        tot = 0
        ct = 0
        for (row, pred) in zip([row for _, row in W.iterrows()], classifier_predictions):
            counts = row.dropna().value_counts()
            if len(counts) == 0:
                # no non-null labels for this item
                continue
            if pred.value != "pos":
                # no impact on precision if classifier didn't predict positive
                continue
            freqs = counts / np.sum(counts)
            # .get guards the case where the classifier predicted "pos" but no
            # reference rater gave "pos"; plain freqs['pos'] raised KeyError.
            tot += freqs.get('pos', 0)
            ct += 1
        if ct > 0:
            return tot / ct
        else:
            return None

    @staticmethod
    def score(classifier_predictions: Sequence[DiscreteDistributionPrediction],
              rater_labels: Sequence[str],
              verbosity=0,
              average: str = 'micro') -> float:
        """
        Precision score. This function uses sklearn's precision function.

        >>> PrecisionScore.score([DiscreteDistributionPrediction(['a', 'b'], prs) for prs in [[.3, .7], [.4, .6], [.6, .4]]], ['b', 'b', 'b'], average='micro')
        0.6666666666666666
        >>> PrecisionScore.score([DiscreteDistributionPrediction(['a', 'b'], prs) for prs in [[.3, .7], [.4, .6], [.6, .4]]], ['a', 'b', 'b'], average='micro')
        0.3333333333333333

        Parameters
        ----------
        classifier_predictions: predicted label distributions
        rater_labels: sequence of labels
        verbosity: levels > 2 print the first few predictions and labels
        average: 'macro' or 'micro' averaging (passed to sklearn)

        Returns
        -------
        Precision Score
        """
        assert len(classifier_predictions) == len(rater_labels)
        if verbosity > 2:
            print(f'\n-------\n\t\tpredictions: {classifier_predictions[:10]}')
            print(f'\n--------\n\t\tlabels: {rater_labels[:10]}')
        # Drop items where either the prediction or the label is missing.
        new_pred = list()
        new_label = list()
        for (pred, label) in zip(classifier_predictions, rater_labels):
            if pred is not None and label is not None:
                new_pred.append(pred)
                new_label.append(label)
        return precision_score(new_label, [p.value for p in new_pred], average=average)
class RecallScore(Scorer):
    """Recall scorer: fraction of reference labels recovered, via sklearn."""

    def __init__(self):
        super().__init__()

    @staticmethod
    def score(classifier_predictions: Sequence[DiscreteDistributionPrediction],
              rater_labels: Sequence[str],
              verbosity=0,
              average: str = 'micro') -> float:
        """
        Recall score, computed with sklearn's recall function.

        Items where either the prediction or the label is None are dropped
        before scoring.

        Parameters
        ----------
        classifier_predictions: predicted label distributions
        rater_labels: sequence of labels
        verbosity: levels > 2 print the first few predictions and labels
        average: 'macro' or 'micro' averaging (passed to sklearn)

        Returns
        -------
        Recall Score
        """
        assert len(classifier_predictions) == len(rater_labels)
        if verbosity > 2:
            print(f'\n-------\n\t\tpredictions: {classifier_predictions[:10]}')
            print(f'\n--------\n\t\tlabels: {rater_labels[:10]}')
        # Keep only fully-populated (prediction, label) pairs.
        kept = [(p, l) for p, l in zip(classifier_predictions, rater_labels)
                if p is not None and l is not None]
        labels = [l for _, l in kept]
        predicted = [p.value for p, _ in kept]
        return recall_score(labels, predicted, average=average)
class F1Score(Scorer):
    """F1 scorer: harmonic mean of precision and recall, via sklearn."""

    def __init__(self):
        super().__init__()

    @staticmethod
    def score(classifier_predictions: Sequence[DiscreteDistributionPrediction],
              rater_labels: Sequence[str],
              verbosity=0,
              average: str = 'micro') -> float:
        """
        F1 score, computed with sklearn's F1 function.

        Items where either the prediction or the label is None are dropped
        before scoring.

        Parameters
        ----------
        classifier_predictions: predicted label distributions
        rater_labels: sequence of labels
        verbosity: levels > 2 print the first few predictions and labels
        average: 'macro' or 'micro' averaging (passed to sklearn)

        Returns
        -------
        F1 Score
        """
        assert len(classifier_predictions) == len(rater_labels)
        if verbosity > 2:
            print(f'\n-------\n\t\tpredictions: {classifier_predictions[:10]}')
            print(f'\n--------\n\t\tlabels: {rater_labels[:10]}')
        # Keep only fully-populated (prediction, label) pairs.
        kept = [(p, l) for p, l in zip(classifier_predictions, rater_labels)
                if p is not None and l is not None]
        labels = [l for _, l in kept]
        predicted = [p.value for p, _ in kept]
        return f1_score(labels, predicted, average=average)
class AUCScore(Scorer):
    """AUC scorer (binary only; multiclass is not implemented)."""

    def __init__(self):
        super().__init__()

    @staticmethod
    def score(classifier_predictions: Sequence[DiscreteDistributionPrediction],
              rater_labels: Sequence[str],
              verbosity=0) -> float:
        """
        AUC score via sklearn's AUC function.

        Only defined for exactly two distinct observed labels; returns np.nan
        for a single distinct label and for multiclass inputs (multiclass is
        not implemented). Items where either the prediction or the label is
        None are dropped first.

        Parameters
        ----------
        classifier_predictions: predicted label distributions
        rater_labels: sequence of labels
        verbosity: levels > 2 print the first few predictions and labels

        Returns
        -------
        AUC Score
        """
        assert len(classifier_predictions) == len(rater_labels)
        if verbosity > 2:
            print(f'\n-------\n\t\tpredictions: {classifier_predictions[:10]}')
            print(f'\n--------\n\t\tlabels: {rater_labels[:10]}')
        # Keep only fully-populated (prediction, label) pairs.
        kept = [(p, l) for p, l in zip(classifier_predictions, rater_labels)
                if p is not None and l is not None]
        labels = [l for _, l in kept]
        n_distinct = len(set(labels))
        if n_distinct == 1:
            return np.nan
        if n_distinct == 2:
            return roc_auc_score(labels, [p.value_prob for p, _ in kept])
        if n_distinct > 2:
            print("multiclass AUC not implemented")
            return np.nan
class DMIScore_for_Hard_Classifier(Scorer_for_Hard_Classifier):
    """
    DMI (determinant-based mutual information) scorer for hard classifiers.

    DMI is |det(J)| where J is the (normalized) joint frequency matrix of
    classifier outputs vs. reference-rater labels.
    """

    def __init__(self, num_virtual_raters=100, num_ref_raters_per_virtual_rater=1, ref_rater_combiner="majority_vote", verbosity=0):
        # All settings are forwarded unchanged to the base scorer.
        super().__init__(num_virtual_raters=num_virtual_raters, num_ref_raters_per_virtual_rater=num_ref_raters_per_virtual_rater, ref_rater_combiner=ref_rater_combiner, verbosity=verbosity)

    def expected_score_anonymous_raters(self, classifier_predictions: Sequence[DiscretePrediction], W, num_virtual_raters=None, num_ref_raters_per_virtual_rater=None, ref_rater_combiner=None, verbosity=None):
        """
        Expected DMI of the classifier against panels of reference raters
        drawn from the rating matrix W (one row of ratings per item).

        Returns a scalar: |det| of the normalized joint matrix of classifier
        output vs. the majority-vote outcome of a reference panel.
        """
        # Fall back to instance-level settings for any argument not supplied.
        # NOTE(review): `if not x` also treats 0 as "not supplied" — confirm 0
        # is never a meaningful value for these parameters.
        if not num_virtual_raters:
            num_virtual_raters = self.num_virtual_raters
        if not num_ref_raters_per_virtual_rater:
            num_ref_raters_per_virtual_rater = self.num_ref_raters_per_virtual_rater
        if not ref_rater_combiner:
            ref_rater_combiner = self.ref_rater_combiner
        if not verbosity:
            verbosity = self.verbosity
        W_np = W.to_numpy()
        # Use index to represent the labels
        label_to_idx = dict()
        idx = 0
        for pred in classifier_predictions:
            if pred.value not in label_to_idx:
                label_to_idx[pred.value] = idx
                idx += 1
        # NOTE(review): W.to_numpy() includes NaN for missing ratings; each NaN
        # here would get its own label index and enlarge the matrix — confirm
        # W is fully populated or that NaNs are filtered upstream.
        for item_labels in W_np:
            for label in item_labels:
                if label not in label_to_idx:
                    label_to_idx[label] = idx
                    idx += 1
        # Panels larger than 1 are only supported for binary labels.
        if num_ref_raters_per_virtual_rater>1 and idx>2:
            raise NotImplementedError()
        # calculate the freq matrix
        freqs_matrix = np.zeros((idx,idx))
        for (row, pred) in zip([row for _, row in W.iterrows()], classifier_predictions):
            counts = row.dropna().value_counts()
            tot_counts=np.sum(counts)
            label_prob = np.zeros(idx)
            for label, count in counts.items():
                # calculate the probability of majority vote's outcomes
                # NOTE(review): `sum` shadows the builtin within this loop.
                sum = 0
                for ii in range(int((num_ref_raters_per_virtual_rater)/2)+1):
                    i = int((num_ref_raters_per_virtual_rater+1)/2) + ii
                    # i is the number of votes
                    # if there is a tie, choose one randomly
                    # pick i from the current label, and the rest from other labels
                    # NOTE(review): if `comb` is math/scipy comb(n, k), the
                    # argument order comb(i, count) looks reversed (expected
                    # comb(count, i)) — confirm comb's signature in this module.
                    if i*2 == num_ref_raters_per_virtual_rater:
                        sum += comb(i,count)*comb(num_ref_raters_per_virtual_rater-i,tot_counts-count)/2
                    # else
                    elif i*2 > num_ref_raters_per_virtual_rater:
                        sum += comb(i,count)*comb(num_ref_raters_per_virtual_rater-i,tot_counts-count)
                label_prob[label_to_idx[label]]=sum/comb(num_ref_raters_per_virtual_rater,tot_counts)
            # Accumulate this item's outcome distribution into the row for the
            # classifier's predicted label.
            freqs_matrix[label_to_idx[pred.value]] += label_prob
        # normalization
        freqs_matrix = freqs_matrix / np.sum(freqs_matrix)
        DMI=np.abs(np.linalg.det(freqs_matrix))
        return DMI

    @staticmethod
    def score(classifier_predictions: Sequence[DiscretePrediction],
              rater_labels: Sequence[str],
              verbosity=0) -> float:
        """
        DMI score.

        Parameters
        ----------
        classifier_predictions: the (hard) classifier's predictions for all items
        rater_labels: sequence of labels from the reference rater
        verbosity:

        Returns
        -------
        DMI Score: |det| of the normalized joint count matrix of predicted
        label vs. reference label.
        """
        assert len(classifier_predictions) == len(rater_labels)
        # Use index to represent the labels
        label_to_idx = dict()
        idx = 0
        for pred in classifier_predictions:
            if pred.value not in label_to_idx:
                label_to_idx[pred.value] = idx
                idx += 1
        for label in rater_labels:
            if label not in label_to_idx:
                label_to_idx[label] = idx
                idx += 1
        # calculate the freq matrix
        freqs_matrix = np.zeros((idx,idx))
        for (pred, label) in zip(classifier_predictions, rater_labels):
            freqs_matrix[label_to_idx[pred.value]][label_to_idx[label]] += 1
        # normalization
        freqs_matrix = freqs_matrix / np.sum(freqs_matrix)
        DMI=np.abs(np.linalg.det(freqs_matrix))
        return DMI
class DMIScore_for_Soft_Classifier(Scorer_for_Soft_Classifier):
    """
    DMI (determinant-based mutual information) scorer for soft classifiers.

    DMI is |det(J)| where J is the (normalized) joint distribution of the
    classifier's predicted label probabilities and the reference labels.
    """

    def __init__(self, num_virtual_raters=100, num_ref_raters_per_virtual_rater=1, ref_rater_combiner="majority_vote", verbosity=0):
        # All settings are forwarded unchanged to the base scorer.
        super().__init__(num_virtual_raters=num_virtual_raters, num_ref_raters_per_virtual_rater=num_ref_raters_per_virtual_rater, ref_rater_combiner=ref_rater_combiner, verbosity=verbosity)

    def expected_score_anonymous_raters(self, classifier_predictions: Sequence[DiscreteDistributionPrediction], W, num_virtual_raters=None, num_ref_raters_per_virtual_rater=None, ref_rater_combiner=None, verbosity=None):
        """
        Expected DMI of the soft classifier against panels of reference
        raters drawn from the rating matrix W (one row of ratings per item).
        """
        # Fall back to instance-level settings for any argument not supplied.
        # NOTE(review): `if not x` also treats 0 as "not supplied" — confirm 0
        # is never a meaningful value for these parameters.
        if not num_virtual_raters:
            num_virtual_raters = self.num_virtual_raters
        if not num_ref_raters_per_virtual_rater:
            num_ref_raters_per_virtual_rater = self.num_ref_raters_per_virtual_rater
        if not ref_rater_combiner:
            ref_rater_combiner = self.ref_rater_combiner
        if not verbosity:
            verbosity = self.verbosity
        W_np = W.to_numpy()
        # Create a dictionary to map label names to enumerated index values (0, 1 for binary labels)
        num_distinct_labels = len(classifier_predictions[0].label_names)
        label_idx_map = dict(zip(classifier_predictions[0].label_names,range(num_distinct_labels)))
        # if W has any labels that are not in the classifier's output, we have an all 0 column in the matrix
        # and DMI is 0
        # NOTE(review): W.to_numpy() includes NaN for missing ratings; a NaN is
        # never a key of label_idx_map, so any missing rating would also trigger
        # this early return of 0 — confirm W is fully populated.
        for item_labels in W_np:
            for label in item_labels:
                if label not in label_idx_map:
                    return 0
        # Panels larger than 1 are only supported for binary labels.
        if num_ref_raters_per_virtual_rater>1 and num_distinct_labels>2:
            raise NotImplementedError()
        # calculate the freq matrix; joint distribution of classifier output and reference rater labels
        freqs_matrix = np.zeros((num_distinct_labels, num_distinct_labels))
        for (row, pred) in zip([row for _, row in W.iterrows()], classifier_predictions):
            # each row is one item
            counts = row.dropna().value_counts() # a dict that maps from label names to frequency of that label among reference raters
            tot_counts=np.sum(counts)
            # if target panel size is 1, we could work directly with counts/tot_counts as probabilities.
            # more generally, we need the probabilities of different majority vote outcomes
            # rather than probabilities of different labels from individual raters
            # majority_prob will be a mapping from labels to the probability of a majority of raters giving that label
            majority_prob = np.zeros(num_distinct_labels)
            for label, count in counts.items():
                # NOTE(review): `sum` shadows the builtin within this loop.
                sum = 0
                for ii in range(int((num_ref_raters_per_virtual_rater)/2)+1):
                    i = int((num_ref_raters_per_virtual_rater+1)/2) + ii
                    # i is the number of votes
                    # if there is a tie, choose one randomly
                    # pick i from the current label, and the rest from other labels
                    # NOTE(review): if `comb` is math/scipy comb(n, k), the
                    # argument order comb(i, count) looks reversed (expected
                    # comb(count, i)) — confirm comb's signature in this module.
                    if i*2 == num_ref_raters_per_virtual_rater:
                        sum += comb(i,count)*comb(num_ref_raters_per_virtual_rater-i,tot_counts-count)/2
                    # else
                    elif i*2 > num_ref_raters_per_virtual_rater:
                        sum += comb(i,count)*comb(num_ref_raters_per_virtual_rater-i,tot_counts-count)
                majority_prob[label_idx_map[label]]=sum/comb(num_ref_raters_per_virtual_rater,tot_counts)
            # get joint probability distribution of classifier output and target panel output for this item
            # add that to the accumulating overall matrix; we will normalize later to make it a joint probability distribution
            freqs_matrix += np.array(pred.probabilities).reshape(-1,1) * majority_prob
        # normalization
        freqs_matrix = freqs_matrix / np.sum(freqs_matrix)
        # DMI is determinant of the normalized matrix
        DMI=np.abs(np.linalg.det(freqs_matrix))
        return DMI

    @staticmethod
    def score(classifier_predictions: Sequence[DiscreteDistributionPrediction],
              rater_labels: Sequence[str],
              verbosity=0) -> float:
        """
        DMI score.

        Parameters
        ----------
        classifier_predictions: the (soft) classifier's predictions for all items
        rater_labels: sequence of labels from the reference rater
        verbosity:

        Returns
        -------
        DMI Score: |det| of the normalized joint matrix of predicted label
        probabilities vs. reference labels; 0 if any reference label is
        outside the classifier's label set.
        """
        assert len(classifier_predictions) == len(rater_labels)
        # Use index to represent the labels
        idx = len(classifier_predictions[0].label_names)
        label_to_idx = dict(zip(classifier_predictions[0].label_names,range(idx)))
        for label in rater_labels:
            if label not in label_to_idx:
                return 0
        # calculate the freq matrix
        freqs_matrix = np.zeros((idx,idx))
        for (pred, label) in zip(classifier_predictions, rater_labels):
            freqs_matrix[label_to_idx[label]] += pred.probabilities
        # normalization
        freqs_matrix = freqs_matrix / np.sum(freqs_matrix)
        DMI=np.abs(np.linalg.det(freqs_matrix))
        return DMI
| 42.363803
| 250
| 0.636064
| 5,660
| 46,346
| 4.965901
| 0.075265
| 0.046963
| 0.04184
| 0.0523
| 0.796919
| 0.773259
| 0.748817
| 0.735689
| 0.710748
| 0.699932
| 0
| 0.012973
| 0.27983
| 46,346
| 1,094
| 251
| 42.363803
| 0.829134
| 0.302378
| 0
| 0.73271
| 0
| 0.001869
| 0.042715
| 0.018146
| 0
| 0
| 0
| 0
| 0.014953
| 1
| 0.08785
| false
| 0.016822
| 0.014953
| 0.016822
| 0.216822
| 0.042991
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2ce2eee8d89f06ffa737492055a7d3050e36fa30
| 47
|
py
|
Python
|
sandbox/utils.py
|
IvanaEscobar/sandbox
|
71d62af2c112686c5ce26def35593247cf6a0ccc
|
[
"MIT"
] | null | null | null |
sandbox/utils.py
|
IvanaEscobar/sandbox
|
71d62af2c112686c5ce26def35593247cf6a0ccc
|
[
"MIT"
] | 3
|
2022-02-15T23:32:52.000Z
|
2022-03-28T21:35:12.000Z
|
sandbox/utils.py
|
IvanaEscobar/sandbox
|
71d62af2c112686c5ce26def35593247cf6a0ccc
|
[
"MIT"
] | null | null | null |
def testFunction ( var ):
return var + 10
| 11.75
| 25
| 0.617021
| 6
| 47
| 4.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.297872
| 47
| 3
| 26
| 15.666667
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
2ce89d44b2c024f94cf4c603ae079d5d050a4038
| 2,096
|
py
|
Python
|
tests/test_rsa_crt_fault_attack.py
|
timgates42/featherduster
|
76954f234d9056bbaafa00b066725c7d8de9c9c4
|
[
"BSD-3-Clause"
] | 1,026
|
2016-05-05T17:27:39.000Z
|
2022-03-31T20:58:42.000Z
|
tests/test_rsa_crt_fault_attack.py
|
timgates42/featherduster
|
76954f234d9056bbaafa00b066725c7d8de9c9c4
|
[
"BSD-3-Clause"
] | 82
|
2016-05-06T13:02:14.000Z
|
2022-03-27T19:51:25.000Z
|
tests/test_rsa_crt_fault_attack.py
|
timgates42/featherduster
|
76954f234d9056bbaafa00b066725c7d8de9c9c4
|
[
"BSD-3-Clause"
] | 165
|
2016-05-05T17:33:38.000Z
|
2022-03-10T01:39:14.000Z
|
import cryptanalib as ca
d = 8276730537354063873046700086606934013390657907137764360442957114148983703392213920905379521933961200662349418726972470589061811064510563973168097131091762900935884000158700608857487894152877574467022635471921054922438416372847738044720341171693728509510304733152582025735997457366640962042446371702796650429270558556364971206673220641102736535785495085270088144553219777365472362543536538109527913412532336001148762959211359919986044869355021796538441331705044929239323402165504467704827987337504638532978042282749691349759655331384162387839085847376059025035741851624666333548281288846373129193232087364721195558785
N = 10582801803234222023721351326204905463469321586937873866285899804705438289550794516434678042160644904163578778193959630189545495390487529871810472562828953980775617092992054819000569379567214328036625223601593768074993103126020801627845266883277896935534900134552457628778290646235319742652095517837642111737039755900571013123362947713434696280050067990761280634722593133366154815295209776044603345322953117132250103892984014819690279192133597065661729463436631976729813726128619125291814410455706189604700693994757946995645898522397403679829590407298131095142520403589090633192841184074504222946814847796446760184239L
e = 0x10001
badsig = 8864006670929406852925128423419283000101606264931546544565004118285111530579411512945707908758302944358049245159881240365195456784997978690049780123607312704598614883100338341770324555702622215028874055295233188087028972047396168538381606028425952850084870840815139465808335221537979841948906784351532564396334588129879467099436972117704102514869368975631607984728653576512470131900790248497082055162110680555834874277167971776484142044468048689414584647613377242581544618782866426893967031773962301853433165727478082390333355401924553292504003746966703113454750999893154821514523123639279610673613179496062870087972L
message = 2
# Exercise the RSA-CRT fault attack: recover the private exponent d from a
# single faulty CRT signature (badsig) on `message` under modulus N.
# (Python 2 script: uses the print statement.)
print 'Testing RSA-CRT fault attack...'
try:
    # The attack must recover exactly the known private exponent d.
    assert(ca.rsa_crt_fault_attack(badsig, message, N, verbose=True) == d)
except:
    # NOTE(review): a bare except also swallows unrelated errors (typos,
    # import problems, KeyboardInterrupt) and reports them as a broken
    # attack — consider catching AssertionError specifically.
    exit('RSA-CRT fault attack is broken!')
| 116.444444
| 626
| 0.962309
| 41
| 2,096
| 49.121951
| 0.682927
| 0.008937
| 0.016385
| 0.025323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.906693
| 0.023378
| 2,096
| 17
| 627
| 123.294118
| 0.077186
| 0
| 0
| 0
| 0
| 0
| 0.02958
| 0
| 0
| 1
| 0.00334
| 0
| 0.090909
| 0
| null | null | 0
| 0.090909
| null | null | 0.090909
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fa5f164def109b09dfbbd12bdeedb6d6af991903
| 984
|
py
|
Python
|
dashboard/core/templatetags/core_extras.py
|
hebergui/webtrade
|
338fbf334b6ba173296635b380b53b088a87bb95
|
[
"Apache-2.0"
] | null | null | null |
dashboard/core/templatetags/core_extras.py
|
hebergui/webtrade
|
338fbf334b6ba173296635b380b53b088a87bb95
|
[
"Apache-2.0"
] | null | null | null |
dashboard/core/templatetags/core_extras.py
|
hebergui/webtrade
|
338fbf334b6ba173296635b380b53b088a87bb95
|
[
"Apache-2.0"
] | null | null | null |
"""Custom Django template filters used by the dashboard templates."""
import json
import statistics
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe

register = template.Library()


@register.filter
@stringfilter
def split(value, arg):
    """Split the string `value` on separator `arg`, returning a list."""
    return value.split(arg)


@register.filter(is_safe=True)
def js(obj):
    """Serialize `obj` to JSON and mark it safe for inline use in templates."""
    return mark_safe(json.dumps(obj))


@register.filter
def get_attr(obj, attr):
    """Look up attribute `attr` on `obj` (raises AttributeError if absent)."""
    return getattr(obj, attr)


@register.filter
def get_sum(li):
    """Sum of the elements of `li`."""
    return sum(li)


@register.filter
def get_min(li):
    """Minimum element of `li`."""
    return min(li)


@register.filter
def get_median(li):
    """Median of the values in `li`."""
    return statistics.median(li)


@register.filter
def get_mean(li):
    """Arithmetic mean of the values in `li`."""
    return statistics.mean(li)


@register.filter
def get_max(li):
    """Maximum element of `li`."""
    return max(li)


@register.filter
def get_abs(a):
    """Absolute value of `a`."""
    return abs(a)


@register.filter
def index(indexable, i):
    """Return `indexable[i]`, or None when `indexable` is None."""
    if indexable is not None:
        return indexable[i]


@register.filter
def get_pl(pru, price):
    """Percent change of `price` relative to reference price `pru`, rounded to 2 decimals."""
    return round(100 * (price - pru) / pru, 2)
| 14.909091
| 55
| 0.720528
| 144
| 984
| 4.847222
| 0.333333
| 0.22063
| 0.219198
| 0.229226
| 0.157593
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004902
| 0.170732
| 984
| 65
| 56
| 15.138462
| 0.85049
| 0
| 0
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.268293
| false
| 0
| 0.121951
| 0.243902
| 0.658537
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
d71bb519364d3c81f3c5a098c4b6698613f64631
| 18
|
py
|
Python
|
test.py
|
danielim/chenpy
|
502e00e610ce47469d75777f5c1224a316dda4f0
|
[
"MIT"
] | null | null | null |
test.py
|
danielim/chenpy
|
502e00e610ce47469d75777f5c1224a316dda4f0
|
[
"MIT"
] | null | null | null |
test.py
|
danielim/chenpy
|
502e00e610ce47469d75777f5c1224a316dda4f0
|
[
"MIT"
] | null | null | null |
import tldrwiki
| 4.5
| 15
| 0.777778
| 2
| 18
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 18
| 3
| 16
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d74711cccb093dafff7fd329dab02fe7da3c629b
| 48
|
py
|
Python
|
pytest-ex/ch1/test_two.py
|
leonhmi75/learning-materials
|
7342bf14e41ee2d1bf1b0b9b52f626318597a75e
|
[
"MIT"
] | 1
|
2019-05-01T05:25:22.000Z
|
2019-05-01T05:25:22.000Z
|
pytest-ex/ch1/test_two.py
|
leon-lei/learning-materials
|
7342bf14e41ee2d1bf1b0b9b52f626318597a75e
|
[
"MIT"
] | null | null | null |
pytest-ex/ch1/test_two.py
|
leon-lei/learning-materials
|
7342bf14e41ee2d1bf1b0b9b52f626318597a75e
|
[
"MIT"
] | null | null | null |
def test_failing():
assert(1,2,3) == (3,2,1)
| 24
| 28
| 0.5625
| 10
| 48
| 2.6
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.166667
| 48
| 2
| 28
| 24
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d79485dd00e3715f1bb3fe9967ca9dadecc1d7ff
| 41
|
py
|
Python
|
test.py
|
osirisguitar/gpt2-generator
|
a78af4f341f24f0452a44336309c6c9cbe09011f
|
[
"MIT"
] | null | null | null |
test.py
|
osirisguitar/gpt2-generator
|
a78af4f341f24f0452a44336309c6c9cbe09011f
|
[
"MIT"
] | null | null | null |
test.py
|
osirisguitar/gpt2-generator
|
a78af4f341f24f0452a44336309c6c9cbe09011f
|
[
"MIT"
] | null | null | null |
# Emit the two demo messages, in order.
for message in ("Message one", "Message two"):
    print(message)
| 20.5
| 20
| 0.731707
| 6
| 41
| 5
| 0.666667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 41
| 2
| 21
| 20.5
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0.52381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
d7973249303aefe10d10f81cc67f24b5061d4732
| 284
|
py
|
Python
|
app/static.py
|
renato-farias/openstack-instances-monitoring
|
8b3aaa5276404f79036b2ec8fbaa32d37b3d7cf5
|
[
"Apache-2.0"
] | 18
|
2016-05-13T13:16:03.000Z
|
2021-06-01T23:37:13.000Z
|
app/static.py
|
renato-farias/openstack-instances-monitoring
|
8b3aaa5276404f79036b2ec8fbaa32d37b3d7cf5
|
[
"Apache-2.0"
] | 2
|
2016-06-20T09:24:30.000Z
|
2017-04-06T14:41:44.000Z
|
app/static.py
|
renato-farias/openstack-instances-monitoring
|
8b3aaa5276404f79036b2ec8fbaa32d37b3d7cf5
|
[
"Apache-2.0"
] | 9
|
2016-05-15T20:12:23.000Z
|
2018-01-18T12:18:53.000Z
|
# -*- coding: utf-8 -*-
from flask import current_app
def _page(name):
    # Shared helper: serve a static HTML page from the Flask app's static folder.
    return current_app.send_static_file(name)

def index(resource=None):
    """Serve index.html; `resource` is accepted (catch-all routing) but unused."""
    return _page('index.html')

def instances():
    """Serve instances.html."""
    return _page('instances.html')

def report():
    """Serve report.html."""
    return _page('report.html')
| 18.933333
| 57
| 0.732394
| 40
| 284
| 4.95
| 0.475
| 0.20202
| 0.242424
| 0.30303
| 0.454545
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0.004082
| 0.137324
| 284
| 14
| 58
| 20.285714
| 0.804082
| 0.073944
| 0
| 0
| 0
| 0
| 0.1341
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
ad4357264dc72a492e2e7707307ba1d582488d9a
| 9,013
|
py
|
Python
|
src/performant/tools.py
|
babe269/performant
|
416c07b06a1288ad16d50e275fcd78c3ffee3cfb
|
[
"MIT"
] | null | null | null |
src/performant/tools.py
|
babe269/performant
|
416c07b06a1288ad16d50e275fcd78c3ffee3cfb
|
[
"MIT"
] | null | null | null |
src/performant/tools.py
|
babe269/performant
|
416c07b06a1288ad16d50e275fcd78c3ffee3cfb
|
[
"MIT"
] | null | null | null |
def plotFormants(fileNames, vowels, language, centroid):
    """
    Force-align each wav/txt pair via the WebMAUS web service, extract F1/F2
    formant values at vowel midpoints with parselmouth, and show an F1-F2
    scatter plot with plotly.

    fileNames: base names (no extension) of .wav/.txt pairs in the cwd
    vowels: vowel label substrings to look for in the "MAU" tier
    language: LANGUAGE code passed to the WebMAUS service
    centroid: if True, plot per-vowel centroids instead of raw tokens

    Returns the plotted dataframe (centroids when centroid is True, else
    the raw per-token dataframe).
    """
    #Requirements if running on Colab: install !pip install praat-parselmouth !pip install tgt
    #cd into a directory to store wav, txt and textgrid files before running.
    import os
    import requests
    import pandas as pd
    import urllib.request
    import parselmouth
    import tgt
    import plotly.express as px
    import numpy as np
    # Accumulators for vowel label, F1 and F2 of every matched token.
    vl = []
    f1l = []
    f2l = []
    #List of vowels for NZE
    # NOTE(review): `vowels = vowels` is a no-op self-assignment.
    vowels = vowels
    print(os.getcwd())
    for fileName in fileNames:
        #Call WebMaus Basic Api to generate TextGrids.
        print(" "+"└── "+ fileName)
        # NOTE(review): `headers` is built but never passed to requests.post.
        headers = {
            'content-type': 'multipart/form-data',
        }
        files = {
            'SIGNAL': (fileName + '.wav', open(fileName + '.wav', 'rb')),
            'LANGUAGE': (None, language),
            'OUTFORMAT': (None, 'TextGrid'),
            'TEXT': (fileName + '.txt', open(fileName + '.txt', 'rb')),
        }
        result = requests.post('https://clarin.phonetik.uni-muenchen.de/BASWebServices/services/runMAUSBasic', files=files)
        # Extract the TextGrid download link from the service's XML response.
        decodeResponse = result.content.decode("utf-8")
        xmlSplit = decodeResponse.split("<downloadLink>")
        downURL = xmlSplit[1].split("</downloadLink>")
        downURL = downURL[0]
        urllib.request.urlretrieve(downURL, fileName+".TextGrid")
        #Use parselmouth + Textgrid tools to obtain formant information
        ps = parselmouth.Sound(fileName + ".wav")
        formants = ps.to_formant_burg()
        tg = tgt.io.read_textgrid(fileName + '.TextGrid')
        mau = tg.get_tier_by_name("MAU")
        mauObjs = mau.intervals
        # Sample F1/F2 at the temporal midpoint of each matching interval.
        for i in vowels:
            for j in mauObjs:
                if i in j.text:
                    start = j.start_time
                    end = j.end_time
                    mid = (start + end) / 2
                    f1 = formants.get_value_at_time(1, mid, "HERTZ")
                    f2 = formants.get_value_at_time(2, mid, "HERTZ")
                    vl.append(j.text)
                    f1l.append(f1)
                    f2l.append(f2)
    #Store formant values in dataframe.
    d = {'vowel': vl, 'f1': f1l, 'f2': f2l}
    df=pd.DataFrame(d)
    # NOTE(review): display() is an IPython/Colab builtin; this line will
    # NameError in a plain Python run — confirm the intended environment.
    display(df)
    if centroid == True:
        # One point per vowel: mean F1/F2 across its tokens.
        f1Centroid = df.groupby('vowel')['f1'].apply(lambda x: np.mean(x.tolist(), axis=0))
        f2Centroid = df.groupby('vowel')['f2'].apply(lambda x: np.mean(x.tolist(), axis=0))
        d = {'f1': f1Centroid, 'f2': f2Centroid}
        finaldf=pd.DataFrame(d)
        fig = px.scatter(finaldf, x="f2", y="f1",color= finaldf.index, text = finaldf.index, width=1000, height=900)
        fig.update_layout(
            font_family="Helvetica",
            font_color="black",
            font = {"size": 20}
        )
        fig.update_xaxes(
            tickangle = 90,
            title_text = "F2 (Hz)",
            title_font = {"size": 20},
            title_standoff = 20
        )
        fig.update_yaxes(
            tickangle = 90,
            title_text = "F1 (Hz)",
            title_font = {"size": 20},
            title_standoff = 20
        )
        fig.update_layout({
            'plot_bgcolor': '#ffffff',
            'paper_bgcolor': '#ffffff',
            'yaxis_gridcolor':'#e5e5ea',
            'xaxis_gridcolor':'#e5e5ea'
        })
        fig.update_traces(textposition='top center')
        # Reversed axes give the conventional vowel-chart orientation.
        fig.update_yaxes(autorange="reversed")
        fig.update_xaxes(autorange="reversed")
        fig.update_xaxes(tickangle=0)
        fig.update_yaxes(tickangle=0)
        fig.show()
    else:
        # Plot every token individually, colored by vowel.
        finaldf = df
        fig = px.scatter(finaldf, x="f2", y="f1",color= "vowel", text = "vowel", width=1000, height=900)
        fig.update_layout(
            font_family="Helvetica",
            font_color="black",
            font = {"size": 20}
        )
        fig.update_xaxes(
            tickangle = 90,
            title_text = "F2 (Hz)",
            title_font = {"size": 20},
            title_standoff = 20
        )
        fig.update_yaxes(
            tickangle = 90,
            title_text = "F1 (Hz)",
            title_font = {"size": 20},
            title_standoff = 20
        )
        fig.update_layout({
            'plot_bgcolor': '#ffffff',
            'paper_bgcolor': '#ffffff',
            'yaxis_gridcolor':'#e5e5ea',
            'xaxis_gridcolor':'#e5e5ea'
        })
        fig.update_traces(textposition='top center')
        fig.update_yaxes(autorange="reversed")
        fig.update_xaxes(autorange="reversed")
        fig.update_xaxes(tickangle=0)
        fig.update_yaxes(tickangle=0)
        fig.show()
    return finaldf
def plotPath(folderNames, vowels, language):
    """
    Plot how per-vowel F1/F2 centroids move across a sequence of recording
    folders (one centroid set per folder), connected by spline lines; the
    special folder "truth" provides reference centroids drawn as large
    labeled markers.

    folderNames: subfolders of the cwd, each containing .wav/.txt pairs
    vowels: vowel label substrings to look for in the "MAU" tier
    language: LANGUAGE code passed to the WebMAUS service

    Returns the concatenated centroid dataframe (non-"truth" folders).
    """
    import os
    import requests
    import pandas as pd
    import urllib.request
    import parselmouth
    import tgt
    import plotly.express as px
    import plotly.graph_objects as go
    import numpy as np
    originpath = os.getcwd()
    data = []
    for index,folder in enumerate(folderNames):
        # Per-folder accumulators: vowel label, F1 and F2 per matched token.
        vl = []
        f1l = []
        f2l = []
        # NOTE(review): "\\" is a Windows-only path separator; os.path.join
        # would be portable — confirm target platform.
        path = os.getcwd() +"\\"+ folder
        os.chdir(path)
        print(os.getcwd())
        # Derive unique base names (without extension) from the folder listing.
        namelist = os.listdir(path)
        fileNames =[]
        for name in namelist:
            unique = name.split(".")[0]
            if unique not in fileNames:
                fileNames.append(unique)
        for fileName in fileNames:
            #Call WebMaus Basic Api to generate TextGrids.
            print(" "+"└── "+ fileName)
            # NOTE(review): `headers` is built but never passed to requests.post.
            headers = {
                'content-type': 'multipart/form-data',
            }
            files = {
                'SIGNAL': (fileName + '.wav', open(fileName + '.wav', 'rb')),
                'LANGUAGE': (None, language),
                'OUTFORMAT': (None, 'TextGrid'),
                'TEXT': (fileName + '.txt', open(fileName + '.txt', 'rb')),
            }
            result = requests.post('https://clarin.phonetik.uni-muenchen.de/BASWebServices/services/runMAUSBasic', files=files)
            # Extract the TextGrid download link from the service's XML response.
            decodeResponse = result.content.decode("utf-8")
            xmlSplit = decodeResponse.split("<downloadLink>")
            downURL = xmlSplit[1].split("</downloadLink>")
            downURL = downURL[0]
            urllib.request.urlretrieve(downURL, fileName+".TextGrid")
            #Use parselmouth + Textgrid tools to obtain formant information
            ps = parselmouth.Sound(fileName + ".wav")
            formants = ps.to_formant_burg()
            tg = tgt.io.read_textgrid(fileName + '.TextGrid')
            mau = tg.get_tier_by_name("MAU")
            mauObjs = mau.intervals
            # Sample F1/F2 at the temporal midpoint of each matching interval.
            for i in vowels:
                for j in mauObjs:
                    if i in j.text:
                        start = j.start_time
                        end = j.end_time
                        mid = (start + end) / 2
                        f1 = formants.get_value_at_time(1, mid, "HERTZ")
                        f2 = formants.get_value_at_time(2, mid, "HERTZ")
                        vl.append(j.text)
                        f1l.append(f1)
                        f2l.append(f2)
        if folder == "truth":
            # Reference folder: keep its centroids aside (rDF) for marker overlay.
            d = {'vowel': vl, 'f1': f1l, 'f2': f2l}
            df=pd.DataFrame(d)
            f1Centroid = df.groupby('vowel')['f1'].apply(lambda x: np.mean(x.tolist(), axis=0))
            f2Centroid = df.groupby('vowel')['f2'].apply(lambda x: np.mean(x.tolist(), axis=0))
            d = { 'f1': f1Centroid, 'f2': f2Centroid}
            rDF=pd.DataFrame(d)
            rDF = rDF.reset_index()
        else:
            #Store formant values in dataframe.
            d = {'vowel': vl, 'f1': f1l, 'f2': f2l}
            df=pd.DataFrame(d)
            f1Centroid = df.groupby('vowel')['f1'].apply(lambda x: np.mean(x.tolist(), axis=0))
            f2Centroid = df.groupby('vowel')['f2'].apply(lambda x: np.mean(x.tolist(), axis=0))
            d = {'f1': f1Centroid, 'f2': f2Centroid, 'steps': folder, 'point': index}
            df2=pd.DataFrame(d)
            data.append(df2)
        # Return to the starting directory before the next folder.
        os.chdir(originpath)
    # NOTE(review): if folderNames contains only "truth", `data` is empty and
    # pd.concat raises — confirm callers always pass at least one step folder.
    full = pd.concat(data)
    df = full
    fig = px.line(full, x="f2", y="f1",color=df.index, width=1000, height=900, line_shape= 'spline', text = 'steps', line_group=df.index, )
    if 'truth' in folderNames:
        # Overlay the reference centroids as large labeled markers, matched to
        # each trace's color by vowel (trace legendgroup).
        # NOTE(review): loop variables `d` and `index` shadow the earlier dict
        # `d` and the enumerate index above.
        for i, d in enumerate(fig.data):
            for index, row in rDF.iterrows():
                if d.legendgroup == row.vowel:
                    fig.add_trace(go.Scatter(x=[row.f2], y = [row.f1], mode = "markers+text", showlegend=False, marker_color=d.line.color, text=row.vowel, textfont=dict(
                        size=30,
                        color=d.line.color
                    )))
    fig.update_layout(
        font_family="Helvetica",
        font_color="black",
        font = {"size": 20}
    )
    fig.update_xaxes(
        tickangle = 90,
        title_text = "F2 (Hz)",
        title_font = {"size": 20},
        title_standoff = 20
    )
    fig.update_yaxes(
        tickangle = 90,
        title_text = "F1 (Hz)",
        title_font = {"size": 20},
        title_standoff = 20
    )
    fig.update_layout({
        'plot_bgcolor': '#ffffff',
        'paper_bgcolor': '#ffffff',
        'yaxis_gridcolor':'#e5e5ea',
        'xaxis_gridcolor':'#e5e5ea'
    })
    fig.update_traces(textposition='top center')
    # Reversed axes give the conventional vowel-chart orientation.
    fig.update_yaxes(autorange="reversed")
    fig.update_xaxes(autorange="reversed")
    fig.update_xaxes(tickangle=0)
    fig.update_yaxes(tickangle=0)
    fig.show()
    os.chdir(originpath)
    return df
| 35.070039
| 163
| 0.562854
| 1,048
| 9,013
| 4.753817
| 0.211832
| 0.048776
| 0.018065
| 0.016861
| 0.786632
| 0.778804
| 0.778804
| 0.778804
| 0.778804
| 0.76676
| 0
| 0.02678
| 0.295684
| 9,013
| 257
| 164
| 35.070039
| 0.757089
| 0.051925
| 0
| 0.774468
| 0
| 0
| 0.125351
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008511
| false
| 0
| 0.07234
| 0
| 0.089362
| 0.017021
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a8ec05d1330ae68b60864f963ee382af8fb81175
| 28
|
py
|
Python
|
datacatalog/formats/tulane/__init__.py
|
SD2E/python-datacatalog
|
51ab366639505fb6e8a14cd6b446de37080cd20d
|
[
"CNRI-Python"
] | null | null | null |
datacatalog/formats/tulane/__init__.py
|
SD2E/python-datacatalog
|
51ab366639505fb6e8a14cd6b446de37080cd20d
|
[
"CNRI-Python"
] | 2
|
2019-07-25T15:39:04.000Z
|
2019-10-21T15:31:46.000Z
|
datacatalog/formats/tulane/__init__.py
|
SD2E/python-datacatalog
|
51ab366639505fb6e8a14cd6b446de37080cd20d
|
[
"CNRI-Python"
] | 1
|
2019-10-15T14:33:44.000Z
|
2019-10-15T14:33:44.000Z
|
from .convert import Tulane
| 14
| 27
| 0.821429
| 4
| 28
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a8ef3b58fe05e8f36fb47a9deaf7d16e84e3090d
| 6,515
|
py
|
Python
|
seqgra/learner/dna.py
|
gifford-lab/seqgra
|
3c7547878ecda4c00572746b8a07e0d614c9dbef
|
[
"MIT"
] | null | null | null |
seqgra/learner/dna.py
|
gifford-lab/seqgra
|
3c7547878ecda4c00572746b8a07e0d614c9dbef
|
[
"MIT"
] | null | null | null |
seqgra/learner/dna.py
|
gifford-lab/seqgra
|
3c7547878ecda4c00572746b8a07e0d614c9dbef
|
[
"MIT"
] | 2
|
2021-06-14T20:27:40.000Z
|
2021-06-14T20:29:29.000Z
|
"""MIT - CSAIL - Gifford Lab - seqgra
Abstract base class for learners
@author: Konstantin Krismer
"""
from __future__ import annotations
from typing import List
import itertools
import warnings
import numpy as np
import pandas as pd
from sklearn.preprocessing import MultiLabelBinarizer
from seqgra import ExampleSet
from seqgra.learner import MultiClassClassificationLearner
from seqgra.learner import MultiLabelClassificationLearner
from seqgra.learner import DNAHelper
from seqgra.model import ModelDefinition
class DNAMultiClassClassificationLearner(MultiClassClassificationLearner):
    """Multi-class classification learner for DNA sequence data.

    Sequences are one-hot encoded over the 4-letter DNA alphabet via
    :class:`DNAHelper`; labels are encoded as boolean one-hot rows over
    ``self.definition.labels``.
    """

    def __init__(self, model_definition: ModelDefinition, data_dir: str,
                 output_dir: str, validate_data: bool = True,
                 gpu_id: int = 0, silent: bool = False) -> None:
        super().__init__(model_definition, data_dir, output_dir,
                         validate_data, gpu_id, silent=silent)
        # 4 = |{A, C, G, T}|
        self.alphabet_size: int = 4

    def encode_x(self, x: List[str]):
        """One-hot encode each dense DNA string and stack into one array."""
        return np.stack([DNAHelper.convert_dense_to_one_hot_encoding(seq)
                         for seq in x])

    def decode_x(self, x):
        """Inverse of :meth:`encode_x`: one-hot arrays to dense DNA strings."""
        return np.stack([DNAHelper.convert_one_hot_to_dense_encoding(seq)
                         for seq in x])

    def encode_y(self, y: List[str]):
        """Encode label strings as boolean one-hot rows over the known labels.

        Raises:
            Exception: if labels have not been loaded yet.
        """
        if self.definition.labels is None:
            raise Exception("unknown labels, call parse_examples_data or "
                            "load_model first")
        labels = np.array(self.definition.labels)
        # Each row is True exactly at the position of the example's label.
        return np.vstack([ex == labels for ex in y])

    def decode_y(self, y):
        """Decode a label matrix (one per row) back to label strings.

        Accepts float input (binarized by per-row argmax), integer input
        (cast to bool), or boolean input; any other dtype raises.
        """
        if self.definition.labels is None:
            raise Exception("unknown labels, call parse_examples_data or "
                            "load_model first")
        labels = np.array(self.definition.labels)
        if isinstance(y, list):
            y = np.asarray(y)
        elif not isinstance(y, np.ndarray):
            raise Exception("y is neither list nor np.ndarry")
        if y.dtype == np.float32 or y.dtype == np.float64 or \
                y.dtype == np.float_:
            # binarize y: mark only the argmax per row as True
            true_idx = np.argmax(y, axis=1)
            y = np.zeros(y.shape)
            y[np.arange(len(y)), true_idx] = 1
            y = y.astype(bool)
        elif y.dtype == np.int8 or y.dtype == np.int16 or \
                y.dtype == np.int32 or y.dtype == np.int64 or \
                y.dtype == np.uint8 or y.dtype == np.uint16 or \
                y.dtype == np.uint32 or y.dtype == np.uint64 or \
                y.dtype == np.intp or y.dtype == np.uintp or \
                y.dtype == np.int_:
            y = y.astype(bool)
        elif y.dtype != np.bool_:
            raise Exception("y has invalid data type; valid data types "
                            "include bool, int, float")
        # Pick the label for each True entry, then flatten to a plain list.
        decoded_y = np.vstack([labels[ex] for ex in y])
        decoded_y = list(itertools.chain(*decoded_y))
        return decoded_y

    def parse_examples_data(self, file_name: str) -> ExampleSet:
        """Load a tab-separated examples file with string columns 'x' and 'y'."""
        df = pd.read_csv(file_name, sep="\t", dtype={"x": "string",
                                                     "y": "string"})
        df = df.fillna("")
        x: List[str] = df["x"].tolist()
        y: List[str] = df["y"].tolist()
        if self.validate_data:
            self.check_sequence(x)
            self.check_labels(y)
        return ExampleSet(x, y)

    def check_sequence(self, x: List[str]) -> bool:
        """Validate that all sequences use the DNA alphabet."""
        return DNAHelper.check_sequence(x)
class DNAMultiLabelClassificationLearner(MultiLabelClassificationLearner):
    """Multi-label classification learner for DNA sequence data.

    Sequences are one-hot encoded via :class:`DNAHelper`; a label string
    holds zero or more labels separated by ``|`` and is encoded as a
    boolean multi-hot row over ``self.definition.labels``.
    """

    def __init__(self, model_definition: ModelDefinition, data_dir: str,
                 output_dir: str, validate_data: bool = True,
                 gpu_id: int = 0, silent: bool = False) -> None:
        super().__init__(model_definition, data_dir, output_dir,
                         validate_data, gpu_id, silent=silent)
        # 4 = |{A, C, G, T}|
        self.alphabet_size: int = 4

    def encode_x(self, x: List[str]):
        """One-hot encode each dense DNA string and stack into one array."""
        return np.stack([DNAHelper.convert_dense_to_one_hot_encoding(seq)
                         for seq in x])

    def decode_x(self, x):
        """Inverse of :meth:`encode_x`: one-hot arrays to dense DNA strings."""
        return np.stack([DNAHelper.convert_one_hot_to_dense_encoding(seq)
                         for seq in x])

    def encode_y(self, y: List[str]):
        """Encode '|'-separated label strings as boolean multi-hot rows.

        Raises:
            Exception: if labels have not been loaded yet.
        """
        if self.definition.labels is None:
            raise Exception("unknown labels, call parse_examples_data or "
                            "load_model first")
        y = [ex.split("|") for ex in y]
        mlb = MultiLabelBinarizer(classes=self.definition.labels)
        with warnings.catch_warnings():
            # suppress sklearn's "unknown class" warnings during transform
            warnings.simplefilter("ignore")
            y = mlb.fit_transform(y).astype(bool)
        return y

    def decode_y(self, y):
        """Decode a multi-hot label matrix back to '|'-joined label strings.

        Accepts float input (thresholded at 0.5), integer input (cast to
        bool), or boolean input; any other dtype raises.
        """
        if self.definition.labels is None:
            raise Exception("unknown labels, call parse_examples_data or "
                            "load_model first")
        labels = np.array(self.definition.labels)
        if isinstance(y, list):
            y = np.asarray(y)
        elif not isinstance(y, np.ndarray):
            raise Exception("y is neither list nor np.ndarry")
        if y.dtype == np.float32 or y.dtype == np.float64 or \
                y.dtype == np.float_:
            # binarize y: every score above 0.5 counts as a present label
            y = np.greater(y, 0.5).astype(bool)
        elif y.dtype == np.int8 or y.dtype == np.int16 or \
                y.dtype == np.int32 or y.dtype == np.int64 or \
                y.dtype == np.uint8 or y.dtype == np.uint16 or \
                y.dtype == np.uint32 or y.dtype == np.uint64 or \
                y.dtype == np.intp or y.dtype == np.uintp or \
                y.dtype == np.int_:
            y = y.astype(bool)
        elif y.dtype != np.bool_:
            raise Exception("y has invalid data type; valid data types "
                            "include bool, int, float")
        # Select present labels per row, then join them with '|'.
        decoded_y = [labels[ex] for ex in y]
        decoded_y = ["|".join(ex) for ex in decoded_y]
        return decoded_y

    def parse_examples_data(self, file_name: str) -> ExampleSet:
        """Load a tab-separated examples file with string columns 'x' and 'y'."""
        df = pd.read_csv(file_name, sep="\t", dtype={"x": "string",
                                                     "y": "string"})
        df = df.fillna("")
        x: List[str] = df["x"].tolist()
        # extra NaN scrub on 'y': missing labels decode to the empty string
        y: List[str] = df["y"].replace(np.nan, "", regex=True).tolist()
        if self.validate_data:
            self.check_sequence(x)
            self.check_labels(y)
        return ExampleSet(x, y)

    def check_sequence(self, x: List[str]) -> bool:
        """Validate that all sequences use the DNA alphabet."""
        return DNAHelper.check_sequence(x)
| 38.550296
| 74
| 0.57452
| 835
| 6,515
| 4.338922
| 0.186826
| 0.049683
| 0.066243
| 0.066243
| 0.757107
| 0.757107
| 0.757107
| 0.757107
| 0.743307
| 0.743307
| 0
| 0.009905
| 0.318189
| 6,515
| 168
| 75
| 38.779762
| 0.805718
| 0.018419
| 0
| 0.763359
| 0
| 0
| 0.07484
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.10687
| false
| 0
| 0.091603
| 0.045802
| 0.305344
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a8ff1bc176272e500a07aebd74e964e8b2d647c7
| 179
|
py
|
Python
|
django_compat_patcher/fixers/__init__.py
|
jayvdb/django-compat-patcher
|
09a60f1fa7e860a32d506c92d684997492385dda
|
[
"MIT"
] | null | null | null |
django_compat_patcher/fixers/__init__.py
|
jayvdb/django-compat-patcher
|
09a60f1fa7e860a32d506c92d684997492385dda
|
[
"MIT"
] | null | null | null |
django_compat_patcher/fixers/__init__.py
|
jayvdb/django-compat-patcher
|
09a60f1fa7e860a32d506c92d684997492385dda
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, print_function, unicode_literals
from . import django1_6, django1_7, django1_8, django1_9, django1_10, django1_11, django2_0, django2_1
| 29.833333
| 102
| 0.832402
| 27
| 179
| 4.962963
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113208
| 0.111732
| 179
| 5
| 103
| 35.8
| 0.72956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
d15130120eee6120e788b34ff44aa50929cf2fbc
| 65
|
py
|
Python
|
dart/experiments/__init__.py
|
fpirovan/NoiseInjection
|
d1a8c90aaf45d435d40c476a2d2e74258920ff22
|
[
"RSA-MD"
] | null | null | null |
dart/experiments/__init__.py
|
fpirovan/NoiseInjection
|
d1a8c90aaf45d435d40c476a2d2e74258920ff22
|
[
"RSA-MD"
] | null | null | null |
dart/experiments/__init__.py
|
fpirovan/NoiseInjection
|
d1a8c90aaf45d435d40c476a2d2e74258920ff22
|
[
"RSA-MD"
] | 2
|
2020-11-06T06:57:35.000Z
|
2021-04-26T13:23:35.000Z
|
from .tools import *
from .net import *
from .framework import *
| 16.25
| 24
| 0.723077
| 9
| 65
| 5.222222
| 0.555556
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184615
| 65
| 3
| 25
| 21.666667
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0f00006967ad3f1b9a9299831e27a0fa80919718
| 69
|
py
|
Python
|
6.py
|
BarisTeksin/project-euler
|
38a368d66fdd3bdc1d977059ba966fb7c1dcdc39
|
[
"MIT"
] | 4
|
2020-04-18T21:05:13.000Z
|
2020-04-26T15:39:14.000Z
|
6.py
|
BarisTeksin/project-euler
|
38a368d66fdd3bdc1d977059ba966fb7c1dcdc39
|
[
"MIT"
] | null | null | null |
6.py
|
BarisTeksin/project-euler
|
38a368d66fdd3bdc1d977059ba966fb7c1dcdc39
|
[
"MIT"
] | null | null | null |
# Project Euler 6: difference between the square of the sum and the
# sum of the squares of the first 100 natural numbers.
total = sum(range(101))
square_sum = sum(x ** 2 for x in range(101))
print(total ** 2 - square_sum)
| 34.5
| 68
| 0.623188
| 17
| 69
| 2.529412
| 0.470588
| 0.186047
| 0.27907
| 0.511628
| 0.651163
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 0.15942
| 69
| 1
| 69
| 69
| 0.603448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
0f3b6de130bfdb209b8347e2c0a0e27691a2f8cf
| 9,075
|
py
|
Python
|
torchensemble/tests/test_all_models_multi_input.py
|
e-eight/Ensemble-Pytorch
|
83250759d3075795805d8c90f425191ab262b1c4
|
[
"BSD-3-Clause"
] | null | null | null |
torchensemble/tests/test_all_models_multi_input.py
|
e-eight/Ensemble-Pytorch
|
83250759d3075795805d8c90f425191ab262b1c4
|
[
"BSD-3-Clause"
] | null | null | null |
torchensemble/tests/test_all_models_multi_input.py
|
e-eight/Ensemble-Pytorch
|
83250759d3075795805d8c90f425191ab262b1c4
|
[
"BSD-3-Clause"
] | 1
|
2021-07-02T07:44:20.000Z
|
2021-07-02T07:44:20.000Z
|
import torch
import pytest
import numpy as np
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
import torchensemble
from torchensemble.utils import io
from torchensemble.utils.logging import set_logger
# All classifiers
# One entry per ensemble strategy; each is exercised by the parametrized tests.
all_clf = [
    torchensemble.FusionClassifier,
    torchensemble.VotingClassifier,
    torchensemble.BaggingClassifier,
    torchensemble.GradientBoostingClassifier,
    torchensemble.SnapshotEnsembleClassifier,
    torchensemble.AdversarialTrainingClassifier,
    torchensemble.FastGeometricClassifier,
]

# All regressors
all_reg = [
    torchensemble.FusionRegressor,
    torchensemble.VotingRegressor,
    torchensemble.BaggingRegressor,
    torchensemble.GradientBoostingRegressor,
    torchensemble.SnapshotEnsembleRegressor,
    torchensemble.AdversarialTrainingRegressor,
    torchensemble.FastGeometricRegressor,
]

# Fix seeds so training is deterministic across test runs.
np.random.seed(0)
torch.manual_seed(0)
device = torch.device("cpu")  # tests run on CPU only
logger = set_logger("pytest_all_models_multiple_input")
# Base estimator
class MLP_clf(nn.Module):
    """Tiny two-layer MLP used as the base classifier estimator.

    Accepts two input tensors, pushes each through the same pair of
    linear layers, and returns the element-wise average of the two
    branch outputs.
    """

    def __init__(self):
        super(MLP_clf, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 2)

    def forward(self, X_1, X_2):
        branch_outputs = []
        for inp in (X_1, X_2):
            flat = inp.view(inp.size()[0], -1)
            branch_outputs.append(self.linear2(self.linear1(flat)))
        return 0.5 * branch_outputs[0] + 0.5 * branch_outputs[1]
class MLP_reg(nn.Module):
    """Tiny two-layer MLP used as the base regressor estimator.

    Accepts two input tensors, pushes each through the same pair of
    linear layers (final layer outputs one value), and returns the
    element-wise average of the two branch outputs.
    """

    def __init__(self):
        super(MLP_reg, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 1)

    def forward(self, X_1, X_2):
        branch_outputs = []
        for inp in (X_1, X_2):
            flat = inp.view(inp.size()[0], -1)
            branch_outputs.append(self.linear2(self.linear1(flat)))
        return 0.5 * branch_outputs[0] + 0.5 * branch_outputs[1]
# Training data
# Four 2-dim points; X is passed twice below to exercise multi-input forward().
X_train = torch.Tensor(
    np.array(([0.1, 0.1], [0.2, 0.2], [0.3, 0.3], [0.4, 0.4]))
)
y_train_clf = torch.LongTensor(np.array(([0, 0, 1, 1])))  # class ids
y_train_reg = torch.FloatTensor(np.array(([0.1, 0.2, 0.3, 0.4])))
y_train_reg = y_train_reg.view(-1, 1)  # column vector for regression targets
# Testing data
numpy_X_test = np.array(([0.5, 0.5], [0.6, 0.6]))
X_test = torch.Tensor(numpy_X_test)
y_test_clf = torch.LongTensor(np.array(([1, 0])))
y_test_reg = torch.FloatTensor(np.array(([0.5, 0.6])))
y_test_reg = y_test_reg.view(-1, 1)  # column vector for regression targets
@pytest.mark.parametrize("clf", all_clf)
def test_clf_class(clf):
"""
This unit test checks the training and evaluating stage of all classifiers.
"""
epochs = 1
n_estimators = 2
model = clf(estimator=MLP_clf, n_estimators=n_estimators, cuda=False)
# Optimizer
model.set_optimizer("Adam", lr=1e-3, weight_decay=5e-4)
# Scheduler (Snapshot Ensemble Excluded)
if not isinstance(model, torchensemble.SnapshotEnsembleClassifier):
model.set_scheduler("MultiStepLR", milestones=[2, 4])
# Prepare data with multiple inputs
train = TensorDataset(X_train, X_train, y_train_clf)
train_loader = DataLoader(train, batch_size=2, shuffle=False)
test = TensorDataset(X_test, X_test, y_test_clf)
test_loader = DataLoader(test, batch_size=2, shuffle=False)
# Snapshot Ensemble needs more epochs
if isinstance(model, torchensemble.SnapshotEnsembleClassifier):
epochs = 6
# Train
model.fit(train_loader, epochs=epochs, test_loader=test_loader)
# Evaluate
model.evaluate(test_loader)
# Predict
for _, elem in enumerate(test_loader):
data, target = io.split_data_target(elem, device)
model.predict(*data)
break
# Reload
new_model = clf(estimator=MLP_clf, n_estimators=n_estimators, cuda=False)
io.load(new_model)
new_model.evaluate(test_loader)
for _, elem in enumerate(test_loader):
data, target = io.split_data_target(elem, device)
new_model.predict(*data)
break
@pytest.mark.parametrize("clf", all_clf)
def test_clf_object(clf):
"""
This unit test checks the training and evaluating stage of all classifiers.
"""
epochs = 1
n_estimators = 2
model = clf(estimator=MLP_clf(), n_estimators=n_estimators, cuda=False)
# Optimizer
model.set_optimizer("Adam", lr=1e-3, weight_decay=5e-4)
# Scheduler (Snapshot Ensemble Excluded)
if not isinstance(model, torchensemble.SnapshotEnsembleClassifier):
model.set_scheduler("MultiStepLR", milestones=[2, 4])
# Prepare data with multiple inputs
train = TensorDataset(X_train, X_train, y_train_clf)
train_loader = DataLoader(train, batch_size=2, shuffle=False)
test = TensorDataset(X_test, X_test, y_test_clf)
test_loader = DataLoader(test, batch_size=2, shuffle=False)
# Snapshot Ensemble needs more epochs
if isinstance(model, torchensemble.SnapshotEnsembleClassifier):
epochs = 6
# Train
model.fit(train_loader, epochs=epochs, test_loader=test_loader)
# Evaluate
model.evaluate(test_loader)
# Predict
for _, elem in enumerate(test_loader):
data, target = io.split_data_target(elem, device)
model.predict(*data)
break
# Reload
new_model = clf(estimator=MLP_clf(), n_estimators=n_estimators, cuda=False)
io.load(new_model)
new_model.evaluate(test_loader)
for _, elem in enumerate(test_loader):
data, target = io.split_data_target(elem, device)
new_model.predict(*data)
break
@pytest.mark.parametrize("reg", all_reg)
def test_reg_class(reg):
"""
This unit test checks the training and evaluating stage of all regressors.
"""
epochs = 1
n_estimators = 2
model = reg(estimator=MLP_reg, n_estimators=n_estimators, cuda=False)
# Optimizer
model.set_optimizer("Adam", lr=1e-3, weight_decay=5e-4)
# Scheduler (Snapshot Ensemble Excluded)
if not isinstance(model, torchensemble.SnapshotEnsembleRegressor):
model.set_scheduler("MultiStepLR", milestones=[2, 4])
# Prepare data with multiple inputs
train = TensorDataset(X_train, X_train, y_train_reg)
train_loader = DataLoader(train, batch_size=2, shuffle=False)
test = TensorDataset(X_test, X_test, y_test_reg)
test_loader = DataLoader(test, batch_size=2, shuffle=False)
# Snapshot Ensemble needs more epochs
if isinstance(model, torchensemble.SnapshotEnsembleRegressor):
epochs = 6
# Train
model.fit(train_loader, epochs=epochs, test_loader=test_loader)
# Evaluate
model.evaluate(test_loader)
# Predict
for _, elem in enumerate(test_loader):
data, target = io.split_data_target(elem, device)
model.predict(*data)
break
# Reload
new_model = reg(estimator=MLP_reg, n_estimators=n_estimators, cuda=False)
io.load(new_model)
new_model.evaluate(test_loader)
for _, elem in enumerate(test_loader):
data, target = io.split_data_target(elem, device)
new_model.predict(*data)
break
@pytest.mark.parametrize("reg", all_reg)
def test_reg_object(reg):
"""
This unit test checks the training and evaluating stage of all regressors.
"""
epochs = 1
n_estimators = 2
model = reg(estimator=MLP_reg(), n_estimators=n_estimators, cuda=False)
# Optimizer
model.set_optimizer("Adam", lr=1e-3, weight_decay=5e-4)
# Scheduler (Snapshot Ensemble Excluded)
if not isinstance(model, torchensemble.SnapshotEnsembleRegressor):
model.set_scheduler("MultiStepLR", milestones=[2, 4])
# Prepare data with multiple inputs
train = TensorDataset(X_train, X_train, y_train_reg)
train_loader = DataLoader(train, batch_size=2, shuffle=False)
test = TensorDataset(X_test, X_test, y_test_reg)
test_loader = DataLoader(test, batch_size=2, shuffle=False)
# Snapshot Ensemble needs more epochs
if isinstance(model, torchensemble.SnapshotEnsembleRegressor):
epochs = 6
# Train
model.fit(train_loader, epochs=epochs, test_loader=test_loader)
# Evaluate
model.evaluate(test_loader)
# Predict
for _, elem in enumerate(test_loader):
data, target = io.split_data_target(elem, device)
model.predict(*data)
break
# Reload
new_model = reg(estimator=MLP_reg(), n_estimators=n_estimators, cuda=False)
io.load(new_model)
new_model.evaluate(test_loader)
for _, elem in enumerate(test_loader):
data, target = io.split_data_target(elem, device)
new_model.predict(*data)
break
def test_split_data_target_invalid_data_type():
    """split_data_target must reject a non-dataloader element with ValueError."""
    with pytest.raises(ValueError) as err:
        io.split_data_target(0.0, device, logger)
    assert "Invalid dataloader" in str(err.value)
def test_split_data_target_invalid_list_length():
    """split_data_target must reject an element with fewer than two tensors."""
    with pytest.raises(ValueError) as err:
        io.split_data_target([0.0], device, logger)
    assert "should at least contain two tensors" in str(err.value)
| 29.464286
| 79
| 0.695537
| 1,222
| 9,075
| 4.943535
| 0.121113
| 0.04635
| 0.029796
| 0.028141
| 0.814104
| 0.798212
| 0.779672
| 0.770733
| 0.770733
| 0.760967
| 0
| 0.023093
| 0.198347
| 9,075
| 307
| 80
| 29.560261
| 0.807285
| 0.106997
| 0
| 0.644809
| 0
| 0
| 0.019983
| 0.003997
| 0
| 0
| 0
| 0
| 0.010929
| 1
| 0.054645
| false
| 0
| 0.043716
| 0
| 0.120219
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0f3c0f60bd1240b2462f9d145d8774d9b29c0816
| 34
|
py
|
Python
|
path_finding/rl_algo.py
|
Maarten1999/Minor_ML3_Snake_AI
|
8a579634c94feb8f73b9bf00db78d6852993d3f6
|
[
"MIT"
] | null | null | null |
path_finding/rl_algo.py
|
Maarten1999/Minor_ML3_Snake_AI
|
8a579634c94feb8f73b9bf00db78d6852993d3f6
|
[
"MIT"
] | null | null | null |
path_finding/rl_algo.py
|
Maarten1999/Minor_ML3_Snake_AI
|
8a579634c94feb8f73b9bf00db78d6852993d3f6
|
[
"MIT"
] | null | null | null |
from stable_baselines import DQN
| 11.333333
| 32
| 0.852941
| 5
| 34
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 34
| 2
| 33
| 17
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0f4abb67d40c804ea603d252baf08947e2775d3e
| 251
|
py
|
Python
|
src/layers/__init__.py
|
nkoppe/tf_Transformer_SerieseData
|
502bb4c917b47139503a9d59300108313f507bd6
|
[
"MIT"
] | null | null | null |
src/layers/__init__.py
|
nkoppe/tf_Transformer_SerieseData
|
502bb4c917b47139503a9d59300108313f507bd6
|
[
"MIT"
] | null | null | null |
src/layers/__init__.py
|
nkoppe/tf_Transformer_SerieseData
|
502bb4c917b47139503a9d59300108313f507bd6
|
[
"MIT"
] | null | null | null |
from .Attention import Attention
from .PositionalEncoding import PositionalEncoding
from .ScaledDotProductAttention import ScaledDotProductAttention
from .LayerNormalization import LayerNormalization
from .MultiHeadAttention import MultiHeadAttention
| 41.833333
| 64
| 0.900398
| 20
| 251
| 11.3
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079681
| 251
| 5
| 65
| 50.2
| 0.978355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0f5d02eb86018865398a9002e028991094f09a4f
| 123
|
py
|
Python
|
utility/print_range.py
|
polde-live/learnpython
|
ff8ec96db1951d99797205d0bd491e542152a36f
|
[
"Unlicense"
] | null | null | null |
utility/print_range.py
|
polde-live/learnpython
|
ff8ec96db1951d99797205d0bd491e542152a36f
|
[
"Unlicense"
] | null | null | null |
utility/print_range.py
|
polde-live/learnpython
|
ff8ec96db1951d99797205d0bd491e542152a36f
|
[
"Unlicense"
] | null | null | null |
def print_range(x):
    """Print a label for *x* followed by the list of integers 1..x-2.

    NOTE(review): the upper bound ``x - 1`` is exclusive, so the printed
    list stops at ``x - 2`` — confirm this off-by-one is intentional.
    """
    # list(...) reproduces the Python 2 output, where range() was a list.
    print("Printing for %d" % x, list(range(1, x - 1)))


xs = range(1, 25)
for x in xs:
    print_range(x)
| 13.666667
| 45
| 0.577236
| 24
| 123
| 2.875
| 0.458333
| 0.289855
| 0.318841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054945
| 0.260163
| 123
| 8
| 46
| 15.375
| 0.703297
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.6
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
0f7bef1be2ac5de8d51156c9fa037d2d838bde1f
| 19
|
py
|
Python
|
chat/tests.py
|
Safintim/aiohttp-chat
|
eef9e14d67070176678a99af10c729a057c4cd00
|
[
"MIT"
] | 1
|
2019-02-05T20:14:41.000Z
|
2019-02-05T20:14:41.000Z
|
chat/tests.py
|
Safintim/aiohttp-chat
|
eef9e14d67070176678a99af10c729a057c4cd00
|
[
"MIT"
] | 157
|
2019-02-12T18:07:28.000Z
|
2022-02-10T07:14:24.000Z
|
chat/tests.py
|
Safintim/aiohttp-chat
|
eef9e14d67070176678a99af10c729a057c4cd00
|
[
"MIT"
] | null | null | null |
# TODO write tests
| 9.5
| 18
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 19
| 1
| 19
| 19
| 0.933333
| 0.842105
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7e3bfa4d428dce49d313005b9fc42561cf2bf5b7
| 103
|
py
|
Python
|
spylib/utils/misc.py
|
SatelCreative/spylib
|
d7f5129c4f0bba3cbc7a2925fdee389dbbde75f1
|
[
"MIT"
] | 1
|
2022-03-11T18:19:32.000Z
|
2022-03-11T18:19:32.000Z
|
spylib/utils/misc.py
|
SatelCreative/spylib
|
d7f5129c4f0bba3cbc7a2925fdee389dbbde75f1
|
[
"MIT"
] | 32
|
2020-08-14T19:49:09.000Z
|
2022-03-31T22:18:09.000Z
|
spylib/utils/misc.py
|
SatelCreative/spylib
|
d7f5129c4f0bba3cbc7a2925fdee389dbbde75f1
|
[
"MIT"
] | null | null | null |
from shortuuid import ShortUUID


def get_unique_id() -> str:
    """Return a random 10-character short-UUID string."""
    generator = ShortUUID()
    return generator.random(length=10)
| 17.166667
| 40
| 0.737864
| 14
| 103
| 5.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022989
| 0.15534
| 103
| 5
| 41
| 20.6
| 0.827586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
7e408ad0fc6ba490f8107341c72b857a699ca195
| 37
|
py
|
Python
|
sciapp/__init__.py
|
pengguanjun/imagepy
|
d96ef98c2c3e93d368131fd2753bce164e1247cd
|
[
"BSD-4-Clause"
] | 1
|
2020-08-17T04:18:35.000Z
|
2020-08-17T04:18:35.000Z
|
sciapp/__init__.py
|
cycleuser/imagepy
|
5dc1a9a8137280c5215287392ba1b23d368bd7e9
|
[
"BSD-4-Clause"
] | null | null | null |
sciapp/__init__.py
|
cycleuser/imagepy
|
5dc1a9a8137280c5215287392ba1b23d368bd7e9
|
[
"BSD-4-Clause"
] | null | null | null |
from .app import App, Manager, Source
| 37
| 37
| 0.783784
| 6
| 37
| 4.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 37
| 1
| 37
| 37
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7e628a7702baf8fc310c2e0fde616ef5abfdb26f
| 37,729
|
py
|
Python
|
instances/passenger_demand/pas-20210421-2109-int12e/5.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int12e/5.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int12e/5.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 2713
passenger_arriving = (
(0, 6, 4, 4, 1, 0, 4, 5, 3, 6, 1, 0), # 0
(4, 10, 2, 0, 1, 0, 5, 8, 5, 5, 3, 0), # 1
(1, 6, 3, 2, 5, 0, 4, 4, 5, 5, 1, 0), # 2
(3, 10, 6, 3, 3, 0, 5, 5, 4, 2, 0, 0), # 3
(2, 6, 4, 1, 0, 0, 7, 8, 6, 6, 1, 0), # 4
(4, 9, 8, 3, 3, 0, 4, 7, 6, 7, 0, 0), # 5
(2, 6, 8, 4, 1, 0, 5, 6, 5, 0, 5, 0), # 6
(2, 8, 4, 5, 2, 0, 5, 6, 4, 5, 2, 0), # 7
(4, 5, 7, 3, 3, 0, 4, 12, 5, 2, 3, 0), # 8
(3, 7, 4, 3, 3, 0, 6, 8, 9, 2, 4, 0), # 9
(7, 2, 4, 4, 2, 0, 1, 10, 4, 5, 2, 0), # 10
(1, 9, 6, 4, 3, 0, 4, 5, 7, 4, 1, 0), # 11
(2, 8, 5, 2, 1, 0, 1, 8, 5, 2, 1, 0), # 12
(6, 7, 8, 4, 0, 0, 2, 5, 4, 3, 1, 0), # 13
(4, 5, 3, 3, 3, 0, 6, 6, 12, 3, 1, 0), # 14
(2, 12, 6, 2, 2, 0, 3, 5, 7, 5, 4, 0), # 15
(4, 8, 7, 4, 3, 0, 7, 4, 4, 4, 3, 0), # 16
(1, 5, 5, 3, 1, 0, 4, 10, 0, 8, 2, 0), # 17
(2, 12, 8, 2, 3, 0, 7, 8, 4, 7, 4, 0), # 18
(2, 2, 6, 2, 2, 0, 6, 9, 4, 2, 1, 0), # 19
(5, 6, 10, 2, 0, 0, 4, 5, 0, 2, 2, 0), # 20
(2, 6, 11, 1, 1, 0, 5, 9, 4, 3, 1, 0), # 21
(4, 14, 5, 3, 3, 0, 4, 10, 2, 8, 1, 0), # 22
(4, 8, 10, 7, 2, 0, 7, 12, 2, 5, 4, 0), # 23
(3, 3, 7, 3, 4, 0, 6, 8, 4, 5, 2, 0), # 24
(4, 9, 3, 1, 3, 0, 9, 10, 7, 5, 4, 0), # 25
(4, 6, 6, 5, 0, 0, 4, 5, 2, 5, 3, 0), # 26
(2, 10, 4, 4, 3, 0, 6, 9, 4, 3, 2, 0), # 27
(5, 8, 6, 5, 2, 0, 8, 6, 8, 4, 3, 0), # 28
(7, 5, 6, 2, 4, 0, 8, 7, 8, 3, 4, 0), # 29
(5, 6, 6, 1, 4, 0, 5, 5, 3, 1, 0, 0), # 30
(3, 8, 8, 2, 1, 0, 9, 6, 12, 5, 4, 0), # 31
(8, 9, 3, 6, 1, 0, 6, 6, 4, 6, 2, 0), # 32
(1, 6, 6, 1, 0, 0, 2, 6, 2, 5, 4, 0), # 33
(7, 10, 12, 3, 2, 0, 6, 11, 8, 6, 3, 0), # 34
(6, 8, 9, 4, 2, 0, 12, 11, 6, 1, 3, 0), # 35
(3, 6, 4, 4, 2, 0, 8, 10, 3, 4, 1, 0), # 36
(3, 10, 6, 3, 5, 0, 3, 10, 4, 4, 3, 0), # 37
(4, 10, 4, 6, 2, 0, 2, 4, 6, 4, 3, 0), # 38
(4, 11, 3, 3, 1, 0, 3, 8, 4, 6, 1, 0), # 39
(4, 8, 5, 2, 0, 0, 2, 9, 4, 6, 1, 0), # 40
(4, 7, 7, 1, 1, 0, 7, 10, 7, 1, 4, 0), # 41
(2, 2, 6, 6, 2, 0, 5, 10, 4, 2, 4, 0), # 42
(5, 4, 7, 5, 0, 0, 9, 10, 6, 2, 2, 0), # 43
(5, 3, 2, 3, 1, 0, 8, 4, 2, 7, 0, 0), # 44
(4, 4, 7, 3, 2, 0, 7, 11, 2, 2, 4, 0), # 45
(2, 9, 8, 2, 3, 0, 8, 7, 7, 5, 3, 0), # 46
(5, 8, 6, 1, 2, 0, 2, 9, 11, 5, 1, 0), # 47
(3, 4, 8, 2, 3, 0, 8, 9, 2, 3, 0, 0), # 48
(6, 12, 9, 2, 2, 0, 4, 7, 5, 6, 4, 0), # 49
(7, 5, 7, 4, 0, 0, 5, 6, 7, 2, 1, 0), # 50
(5, 8, 4, 4, 0, 0, 5, 10, 7, 7, 1, 0), # 51
(4, 8, 6, 8, 4, 0, 7, 11, 2, 4, 1, 0), # 52
(3, 13, 6, 3, 0, 0, 4, 6, 1, 4, 1, 0), # 53
(2, 8, 5, 3, 3, 0, 7, 7, 3, 5, 1, 0), # 54
(5, 7, 5, 3, 1, 0, 7, 10, 3, 4, 0, 0), # 55
(3, 8, 2, 2, 1, 0, 2, 5, 5, 5, 0, 0), # 56
(3, 8, 5, 1, 2, 0, 7, 7, 4, 4, 1, 0), # 57
(7, 7, 7, 4, 0, 0, 4, 8, 4, 3, 2, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(3.1795818700614573, 8.15575284090909, 9.59308322622108, 7.603532608695652, 8.571634615384614, 5.708152173913044), # 0
(3.20942641205736, 8.246449918455387, 9.644898645029993, 7.6458772644927535, 8.635879807692307, 5.706206567028985), # 1
(3.238930172666081, 8.335801683501682, 9.695484147386459, 7.687289855072463, 8.69876923076923, 5.704201449275362), # 2
(3.268068107989464, 8.42371171875, 9.744802779562981, 7.727735054347824, 8.760245192307693, 5.702137092391305), # 3
(3.296815174129353, 8.510083606902358, 9.792817587832047, 7.767177536231884, 8.82025, 5.700013768115941), # 4
(3.3251463271875914, 8.594820930660775, 9.839491618466152, 7.805581974637681, 8.87872596153846, 5.697831748188405), # 5
(3.353036523266023, 8.677827272727273, 9.88478791773779, 7.842913043478261, 8.935615384615383, 5.695591304347826), # 6
(3.380460718466491, 8.75900621580387, 9.92866953191945, 7.879135416666666, 8.990860576923078, 5.693292708333334), # 7
(3.40739386889084, 8.83826134259259, 9.971099507283634, 7.914213768115941, 9.044403846153847, 5.6909362318840575), # 8
(3.4338109306409126, 8.915496235795453, 10.012040890102828, 7.9481127717391304, 9.0961875, 5.68852214673913), # 9
(3.459686859818554, 8.990614478114479, 10.051456726649528, 7.980797101449276, 9.146153846153846, 5.68605072463768), # 10
(3.4849966125256073, 9.063519652251683, 10.089310063196228, 8.012231431159421, 9.194245192307692, 5.683522237318841), # 11
(3.509715144863916, 9.134115340909089, 10.125563946015424, 8.042380434782608, 9.240403846153844, 5.680936956521738), # 12
(3.5338174129353224, 9.20230512678872, 10.160181421379605, 8.071208786231884, 9.284572115384616, 5.678295153985506), # 13
(3.5572783728416737, 9.267992592592593, 10.193125535561265, 8.098681159420288, 9.326692307692307, 5.6755971014492745), # 14
(3.5800729806848106, 9.331081321022726, 10.224359334832902, 8.124762228260868, 9.36670673076923, 5.672843070652174), # 15
(3.6021761925665783, 9.391474894781144, 10.25384586546701, 8.149416666666665, 9.404557692307693, 5.6700333333333335), # 16
(3.6235629645888205, 9.449076896569863, 10.281548173736075, 8.172609148550725, 9.4401875, 5.667168161231884), # 17
(3.64420825285338, 9.503790909090908, 10.307429305912597, 8.194304347826087, 9.473538461538464, 5.664247826086956), # 18
(3.664087013462101, 9.555520515046295, 10.331452308269066, 8.214466938405796, 9.504552884615384, 5.661272599637681), # 19
(3.683174202516827, 9.604169297138045, 10.353580227077975, 8.2330615942029, 9.533173076923077, 5.658242753623187), # 20
(3.7014447761194034, 9.649640838068178, 10.373776108611827, 8.250052989130435, 9.559341346153845, 5.655158559782609), # 21
(3.7188736903716704, 9.69183872053872, 10.3920029991431, 8.26540579710145, 9.582999999999998, 5.652020289855073), # 22
(3.7354359013754754, 9.730666527251683, 10.408223944944302, 8.279084692028986, 9.604091346153846, 5.6488282155797105), # 23
(3.75110636523266, 9.76602784090909, 10.422401992287917, 8.291054347826087, 9.62255769230769, 5.645582608695652), # 24
(3.7658600380450684, 9.797826244212962, 10.434500187446444, 8.301279438405798, 9.638341346153844, 5.642283740942029), # 25
(3.779671875914545, 9.825965319865318, 10.444481576692374, 8.309724637681159, 9.651384615384615, 5.63893188405797), # 26
(3.792516834942932, 9.85034865056818, 10.452309206298198, 8.316354619565217, 9.661629807692309, 5.635527309782609), # 27
(3.804369871232075, 9.870879819023568, 10.457946122536418, 8.321134057971014, 9.66901923076923, 5.632070289855072), # 28
(3.815205940883816, 9.887462407933501, 10.461355371679518, 8.324027626811594, 9.673495192307692, 5.628561096014493), # 29
(3.8249999999999997, 9.9, 10.4625, 8.325, 9.674999999999999, 5.625), # 30
(3.834164434143222, 9.910414559659088, 10.461641938405796, 8.324824387254901, 9.674452393617022, 5.620051511744128), # 31
(3.843131010230179, 9.920691477272728, 10.459092028985506, 8.324300980392156, 9.672821276595744, 5.612429710144928), # 32
(3.8519037563938614, 9.930829474431818, 10.45488668478261, 8.323434926470588, 9.670124202127658, 5.6022092203898035), # 33
(3.860486700767263, 9.940827272727272, 10.449062318840578, 8.32223137254902, 9.666378723404256, 5.589464667666167), # 34
(3.8688838714833755, 9.950683593749998, 10.441655344202898, 8.320695465686274, 9.661602393617022, 5.574270677161419), # 35
(3.8770992966751923, 9.96039715909091, 10.432702173913043, 8.318832352941177, 9.655812765957448, 5.556701874062968), # 36
(3.885137004475703, 9.96996669034091, 10.422239221014491, 8.316647181372549, 9.64902739361702, 5.536832883558221), # 37
(3.893001023017902, 9.979390909090908, 10.410302898550723, 8.314145098039214, 9.641263829787233, 5.514738330834581), # 38
(3.900695380434782, 9.988668536931817, 10.396929619565215, 8.31133125, 9.632539627659574, 5.490492841079459), # 39
(3.908224104859335, 9.997798295454546, 10.382155797101449, 8.308210784313726, 9.62287234042553, 5.464171039480259), # 40
(3.915591224424552, 10.006778906249998, 10.366017844202899, 8.304788848039216, 9.612279521276594, 5.435847551224389), # 41
(3.9228007672634266, 10.015609090909093, 10.348552173913044, 8.301070588235293, 9.600778723404256, 5.40559700149925), # 42
(3.929856761508952, 10.024287571022725, 10.329795199275361, 8.297061151960785, 9.5883875, 5.373494015492254), # 43
(3.936763235294117, 10.032813068181818, 10.309783333333334, 8.292765686274508, 9.575123404255319, 5.339613218390804), # 44
(3.9435242167519178, 10.041184303977271, 10.288552989130435, 8.288189338235293, 9.561003989361701, 5.304029235382309), # 45
(3.9501437340153456, 10.0494, 10.266140579710147, 8.28333725490196, 9.546046808510638, 5.266816691654173), # 46
(3.956625815217391, 10.05745887784091, 10.24258251811594, 8.278214583333332, 9.530269414893617, 5.228050212393803), # 47
(3.962974488491049, 10.065359659090909, 10.217915217391303, 8.272826470588234, 9.513689361702127, 5.187804422788607), # 48
(3.9691937819693086, 10.073101065340907, 10.19217509057971, 8.26717806372549, 9.49632420212766, 5.146153948025987), # 49
(3.9752877237851663, 10.080681818181816, 10.165398550724637, 8.261274509803922, 9.478191489361702, 5.103173413293353), # 50
(3.9812603420716113, 10.088100639204544, 10.137622010869565, 8.255120955882353, 9.459308776595744, 5.0589374437781105), # 51
(3.987115664961637, 10.09535625, 10.10888188405797, 8.248722549019607, 9.439693617021277, 5.013520664667666), # 52
(3.992857720588235, 10.10244737215909, 10.079214583333332, 8.24208443627451, 9.419363563829787, 4.966997701149425), # 53
(3.9984905370843995, 10.109372727272726, 10.04865652173913, 8.235211764705882, 9.398336170212765, 4.919443178410794), # 54
(4.00401814258312, 10.116131036931817, 10.017244112318838, 8.22810968137255, 9.376628989361702, 4.87093172163918), # 55
(4.0094445652173905, 10.122721022727271, 9.985013768115941, 8.220783333333333, 9.354259574468085, 4.821537956021989), # 56
(4.014773833120205, 10.129141406250001, 9.952001902173912, 8.213237867647058, 9.331245478723403, 4.771336506746626), # 57
(4.0200099744245525, 10.135390909090907, 9.91824492753623, 8.20547843137255, 9.307604255319148, 4.7204019990005), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_arriving_acc = (
(0, 6, 4, 4, 1, 0, 4, 5, 3, 6, 1, 0), # 0
(4, 16, 6, 4, 2, 0, 9, 13, 8, 11, 4, 0), # 1
(5, 22, 9, 6, 7, 0, 13, 17, 13, 16, 5, 0), # 2
(8, 32, 15, 9, 10, 0, 18, 22, 17, 18, 5, 0), # 3
(10, 38, 19, 10, 10, 0, 25, 30, 23, 24, 6, 0), # 4
(14, 47, 27, 13, 13, 0, 29, 37, 29, 31, 6, 0), # 5
(16, 53, 35, 17, 14, 0, 34, 43, 34, 31, 11, 0), # 6
(18, 61, 39, 22, 16, 0, 39, 49, 38, 36, 13, 0), # 7
(22, 66, 46, 25, 19, 0, 43, 61, 43, 38, 16, 0), # 8
(25, 73, 50, 28, 22, 0, 49, 69, 52, 40, 20, 0), # 9
(32, 75, 54, 32, 24, 0, 50, 79, 56, 45, 22, 0), # 10
(33, 84, 60, 36, 27, 0, 54, 84, 63, 49, 23, 0), # 11
(35, 92, 65, 38, 28, 0, 55, 92, 68, 51, 24, 0), # 12
(41, 99, 73, 42, 28, 0, 57, 97, 72, 54, 25, 0), # 13
(45, 104, 76, 45, 31, 0, 63, 103, 84, 57, 26, 0), # 14
(47, 116, 82, 47, 33, 0, 66, 108, 91, 62, 30, 0), # 15
(51, 124, 89, 51, 36, 0, 73, 112, 95, 66, 33, 0), # 16
(52, 129, 94, 54, 37, 0, 77, 122, 95, 74, 35, 0), # 17
(54, 141, 102, 56, 40, 0, 84, 130, 99, 81, 39, 0), # 18
(56, 143, 108, 58, 42, 0, 90, 139, 103, 83, 40, 0), # 19
(61, 149, 118, 60, 42, 0, 94, 144, 103, 85, 42, 0), # 20
(63, 155, 129, 61, 43, 0, 99, 153, 107, 88, 43, 0), # 21
(67, 169, 134, 64, 46, 0, 103, 163, 109, 96, 44, 0), # 22
(71, 177, 144, 71, 48, 0, 110, 175, 111, 101, 48, 0), # 23
(74, 180, 151, 74, 52, 0, 116, 183, 115, 106, 50, 0), # 24
(78, 189, 154, 75, 55, 0, 125, 193, 122, 111, 54, 0), # 25
(82, 195, 160, 80, 55, 0, 129, 198, 124, 116, 57, 0), # 26
(84, 205, 164, 84, 58, 0, 135, 207, 128, 119, 59, 0), # 27
(89, 213, 170, 89, 60, 0, 143, 213, 136, 123, 62, 0), # 28
(96, 218, 176, 91, 64, 0, 151, 220, 144, 126, 66, 0), # 29
(101, 224, 182, 92, 68, 0, 156, 225, 147, 127, 66, 0), # 30
(104, 232, 190, 94, 69, 0, 165, 231, 159, 132, 70, 0), # 31
(112, 241, 193, 100, 70, 0, 171, 237, 163, 138, 72, 0), # 32
(113, 247, 199, 101, 70, 0, 173, 243, 165, 143, 76, 0), # 33
(120, 257, 211, 104, 72, 0, 179, 254, 173, 149, 79, 0), # 34
(126, 265, 220, 108, 74, 0, 191, 265, 179, 150, 82, 0), # 35
(129, 271, 224, 112, 76, 0, 199, 275, 182, 154, 83, 0), # 36
(132, 281, 230, 115, 81, 0, 202, 285, 186, 158, 86, 0), # 37
(136, 291, 234, 121, 83, 0, 204, 289, 192, 162, 89, 0), # 38
(140, 302, 237, 124, 84, 0, 207, 297, 196, 168, 90, 0), # 39
(144, 310, 242, 126, 84, 0, 209, 306, 200, 174, 91, 0), # 40
(148, 317, 249, 127, 85, 0, 216, 316, 207, 175, 95, 0), # 41
(150, 319, 255, 133, 87, 0, 221, 326, 211, 177, 99, 0), # 42
(155, 323, 262, 138, 87, 0, 230, 336, 217, 179, 101, 0), # 43
(160, 326, 264, 141, 88, 0, 238, 340, 219, 186, 101, 0), # 44
(164, 330, 271, 144, 90, 0, 245, 351, 221, 188, 105, 0), # 45
(166, 339, 279, 146, 93, 0, 253, 358, 228, 193, 108, 0), # 46
(171, 347, 285, 147, 95, 0, 255, 367, 239, 198, 109, 0), # 47
(174, 351, 293, 149, 98, 0, 263, 376, 241, 201, 109, 0), # 48
(180, 363, 302, 151, 100, 0, 267, 383, 246, 207, 113, 0), # 49
(187, 368, 309, 155, 100, 0, 272, 389, 253, 209, 114, 0), # 50
(192, 376, 313, 159, 100, 0, 277, 399, 260, 216, 115, 0), # 51
(196, 384, 319, 167, 104, 0, 284, 410, 262, 220, 116, 0), # 52
(199, 397, 325, 170, 104, 0, 288, 416, 263, 224, 117, 0), # 53
(201, 405, 330, 173, 107, 0, 295, 423, 266, 229, 118, 0), # 54
(206, 412, 335, 176, 108, 0, 302, 433, 269, 233, 118, 0), # 55
(209, 420, 337, 178, 109, 0, 304, 438, 274, 238, 118, 0), # 56
(212, 428, 342, 179, 111, 0, 311, 445, 278, 242, 119, 0), # 57
(219, 435, 349, 183, 111, 0, 315, 453, 282, 245, 121, 0), # 58
(219, 435, 349, 183, 111, 0, 315, 453, 282, 245, 121, 0), # 59
)
passenger_arriving_rate = (
(3.1795818700614573, 6.524602272727271, 5.755849935732647, 3.0414130434782605, 1.7143269230769227, 0.0, 5.708152173913044, 6.857307692307691, 4.562119565217391, 3.8372332904884314, 1.6311505681818177, 0.0), # 0
(3.20942641205736, 6.597159934764309, 5.786939187017996, 3.0583509057971012, 1.7271759615384612, 0.0, 5.706206567028985, 6.908703846153845, 4.587526358695652, 3.857959458011997, 1.6492899836910773, 0.0), # 1
(3.238930172666081, 6.668641346801345, 5.817290488431875, 3.074915942028985, 1.7397538461538458, 0.0, 5.704201449275362, 6.959015384615383, 4.612373913043478, 3.8781936589545833, 1.6671603367003363, 0.0), # 2
(3.268068107989464, 6.738969375, 5.846881667737788, 3.091094021739129, 1.7520490384615384, 0.0, 5.702137092391305, 7.0081961538461535, 4.636641032608694, 3.897921111825192, 1.68474234375, 0.0), # 3
(3.296815174129353, 6.808066885521885, 5.875690552699228, 3.106871014492753, 1.76405, 0.0, 5.700013768115941, 7.0562, 4.66030652173913, 3.9171270351328187, 1.7020167213804713, 0.0), # 4
(3.3251463271875914, 6.87585674452862, 5.903694971079691, 3.122232789855072, 1.775745192307692, 0.0, 5.697831748188405, 7.102980769230768, 4.6833491847826085, 3.9357966473864603, 1.718964186132155, 0.0), # 5
(3.353036523266023, 6.942261818181818, 5.930872750642674, 3.137165217391304, 1.7871230769230766, 0.0, 5.695591304347826, 7.148492307692306, 4.705747826086957, 3.953915167095116, 1.7355654545454544, 0.0), # 6
(3.380460718466491, 7.007204972643096, 5.95720171915167, 3.1516541666666664, 1.7981721153846155, 0.0, 5.693292708333334, 7.192688461538462, 4.727481249999999, 3.97146781276778, 1.751801243160774, 0.0), # 7
(3.40739386889084, 7.0706090740740715, 5.982659704370181, 3.165685507246376, 1.8088807692307691, 0.0, 5.6909362318840575, 7.2355230769230765, 4.7485282608695645, 3.9884398029134536, 1.7676522685185179, 0.0), # 8
(3.4338109306409126, 7.132396988636362, 6.007224534061696, 3.179245108695652, 1.8192374999999996, 0.0, 5.68852214673913, 7.2769499999999985, 4.768867663043478, 4.004816356041131, 1.7830992471590905, 0.0), # 9
(3.459686859818554, 7.1924915824915825, 6.030874035989717, 3.19231884057971, 1.829230769230769, 0.0, 5.68605072463768, 7.316923076923076, 4.7884782608695655, 4.020582690659811, 1.7981228956228956, 0.0), # 10
(3.4849966125256073, 7.250815721801346, 6.053586037917737, 3.204892572463768, 1.8388490384615384, 0.0, 5.683522237318841, 7.355396153846153, 4.807338858695652, 4.0357240252784905, 1.8127039304503365, 0.0), # 11
(3.509715144863916, 7.30729227272727, 6.0753383676092545, 3.2169521739130427, 1.8480807692307688, 0.0, 5.680936956521738, 7.392323076923075, 4.825428260869565, 4.050225578406169, 1.8268230681818176, 0.0), # 12
(3.5338174129353224, 7.361844101430976, 6.096108852827762, 3.228483514492753, 1.8569144230769232, 0.0, 5.678295153985506, 7.427657692307693, 4.84272527173913, 4.0640725685518415, 1.840461025357744, 0.0), # 13
(3.5572783728416737, 7.414394074074074, 6.115875321336759, 3.2394724637681147, 1.8653384615384612, 0.0, 5.6755971014492745, 7.461353846153845, 4.859208695652172, 4.077250214224506, 1.8535985185185184, 0.0), # 14
(3.5800729806848106, 7.46486505681818, 6.134615600899742, 3.249904891304347, 1.873341346153846, 0.0, 5.672843070652174, 7.493365384615384, 4.874857336956521, 4.089743733933161, 1.866216264204545, 0.0), # 15
(3.6021761925665783, 7.513179915824915, 6.152307519280206, 3.259766666666666, 1.8809115384615382, 0.0, 5.6700333333333335, 7.523646153846153, 4.889649999999999, 4.101538346186803, 1.8782949789562287, 0.0), # 16
(3.6235629645888205, 7.55926151725589, 6.168928904241645, 3.26904365942029, 1.8880374999999998, 0.0, 5.667168161231884, 7.552149999999999, 4.903565489130435, 4.11261926949443, 1.8898153793139725, 0.0), # 17
(3.64420825285338, 7.603032727272725, 6.184457583547558, 3.2777217391304343, 1.8947076923076926, 0.0, 5.664247826086956, 7.578830769230771, 4.916582608695652, 4.122971722365039, 1.9007581818181813, 0.0), # 18
(3.664087013462101, 7.644416412037035, 6.198871384961439, 3.285786775362318, 1.9009105769230765, 0.0, 5.661272599637681, 7.603642307692306, 4.928680163043477, 4.132580923307626, 1.9111041030092588, 0.0), # 19
(3.683174202516827, 7.683335437710435, 6.2121481362467845, 3.2932246376811594, 1.9066346153846152, 0.0, 5.658242753623187, 7.626538461538461, 4.93983695652174, 4.14143209083119, 1.9208338594276086, 0.0), # 20
(3.7014447761194034, 7.719712670454542, 6.224265665167096, 3.3000211956521737, 1.911868269230769, 0.0, 5.655158559782609, 7.647473076923076, 4.950031793478261, 4.14951044344473, 1.9299281676136355, 0.0), # 21
(3.7188736903716704, 7.753470976430976, 6.23520179948586, 3.3061623188405793, 1.9165999999999994, 0.0, 5.652020289855073, 7.666399999999998, 4.959243478260869, 4.15680119965724, 1.938367744107744, 0.0), # 22
(3.7354359013754754, 7.784533221801346, 6.244934366966581, 3.311633876811594, 1.920818269230769, 0.0, 5.6488282155797105, 7.683273076923076, 4.967450815217392, 4.163289577977721, 1.9461333054503365, 0.0), # 23
(3.75110636523266, 7.812822272727271, 6.25344119537275, 3.3164217391304347, 1.9245115384615379, 0.0, 5.645582608695652, 7.6980461538461515, 4.974632608695652, 4.168960796915166, 1.9532055681818177, 0.0), # 24
(3.7658600380450684, 7.838260995370368, 6.260700112467866, 3.320511775362319, 1.9276682692307685, 0.0, 5.642283740942029, 7.710673076923074, 4.980767663043479, 4.173800074978577, 1.959565248842592, 0.0), # 25
(3.779671875914545, 7.860772255892254, 6.266688946015424, 3.3238898550724634, 1.9302769230769228, 0.0, 5.63893188405797, 7.721107692307691, 4.985834782608695, 4.177792630676949, 1.9651930639730635, 0.0), # 26
(3.792516834942932, 7.8802789204545425, 6.2713855237789184, 3.326541847826087, 1.9323259615384616, 0.0, 5.635527309782609, 7.729303846153846, 4.98981277173913, 4.180923682519278, 1.9700697301136356, 0.0), # 27
(3.804369871232075, 7.8967038552188535, 6.2747676735218505, 3.328453623188405, 1.9338038461538458, 0.0, 5.632070289855072, 7.735215384615383, 4.992680434782608, 4.183178449014567, 1.9741759638047134, 0.0), # 28
(3.815205940883816, 7.9099699263468, 6.276813223007711, 3.3296110507246373, 1.9346990384615383, 0.0, 5.628561096014493, 7.738796153846153, 4.994416576086956, 4.184542148671807, 1.9774924815867, 0.0), # 29
(3.8249999999999997, 7.92, 6.2775, 3.3299999999999996, 1.9349999999999996, 0.0, 5.625, 7.739999999999998, 4.994999999999999, 4.185, 1.98, 0.0), # 30
(3.834164434143222, 7.92833164772727, 6.276985163043477, 3.3299297549019604, 1.9348904787234043, 0.0, 5.620051511744128, 7.739561914893617, 4.994894632352941, 4.184656775362318, 1.9820829119318175, 0.0), # 31
(3.843131010230179, 7.936553181818182, 6.275455217391303, 3.329720392156862, 1.9345642553191487, 0.0, 5.612429710144928, 7.738257021276595, 4.994580588235293, 4.1836368115942015, 1.9841382954545455, 0.0), # 32
(3.8519037563938614, 7.944663579545454, 6.272932010869566, 3.329373970588235, 1.9340248404255314, 0.0, 5.6022092203898035, 7.736099361702125, 4.994060955882353, 4.181954673913044, 1.9861658948863634, 0.0), # 33
(3.860486700767263, 7.952661818181817, 6.269437391304347, 3.3288925490196077, 1.9332757446808508, 0.0, 5.589464667666167, 7.733102978723403, 4.993338823529411, 4.179624927536231, 1.9881654545454543, 0.0), # 34
(3.8688838714833755, 7.960546874999998, 6.264993206521739, 3.328278186274509, 1.9323204787234043, 0.0, 5.574270677161419, 7.729281914893617, 4.9924172794117645, 4.176662137681159, 1.9901367187499994, 0.0), # 35
(3.8770992966751923, 7.968317727272727, 6.259621304347825, 3.3275329411764707, 1.9311625531914893, 0.0, 5.556701874062968, 7.724650212765957, 4.9912994117647065, 4.173080869565217, 1.9920794318181818, 0.0), # 36
(3.885137004475703, 7.975973352272726, 6.253343532608695, 3.3266588725490194, 1.9298054787234038, 0.0, 5.536832883558221, 7.719221914893615, 4.989988308823529, 4.168895688405796, 1.9939933380681816, 0.0), # 37
(3.893001023017902, 7.983512727272726, 6.246181739130434, 3.325658039215685, 1.9282527659574464, 0.0, 5.514738330834581, 7.713011063829786, 4.988487058823528, 4.164121159420289, 1.9958781818181814, 0.0), # 38
(3.900695380434782, 7.990934829545453, 6.238157771739129, 3.3245324999999997, 1.9265079255319146, 0.0, 5.490492841079459, 7.7060317021276585, 4.98679875, 4.1587718478260856, 1.9977337073863632, 0.0), # 39
(3.908224104859335, 7.998238636363636, 6.229293478260869, 3.32328431372549, 1.924574468085106, 0.0, 5.464171039480259, 7.698297872340424, 4.984926470588236, 4.1528623188405795, 1.999559659090909, 0.0), # 40
(3.915591224424552, 8.005423124999998, 6.219610706521739, 3.321915539215686, 1.9224559042553186, 0.0, 5.435847551224389, 7.689823617021275, 4.982873308823529, 4.146407137681159, 2.0013557812499996, 0.0), # 41
(3.9228007672634266, 8.012487272727274, 6.209131304347826, 3.320428235294117, 1.920155744680851, 0.0, 5.40559700149925, 7.680622978723404, 4.980642352941175, 4.1394208695652175, 2.0031218181818184, 0.0), # 42
(3.929856761508952, 8.01943005681818, 6.1978771195652165, 3.3188244607843136, 1.9176774999999997, 0.0, 5.373494015492254, 7.670709999999999, 4.978236691176471, 4.131918079710144, 2.004857514204545, 0.0), # 43
(3.936763235294117, 8.026250454545455, 6.18587, 3.317106274509803, 1.9150246808510636, 0.0, 5.339613218390804, 7.660098723404254, 4.975659411764705, 4.123913333333333, 2.0065626136363637, 0.0), # 44
(3.9435242167519178, 8.032947443181817, 6.1731317934782615, 3.315275735294117, 1.91220079787234, 0.0, 5.304029235382309, 7.64880319148936, 4.972913602941175, 4.115421195652174, 2.008236860795454, 0.0), # 45
(3.9501437340153456, 8.03952, 6.159684347826087, 3.313334901960784, 1.9092093617021275, 0.0, 5.266816691654173, 7.63683744680851, 4.970002352941176, 4.106456231884058, 2.00988, 0.0), # 46
(3.956625815217391, 8.045967102272726, 6.1455495108695635, 3.3112858333333324, 1.9060538829787232, 0.0, 5.228050212393803, 7.624215531914893, 4.966928749999999, 4.097033007246376, 2.0114917755681816, 0.0), # 47
(3.962974488491049, 8.052287727272727, 6.130749130434782, 3.309130588235293, 1.9027378723404254, 0.0, 5.187804422788607, 7.610951489361701, 4.96369588235294, 4.087166086956521, 2.013071931818182, 0.0), # 48
(3.9691937819693086, 8.058480852272725, 6.115305054347826, 3.306871225490196, 1.899264840425532, 0.0, 5.146153948025987, 7.597059361702128, 4.960306838235294, 4.076870036231884, 2.014620213068181, 0.0), # 49
(3.9752877237851663, 8.064545454545453, 6.099239130434782, 3.3045098039215683, 1.8956382978723403, 0.0, 5.103173413293353, 7.582553191489361, 4.956764705882353, 4.066159420289854, 2.016136363636363, 0.0), # 50
(3.9812603420716113, 8.070480511363634, 6.082573206521739, 3.302048382352941, 1.8918617553191486, 0.0, 5.0589374437781105, 7.567447021276594, 4.953072573529411, 4.055048804347826, 2.0176201278409085, 0.0), # 51
(3.987115664961637, 8.076284999999999, 6.065329130434782, 3.299489019607843, 1.8879387234042553, 0.0, 5.013520664667666, 7.551754893617021, 4.949233529411765, 4.043552753623188, 2.0190712499999997, 0.0), # 52
(3.992857720588235, 8.081957897727271, 6.047528749999999, 3.2968337745098037, 1.8838727127659571, 0.0, 4.966997701149425, 7.5354908510638285, 4.945250661764706, 4.0316858333333325, 2.020489474431818, 0.0), # 53
(3.9984905370843995, 8.08749818181818, 6.0291939130434775, 3.294084705882353, 1.8796672340425529, 0.0, 4.919443178410794, 7.5186689361702115, 4.941127058823529, 4.019462608695651, 2.021874545454545, 0.0), # 54
(4.00401814258312, 8.092904829545454, 6.010346467391303, 3.2912438725490194, 1.8753257978723403, 0.0, 4.87093172163918, 7.501303191489361, 4.936865808823529, 4.006897644927535, 2.0232262073863634, 0.0), # 55
(4.0094445652173905, 8.098176818181816, 5.991008260869564, 3.288313333333333, 1.8708519148936167, 0.0, 4.821537956021989, 7.483407659574467, 4.9324699999999995, 3.994005507246376, 2.024544204545454, 0.0), # 56
(4.014773833120205, 8.103313125, 5.971201141304347, 3.285295147058823, 1.8662490957446805, 0.0, 4.771336506746626, 7.464996382978722, 4.927942720588234, 3.980800760869564, 2.02582828125, 0.0), # 57
(4.0200099744245525, 8.108312727272725, 5.950946956521738, 3.2821913725490197, 1.8615208510638295, 0.0, 4.7204019990005, 7.446083404255318, 4.923287058823529, 3.9672979710144918, 2.0270781818181813, 0.0), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_allighting_rate = (
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59
)
"""
parameters for reproducibiliy. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
#initial entropy
entropy = 258194110137029475889902652135037600173
#index for seed sequence child
child_seed_index = (
1, # 0
4, # 1
)
| 112.623881
| 213
| 0.727955
| 5,147
| 37,729
| 5.333981
| 0.22382
| 0.314708
| 0.249144
| 0.472062
| 0.331682
| 0.329788
| 0.329788
| 0.329788
| 0.329788
| 0.329788
| 0
| 0.818154
| 0.119643
| 37,729
| 334
| 214
| 112.961078
| 0.0084
| 0.032097
| 0
| 0.202532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.015823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7e6e555640e9d566e30c1077470fabffd0e0fe28
| 77
|
py
|
Python
|
speckle_pattern/__init__.py
|
jankoslavic/speckle_pattern
|
7c2233a36b8870b3b5d712e21d18f0fb4f5da6e0
|
[
"MIT"
] | 16
|
2019-05-31T20:06:03.000Z
|
2022-01-04T06:42:28.000Z
|
speckle_pattern/__init__.py
|
EntPyle/speckle_pattern
|
7c2233a36b8870b3b5d712e21d18f0fb4f5da6e0
|
[
"MIT"
] | null | null | null |
speckle_pattern/__init__.py
|
EntPyle/speckle_pattern
|
7c2233a36b8870b3b5d712e21d18f0fb4f5da6e0
|
[
"MIT"
] | 12
|
2018-12-18T18:08:37.000Z
|
2021-11-13T00:17:07.000Z
|
from .speckle import generate_and_save, generate_lines, generate_checkerboard
| 77
| 77
| 0.896104
| 10
| 77
| 6.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064935
| 77
| 1
| 77
| 77
| 0.902778
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0e185a315c65c0f483135ff29705243a6196dc0b
| 135
|
py
|
Python
|
query_builder/query_builder_empty.py
|
mbkr1992/weather-analytical-portal-rest-api
|
f4af9b308aa0ca02103bb81e7446ab641b5d7e2a
|
[
"MIT"
] | null | null | null |
query_builder/query_builder_empty.py
|
mbkr1992/weather-analytical-portal-rest-api
|
f4af9b308aa0ca02103bb81e7446ab641b5d7e2a
|
[
"MIT"
] | null | null | null |
query_builder/query_builder_empty.py
|
mbkr1992/weather-analytical-portal-rest-api
|
f4af9b308aa0ca02103bb81e7446ab641b5d7e2a
|
[
"MIT"
] | null | null | null |
from query_builder.query_builder import QueryBuilder
class QueryBuilderEmpty(QueryBuilder):
def build(self, params):
pass
| 22.5
| 52
| 0.77037
| 15
| 135
| 6.8
| 0.8
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17037
| 135
| 6
| 53
| 22.5
| 0.910714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
0e1932656b40274d93c0706e3ff1b0b460b18dcf
| 40
|
py
|
Python
|
rl_trader/engine/rl_environment/types/test/env_market_types.test.py
|
AlexandreMahdhaoui/rl_trader
|
5bda02622c7e17c4e6f28a90c510cfe8f914f7a8
|
[
"Apache-2.0"
] | null | null | null |
rl_trader/engine/rl_environment/types/test/env_market_types.test.py
|
AlexandreMahdhaoui/rl_trader
|
5bda02622c7e17c4e6f28a90c510cfe8f914f7a8
|
[
"Apache-2.0"
] | null | null | null |
rl_trader/engine/rl_environment/types/test/env_market_types.test.py
|
AlexandreMahdhaoui/rl_trader
|
5bda02622c7e17c4e6f28a90c510cfe8f914f7a8
|
[
"Apache-2.0"
] | null | null | null |
# TODO: PairMarketValue
# TODO: Market
| 10
| 23
| 0.725
| 4
| 40
| 7.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 40
| 3
| 24
| 13.333333
| 0.878788
| 0.85
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.333333
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0e3826056d8eeaf711fea6bb9e7987cfedb97770
| 2,799
|
py
|
Python
|
radioxenon_ml/solve/matrix_values.py
|
sczyz/radioxenon_ml
|
73398f0060e88616c7652a72bdedf7f93ea17a20
|
[
"MIT"
] | null | null | null |
radioxenon_ml/solve/matrix_values.py
|
sczyz/radioxenon_ml
|
73398f0060e88616c7652a72bdedf7f93ea17a20
|
[
"MIT"
] | null | null | null |
radioxenon_ml/solve/matrix_values.py
|
sczyz/radioxenon_ml
|
73398f0060e88616c7652a72bdedf7f93ea17a20
|
[
"MIT"
] | 1
|
2018-04-23T20:52:43.000Z
|
2018-04-23T20:52:43.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 8 17:19:53 2018
@author: Steven
"""
import numpy as np
def j_matrix_val(S,D,f):
"""
Determines the variance of the number of counts in each channel i of the
vectorized version of the 2D coincidence spectrum for the qth iteration:
-S(np.array) is the original experimental spectrum
-D(np.array) is the determined variance value from variance()
-f(np.array) is the reference spectrum for each isotope as well as background
Equations are taken from the quite excellent paper:
Lowrey, Justin D., and Steven R.F. Biegalski. “Comparison of Least-
Squares vs. Maximum Likelihood Estimation for Standard Spectrum
Technique of Β−γ Coincidence Spectrum Analysis.” Nuclear Instruments
and Methods in Physics Research Section B: Beam Interactions with
Materials and Atoms 270 (January 2012): 116–19.
https://doi.org/10.1016/j.nimb.2011.09.005.
"""
J_temp = np.zeros((np.shape(f)[1],np.shape(f)[0]))
J = np.zeros((np.shape(f)[1],1))
for j in range(np.shape(f)[1]): #loop over # of isotopes
for i in range(np.shape(f)[0]): #loop over # of array elements
J_temp[j,i] = (S[i]*f[i,j])/D[i] #Eqn. 7
J[j] = np.sum(J_temp[j]) #sum all columns to make a column vector
return J
def k_matrix_val(D,f):
"""
Determines the variance of the number of counts in each channel i of the
vectorized version of the 2D coincidence spectrum for the qth iteration:
-D(np.array) is the determined variance value from variance()
-f(np.array) is the reference spectrum for each isotope as well as background
Equations are taken from the quite excellent paper:
Lowrey, Justin D., and Steven R.F. Biegalski. “Comparison of Least-
Squares vs. Maximum Likelihood Estimation for Standard Spectrum
Technique of Β−γ Coincidence Spectrum Analysis.” Nuclear Instruments
and Methods in Physics Research Section B: Beam Interactions with
Materials and Atoms 270 (January 2012): 116–19.
https://doi.org/10.1016/j.nimb.2011.09.005.
"""
K_element_temp = np.zeros((np.shape(f)[1],np.shape(f)[0]))
K = np.zeros((np.shape(f)[1],np.shape(f)[1]))
for m in range(np.shape(f)[1]): #loop over # of isotopes
for j in range(np.shape(f)[1]): #loop over # of isotopes again
for i in range(np.shape(f)[0]): #loop over # of array elements
K_element_temp[j,i] = (f[i,m]*f[i,j])/D[i] #Eqn. 7
K[m,j] = np.sum(K_element_temp[j]) #sum all elements to make an entry in the array
return K
| 39.985714
| 95
| 0.622008
| 442
| 2,799
| 3.918552
| 0.294118
| 0.048499
| 0.055427
| 0.04157
| 0.829099
| 0.829099
| 0.819861
| 0.809469
| 0.809469
| 0.795612
| 0
| 0.040766
| 0.272597
| 2,799
| 69
| 96
| 40.565217
| 0.807957
| 0.656306
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.055556
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0e93d555d556779c60518cc909097718895e4273
| 49
|
py
|
Python
|
my-webapp/my_domain.py
|
BernardNotarianni/spike-concourse
|
eb84f52c96688e94bdd6ee61d348257c8eee0040
|
[
"MIT"
] | null | null | null |
my-webapp/my_domain.py
|
BernardNotarianni/spike-concourse
|
eb84f52c96688e94bdd6ee61d348257c8eee0040
|
[
"MIT"
] | 1
|
2019-06-09T13:12:33.000Z
|
2019-06-09T13:12:33.000Z
|
my-webapp/my_domain.py
|
BernardNotarianni/spike-concourse
|
eb84f52c96688e94bdd6ee61d348257c8eee0040
|
[
"MIT"
] | null | null | null |
def my_message():
return "Hello Concourse!"
| 12.25
| 29
| 0.673469
| 6
| 49
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204082
| 49
| 3
| 30
| 16.333333
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
0ea6dd3e534279d8de4104b149933cf528ec0bd1
| 974
|
py
|
Python
|
findost/serverApp/database/DatabaseError.py
|
BongOST/FindOST
|
bef3c8991b7494ad08c06ed1c9fb0bc41996ec8b
|
[
"Apache-2.0"
] | 1
|
2017-11-12T03:10:25.000Z
|
2017-11-12T03:10:25.000Z
|
findost/serverApp/database/DatabaseError.py
|
BongOST/FindOST
|
bef3c8991b7494ad08c06ed1c9fb0bc41996ec8b
|
[
"Apache-2.0"
] | null | null | null |
findost/serverApp/database/DatabaseError.py
|
BongOST/FindOST
|
bef3c8991b7494ad08c06ed1c9fb0bc41996ec8b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
__author__ = 'lee'
"""
数据库自定义异常
"""
class ExistError(Exception):
def __init__(self, exist, message):
self.exist = exist
self.message = message
def __str__(self):
return repr(self.message)
class NotExistError(Exception):
def __init__(self, notexist, message):
self.notexist = notexist
self.message = message
def __str__(self):
return repr(self.message)
class OutRangeError(Exception):
def __init__(self, table, message):
self.table = table
self.message = message
def __str__(self):
return repr(self.message)
class InsertError(Exception):
def __init__(self, table, message):
self.table = table
self.message = message
def __str__(self):
return repr(self.message)
class CommonError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
| 20.723404
| 42
| 0.635524
| 107
| 974
| 5.373832
| 0.214953
| 0.210435
| 0.13913
| 0.173913
| 0.62087
| 0.62087
| 0.62087
| 0.62087
| 0.62087
| 0.62087
| 0
| 0.001381
| 0.256674
| 974
| 46
| 43
| 21.173913
| 0.792818
| 0.020534
| 0
| 0.633333
| 0
| 0
| 0.003205
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
7d1d6343b048ef31eb6f2a0fed69b1e88ee35132
| 128
|
py
|
Python
|
app/api/__init__.py
|
yc19890920/flask-blog
|
d2aa57bd876e41a18a791c0b110bb31b86133ead
|
[
"MIT"
] | null | null | null |
app/api/__init__.py
|
yc19890920/flask-blog
|
d2aa57bd876e41a18a791c0b110bb31b86133ead
|
[
"MIT"
] | null | null | null |
app/api/__init__.py
|
yc19890920/flask-blog
|
d2aa57bd876e41a18a791c0b110bb31b86133ead
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
api = Blueprint('api', __name__)
from . import authentication, posts, comments, errors # users
| 25.6
| 61
| 0.742188
| 15
| 128
| 6.066667
| 0.733333
| 0.263736
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171875
| 128
| 5
| 61
| 25.6
| 0.858491
| 0.039063
| 0
| 0
| 0
| 0
| 0.025424
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
ade50d3dc4937799eaa3095e5d8a35a75955fdd7
| 162
|
py
|
Python
|
tests/unit/test_values.py
|
prog-serhii/MyPorfolio
|
ab8d06925650a4b5809a33669a5d315dc87c56ec
|
[
"MIT"
] | null | null | null |
tests/unit/test_values.py
|
prog-serhii/MyPorfolio
|
ab8d06925650a4b5809a33669a5d315dc87c56ec
|
[
"MIT"
] | null | null | null |
tests/unit/test_values.py
|
prog-serhii/MyPorfolio
|
ab8d06925650a4b5809a33669a5d315dc87c56ec
|
[
"MIT"
] | null | null | null |
import random
from decimal import Decimal
from domain.values import Money
def test_equality():
assert Money('uah', Decimal()) == Money('uah', Decimal(50))
| 18
| 63
| 0.722222
| 22
| 162
| 5.272727
| 0.590909
| 0.137931
| 0.258621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014599
| 0.154321
| 162
| 8
| 64
| 20.25
| 0.832117
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| true
| 0
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ade83ed2a02d34b8387677d6ded173699b1ca4d8
| 1,404
|
py
|
Python
|
src/colleges/migrations/0004_auto_20160120_1231.py
|
Busaka/excellence
|
1cd19770285584d61aeddd77d6c1dd83e2fd04ba
|
[
"MIT"
] | null | null | null |
src/colleges/migrations/0004_auto_20160120_1231.py
|
Busaka/excellence
|
1cd19770285584d61aeddd77d6c1dd83e2fd04ba
|
[
"MIT"
] | null | null | null |
src/colleges/migrations/0004_auto_20160120_1231.py
|
Busaka/excellence
|
1cd19770285584d61aeddd77d6c1dd83e2fd04ba
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-20 12:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('colleges', '0003_auto_20160120_1034'),
]
operations = [
migrations.AlterField(
model_name='college',
name='college_logo',
field=models.ImageField(upload_to='colleges/college_photos'),
),
migrations.AlterField(
model_name='college',
name='college_photo1',
field=models.ImageField(upload_to='colleges/college_photos'),
),
migrations.AlterField(
model_name='college',
name='college_photo2',
field=models.ImageField(upload_to='colleges/college_photos'),
),
migrations.AlterField(
model_name='college',
name='college_photo3',
field=models.ImageField(upload_to='colleges/college_photos'),
),
migrations.AlterField(
model_name='college',
name='college_photo4',
field=models.ImageField(upload_to='colleges/college_photos'),
),
migrations.AlterField(
model_name='college',
name='college_photo5',
field=models.ImageField(upload_to='colleges/college_photos'),
),
]
| 30.521739
| 73
| 0.597578
| 132
| 1,404
| 6.113636
| 0.348485
| 0.163569
| 0.185874
| 0.215613
| 0.72119
| 0.72119
| 0.72119
| 0.662949
| 0.600991
| 0.600991
| 0
| 0.036036
| 0.288462
| 1,404
| 45
| 74
| 31.2
| 0.771772
| 0.046296
| 0
| 0.631579
| 1
| 0
| 0.219311
| 0.120509
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
adf3d8f8208184767f05cf55b646aae17c4a25b1
| 8,295
|
py
|
Python
|
tests/test_templates.py
|
clokep/mwcomposerfromhell
|
02ba160ad55ee7fc1b69834cd5cb256e98a52648
|
[
"0BSD"
] | 3
|
2019-10-03T06:46:19.000Z
|
2021-09-25T13:39:32.000Z
|
tests/test_templates.py
|
clokep/mwcomposerfromhell
|
02ba160ad55ee7fc1b69834cd5cb256e98a52648
|
[
"0BSD"
] | 1
|
2020-03-18T07:24:28.000Z
|
2020-05-07T11:58:59.000Z
|
tests/test_templates.py
|
clokep/mwcomposerfromhell
|
02ba160ad55ee7fc1b69834cd5cb256e98a52648
|
[
"0BSD"
] | 1
|
2021-07-05T11:30:13.000Z
|
2021-07-05T11:30:13.000Z
|
import mwparserfromhell
from mwcomposerfromhell import (
ArticleResolver,
compose,
Namespace,
WikicodeToHtmlComposer,
)
def _get_composer(templates):
resolver = ArticleResolver()
resolver.add_namespace("Template", Namespace(templates))
return WikicodeToHtmlComposer(resolver=resolver)
def test_simple():
"""Render a simple template."""
# A simple template that's just a string.
template = "This is a test"
templates = {"temp": mwparserfromhell.parse(template)}
# Parse the main content.
wikicode = mwparserfromhell.parse("{{temp}}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == "<p>" + template + "</p>"
def test_with_args():
"""Render a content with a template that has arguments."""
# Template that uses both a position and keyword argument.
templates = {"temp": mwparserfromhell.parse('This is a "{{{1}}}" "{{{key}}}"')}
# Parse the main content.
wikicode = mwparserfromhell.parse("{{temp|foobar|key=value}}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == '<p>This is a "foobar" "value"</p>'
def test_with_default_args():
"""Render a template where arguments fall back to default values."""
# Template that uses a position argument and a keyword argument, both with
# defaults.
templates = {
"temp": mwparserfromhell.parse('This is a "{{{1|first}}}" "{{{key|second}}}"')
}
# Parse the main content.
wikicode = mwparserfromhell.parse("{{temp}}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == '<p>This is a "first" "second"</p>'
def test_with_blank_default_args():
"""Render a template where arguments fall back to blank values."""
# Template that uses a position argument and a keyword argument, both with
# blank defaults.
templates = {"temp": mwparserfromhell.parse('This is a "{{{1|}}}" "{{{key|}}}"')}
# Parse the main content.
wikicode = mwparserfromhell.parse("{{temp}}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == '<p>This is a "" ""</p>'
def test_with_replaced_default_arg():
"""A default argument that is another replacement."""
# Template that uses a position argument and a keyword argument, both with
# defaults.
templates = {
"temp": mwparserfromhell.parse(
'This is a "{{{1|foo {{{default}}}}}}" "{{{key|foo {{{default}}}}}}"'
)
}
# Parse the main content.
wikicode = mwparserfromhell.parse("{{temp|default=bar}}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == '<p>This is a "foo bar" "foo bar"</p>'
def test_without_default_args():
"""Render a template where arguments fall back to their keys."""
# Template that uses a position argument and a keyword argument, without
# defaults.
template = 'This is a "{{{1}}}" "{{{key}}}"'
templates = {"temp": mwparserfromhell.parse(template)}
# Parse the main content.
wikicode = mwparserfromhell.parse("{{temp}}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == "<p>" + template + "</p>"
def test_complex_name():
"""A template name that gets rendered via a different template."""
templates = {
"text": mwparserfromhell.parse("{{{1}}}"),
"temp": mwparserfromhell.parse("This is a test"),
}
# Parse the main content. The name of the template is given by another template
wikicode = mwparserfromhell.parse("{{t{{text|em}}p}}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == "<p>This is a test</p>"
def test_complex_parameter_name():
"""A template name that gets rendered via a different template."""
templates = {
"text": mwparserfromhell.parse("{{{1}}}"),
"temp": mwparserfromhell.parse('This is a "{{{1}}}" "{{{key}}}"'),
}
# Parse the main content. The name of the template is given by another template
wikicode = mwparserfromhell.parse("{{temp|first|k{{text|ey}}=second}}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == '<p>This is a "first" "second"</p>'
def test_complex_parameter_value():
"""A template name that gets rendered via a different template."""
templates = {
"text": mwparserfromhell.parse("{{{1}}}"),
"temp": mwparserfromhell.parse('This is a "{{{1}}}" "{{{key}}}"'),
}
# Parse the main content. The name of the template is given by another template
wikicode = mwparserfromhell.parse("{{temp|fi{{text|rst}}|key={{text|sec}}ond}}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == '<p>This is a "first" "second"</p>'
def test_complex_arg():
"""An argument name gets generated in a complex fashion."""
templates = {
"text": mwparserfromhell.parse("{{{1}}}"),
"temp": mwparserfromhell.parse(
'This is a "{{{ {{text|1}} }}}" "{{{ {{text|key}} }}}"'
),
}
# Parse the main content. The name of the template is given by another template
wikicode = mwparserfromhell.parse("{{temp|first|key=second}}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == '<p>This is a "first" "second"</p>'
def test_spaces():
"""Spaces around a template name should be ignored."""
# A simple template that's just a string.
template = "This is a test"
templates = {"temp": mwparserfromhell.parse(template)}
# Parse the main content.
wikicode = mwparserfromhell.parse("{{ temp }}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == "<p>" + template + "</p>"
def test_spaces_with_parameter():
"""Spaces around keyword parameters should be removed."""
# Template that uses both a position and keyword argument.
templates = {"temp": mwparserfromhell.parse('This is a "{{{1}}}" "{{{key}}}"')}
# Parse the main content.
wikicode = mwparserfromhell.parse("{{temp| foobar | key = value}}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == '<p>This is a " foobar " "value"</p>'
def test_capitalization():
"""MediaWiki treats the first character as case-insensitive."""
# A simple template that's just a string.
template = "This is a test"
templates = {"temp": mwparserfromhell.parse(template)}
# Parse the main content.
wikicode = mwparserfromhell.parse("{{Temp}}")
# Render the result.
composer = _get_composer(templates)
assert composer.compose(wikicode) == "<p>" + template + "</p>"
def test_wikilink():
"""Parameters in Wikilinks should be replaced."""
templates = {"temp": mwparserfromhell.parse("[[{{{1}}}|See more at {{{1}}}]]")}
# Parse the main content.
wikicode = mwparserfromhell.parse("{{temp|foobar}}")
# Render the result.
composer = _get_composer(templates)
assert (
composer.compose(wikicode)
== '<p><a href="/wiki/Foobar" title="Foobar">See more at foobar</a></p>'
)
def test_externallink():
"""Parameters in external links should be replaced."""
templates = {"temp": mwparserfromhell.parse("[https://{{{1}}}.com {{{1}}}]")}
# Parse the main content.
wikicode = mwparserfromhell.parse("{{temp|foobar}}")
# Render the result.
composer = _get_composer(templates)
assert (
composer.compose(wikicode) == '<p><a href="https://foobar.com">foobar</a></p>'
)
def test_unknown():
"""An unknown template gets rendered as is."""
content = "{{temp}}"
wikicode = mwparserfromhell.parse(content)
# Render the result.
assert compose(wikicode) == "<p>" + content + "</p>"
def test_unknown_duplicate():
"""An unknown template gets rendered as is."""
content = "{{temp}}{{temp}}"
wikicode = mwparserfromhell.parse(content)
# Render the result.
assert compose(wikicode) == "<p>" + content + "</p>"
| 32.529412
| 86
| 0.6434
| 971
| 8,295
| 5.424305
| 0.123584
| 0.143535
| 0.029239
| 0.054111
| 0.814505
| 0.800456
| 0.800456
| 0.78147
| 0.771597
| 0.754129
| 0
| 0.002571
| 0.203014
| 8,295
| 254
| 87
| 32.65748
| 0.794131
| 0.283303
| 0
| 0.512195
| 0
| 0.00813
| 0.225623
| 0.02184
| 0
| 0
| 0
| 0
| 0.138211
| 1
| 0.146341
| false
| 0
| 0.01626
| 0
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
adfcf06ac1569b8f6c285d4e3ed2141c330c5572
| 28,042
|
py
|
Python
|
scicopia/app/parser/ScicopiaLexer.py
|
pikatech/Scicopia
|
dcdb3b4f55b9111fa3b4fe78afdb07bb2ceb9985
|
[
"MIT"
] | null | null | null |
scicopia/app/parser/ScicopiaLexer.py
|
pikatech/Scicopia
|
dcdb3b4f55b9111fa3b4fe78afdb07bb2ceb9985
|
[
"MIT"
] | 9
|
2021-07-24T16:12:03.000Z
|
2021-07-24T16:58:19.000Z
|
scicopia/app/parser/ScicopiaLexer.py
|
pikatech/Scicopia
|
dcdb3b4f55b9111fa3b4fe78afdb07bb2ceb9985
|
[
"MIT"
] | 1
|
2021-06-18T16:00:06.000Z
|
2021-06-18T16:00:06.000Z
|
# Generated from Scicopia.g4 by ANTLR 4.9.2
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\23")
buf.write("\u00c5\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\3\2\3\2\3\3\3\3\3")
buf.write("\4\3\4\3\5\3\5\3\5\6\5\67\n\5\r\5\16\58\3\5\3\5\3\5\6")
buf.write("\5>\n\5\r\5\16\5?\3\5\3\5\3\5\7\5E\n\5\f\5\16\5H\13\5")
buf.write("\5\5J\n\5\3\6\3\6\3\6\3\6\6\6P\n\6\r\6\16\6Q\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\5\7Z\n\7\6\7\\\n\7\r\7\16\7]\3\7\3\7\5")
buf.write("\7b\n\7\3\7\3\7\3\7\3\7\3\7\7\7i\n\7\f\7\16\7l\13\7\5")
buf.write("\7n\n\7\3\7\3\7\3\7\3\7\3\7\7\7u\n\7\f\7\16\7x\13\7\6")
buf.write("\7z\n\7\r\7\16\7{\5\7~\n\7\3\b\3\b\3\b\6\b\u0083\n\b\r")
buf.write("\b\16\b\u0084\3\t\3\t\3\n\6\n\u008a\n\n\r\n\16\n\u008b")
buf.write("\3\13\6\13\u008f\n\13\r\13\16\13\u0090\3\f\6\f\u0094\n")
buf.write("\f\r\f\16\f\u0095\3\f\3\f\3\r\6\r\u009b\n\r\r\r\16\r\u009c")
buf.write("\3\r\3\r\3\16\3\16\6\16\u00a3\n\16\r\16\16\16\u00a4\3")
buf.write("\17\3\17\3\17\6\17\u00aa\n\17\r\17\16\17\u00ab\3\20\3")
buf.write("\20\3\21\3\21\3\22\3\22\7\22\u00b4\n\22\f\22\16\22\u00b7")
buf.write("\13\22\3\23\3\23\3\24\3\24\3\25\3\25\3\26\6\26\u00c0\n")
buf.write("\26\r\26\16\26\u00c1\3\26\3\26\2\2\27\3\3\5\4\7\5\t\6")
buf.write("\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20")
buf.write("\37\21!\22#\2%\2\'\2)\2+\23\3\2\6\6\2--//GGUU\4\2--//")
buf.write("\t\2##%(,.\60\61=B`a\u0080\u0080\5\2\13\f\16\17\"\"\5")
buf.write("\u024b\2C\2\\\2c\2|\2\u00ac\2\u00ac\2\u00b7\2\u00b7\2")
buf.write("\u00bc\2\u00bc\2\u00c2\2\u00d8\2\u00da\2\u00f8\2\u00fa")
buf.write("\2\u02c3\2\u02c8\2\u02d3\2\u02e2\2\u02e6\2\u02ee\2\u02ee")
buf.write("\2\u02f0\2\u02f0\2\u0372\2\u0376\2\u0378\2\u0379\2\u037c")
buf.write("\2\u037f\2\u0381\2\u0381\2\u0388\2\u0388\2\u038a\2\u038c")
buf.write("\2\u038e\2\u038e\2\u0390\2\u03a3\2\u03a5\2\u03f7\2\u03f9")
buf.write("\2\u0483\2\u048c\2\u0531\2\u0533\2\u0558\2\u055b\2\u055b")
buf.write("\2\u0563\2\u0589\2\u05d2\2\u05ec\2\u05f2\2\u05f4\2\u0622")
buf.write("\2\u064c\2\u0670\2\u0671\2\u0673\2\u06d5\2\u06d7\2\u06d7")
buf.write("\2\u06e7\2\u06e8\2\u06f0\2\u06f1\2\u06fc\2\u06fe\2\u0701")
buf.write("\2\u0701\2\u0712\2\u0712\2\u0714\2\u0731\2\u074f\2\u07a7")
buf.write("\2\u07b3\2\u07b3\2\u07cc\2\u07ec\2\u07f6\2\u07f7\2\u07fc")
buf.write("\2\u07fc\2\u0802\2\u0817\2\u081c\2\u081c\2\u0826\2\u0826")
buf.write("\2\u082a\2\u082a\2\u0842\2\u085a\2\u0862\2\u086c\2\u08a2")
buf.write("\2\u08b6\2\u08b8\2\u08bf\2\u0906\2\u093b\2\u093f\2\u093f")
buf.write("\2\u0952\2\u0952\2\u095a\2\u0963\2\u0973\2\u0982\2\u0987")
buf.write("\2\u098e\2\u0991\2\u0992\2\u0995\2\u09aa\2\u09ac\2\u09b2")
buf.write("\2\u09b4\2\u09b4\2\u09b8\2\u09bb\2\u09bf\2\u09bf\2\u09d0")
buf.write("\2\u09d0\2\u09de\2\u09df\2\u09e1\2\u09e3\2\u09f2\2\u09f3")
buf.write("\2\u09fe\2\u09fe\2\u0a07\2\u0a0c\2\u0a11\2\u0a12\2\u0a15")
buf.write("\2\u0a2a\2\u0a2c\2\u0a32\2\u0a34\2\u0a35\2\u0a37\2\u0a38")
buf.write("\2\u0a3a\2\u0a3b\2\u0a5b\2\u0a5e\2\u0a60\2\u0a60\2\u0a74")
buf.write("\2\u0a76\2\u0a87\2\u0a8f\2\u0a91\2\u0a93\2\u0a95\2\u0aaa")
buf.write("\2\u0aac\2\u0ab2\2\u0ab4\2\u0ab5\2\u0ab7\2\u0abb\2\u0abf")
buf.write("\2\u0abf\2\u0ad2\2\u0ad2\2\u0ae2\2\u0ae3\2\u0afb\2\u0afb")
buf.write("\2\u0b07\2\u0b0e\2\u0b11\2\u0b12\2\u0b15\2\u0b2a\2\u0b2c")
buf.write("\2\u0b32\2\u0b34\2\u0b35\2\u0b37\2\u0b3b\2\u0b3f\2\u0b3f")
buf.write("\2\u0b5e\2\u0b5f\2\u0b61\2\u0b63\2\u0b73\2\u0b73\2\u0b85")
buf.write("\2\u0b85\2\u0b87\2\u0b8c\2\u0b90\2\u0b92\2\u0b94\2\u0b97")
buf.write("\2\u0b9b\2\u0b9c\2\u0b9e\2\u0b9e\2\u0ba0\2\u0ba1\2\u0ba5")
buf.write("\2\u0ba6\2\u0baa\2\u0bac\2\u0bb0\2\u0bbb\2\u0bd2\2\u0bd2")
buf.write("\2\u0c07\2\u0c0e\2\u0c10\2\u0c12\2\u0c14\2\u0c2a\2\u0c2c")
buf.write("\2\u0c3b\2\u0c3f\2\u0c3f\2\u0c5a\2\u0c5c\2\u0c62\2\u0c63")
buf.write("\2\u0c82\2\u0c82\2\u0c87\2\u0c8e\2\u0c90\2\u0c92\2\u0c94")
buf.write("\2\u0caa\2\u0cac\2\u0cb5\2\u0cb7\2\u0cbb\2\u0cbf\2\u0cbf")
buf.write("\2\u0ce0\2\u0ce0\2\u0ce2\2\u0ce3\2\u0cf3\2\u0cf4\2\u0d07")
buf.write("\2\u0d0e\2\u0d10\2\u0d12\2\u0d14\2\u0d3c\2\u0d3f\2\u0d3f")
buf.write("\2\u0d50\2\u0d50\2\u0d56\2\u0d58\2\u0d61\2\u0d63\2\u0d7c")
buf.write("\2\u0d81\2\u0d87\2\u0d98\2\u0d9c\2\u0db3\2\u0db5\2\u0dbd")
buf.write("\2\u0dbf\2\u0dbf\2\u0dc2\2\u0dc8\2\u0e03\2\u0e32\2\u0e34")
buf.write("\2\u0e35\2\u0e42\2\u0e48\2\u0e83\2\u0e84\2\u0e86\2\u0e86")
buf.write("\2\u0e89\2\u0e8a\2\u0e8c\2\u0e8c\2\u0e8f\2\u0e8f\2\u0e96")
buf.write("\2\u0e99\2\u0e9b\2\u0ea1\2\u0ea3\2\u0ea5\2\u0ea7\2\u0ea7")
buf.write("\2\u0ea9\2\u0ea9\2\u0eac\2\u0ead\2\u0eaf\2\u0eb2\2\u0eb4")
buf.write("\2\u0eb5\2\u0ebf\2\u0ebf\2\u0ec2\2\u0ec6\2\u0ec8\2\u0ec8")
buf.write("\2\u0ede\2\u0ee1\2\u0f02\2\u0f02\2\u0f42\2\u0f49\2\u0f4b")
buf.write("\2\u0f6e\2\u0f8a\2\u0f8e\2\u1002\2\u102c\2\u1041\2\u1041")
buf.write("\2\u1052\2\u1057\2\u105c\2\u105f\2\u1063\2\u1063\2\u1067")
buf.write("\2\u1068\2\u1070\2\u1072\2\u1077\2\u1083\2\u1090\2\u1090")
buf.write("\2\u10a2\2\u10c7\2\u10c9\2\u10c9\2\u10cf\2\u10cf\2\u10d2")
buf.write("\2\u10fc\2\u10fe\2\u124a\2\u124c\2\u124f\2\u1252\2\u1258")
buf.write("\2\u125a\2\u125a\2\u125c\2\u125f\2\u1262\2\u128a\2\u128c")
buf.write("\2\u128f\2\u1292\2\u12b2\2\u12b4\2\u12b7\2\u12ba\2\u12c0")
buf.write("\2\u12c2\2\u12c2\2\u12c4\2\u12c7\2\u12ca\2\u12d8\2\u12da")
buf.write("\2\u1312\2\u1314\2\u1317\2\u131a\2\u135c\2\u1382\2\u1391")
buf.write("\2\u13a2\2\u13f7\2\u13fa\2\u13ff\2\u1403\2\u166e\2\u1671")
buf.write("\2\u1681\2\u1683\2\u169c\2\u16a2\2\u16ec\2\u16f3\2\u16fa")
buf.write("\2\u1702\2\u170e\2\u1710\2\u1713\2\u1722\2\u1733\2\u1742")
buf.write("\2\u1753\2\u1762\2\u176e\2\u1770\2\u1772\2\u1782\2\u17b5")
buf.write("\2\u17d9\2\u17d9\2\u17de\2\u17de\2\u1822\2\u1879\2\u1882")
buf.write("\2\u1886\2\u1889\2\u18aa\2\u18ac\2\u18ac\2\u18b2\2\u18f7")
buf.write("\2\u1902\2\u1920\2\u1952\2\u196f\2\u1972\2\u1976\2\u1982")
buf.write("\2\u19ad\2\u19b2\2\u19cb\2\u1a02\2\u1a18\2\u1a22\2\u1a56")
buf.write("\2\u1aa9\2\u1aa9\2\u1b07\2\u1b35\2\u1b47\2\u1b4d\2\u1b85")
buf.write("\2\u1ba2\2\u1bb0\2\u1bb1\2\u1bbc\2\u1be7\2\u1c02\2\u1c25")
buf.write("\2\u1c4f\2\u1c51\2\u1c5c\2\u1c7f\2\u1c82\2\u1c8a\2\u1ceb")
buf.write("\2\u1cee\2\u1cf0\2\u1cf3\2\u1cf7\2\u1cf8\2\u1d02\2\u1dc1")
buf.write("\2\u1e02\2\u1f17\2\u1f1a\2\u1f1f\2\u1f22\2\u1f47\2\u1f4a")
buf.write("\2\u1f4f\2\u1f52\2\u1f59\2\u1f5b\2\u1f5b\2\u1f5d\2\u1f5d")
buf.write("\2\u1f5f\2\u1f5f\2\u1f61\2\u1f7f\2\u1f82\2\u1fb6\2\u1fb8")
buf.write("\2\u1fbe\2\u1fc0\2\u1fc0\2\u1fc4\2\u1fc6\2\u1fc8\2\u1fce")
buf.write("\2\u1fd2\2\u1fd5\2\u1fd8\2\u1fdd\2\u1fe2\2\u1fee\2\u1ff4")
buf.write("\2\u1ff6\2\u1ff8\2\u1ffe\2\u2073\2\u2073\2\u2081\2\u2081")
buf.write("\2\u2092\2\u209e\2\u2104\2\u2104\2\u2109\2\u2109\2\u210c")
buf.write("\2\u2115\2\u2117\2\u2117\2\u211b\2\u211f\2\u2126\2\u2126")
buf.write("\2\u2128\2\u2128\2\u212a\2\u212a\2\u212c\2\u212f\2\u2131")
buf.write("\2\u213b\2\u213e\2\u2141\2\u2147\2\u214b\2\u2150\2\u2150")
buf.write("\2\u2185\2\u2186\2\u2c02\2\u2c30\2\u2c32\2\u2c60\2\u2c62")
buf.write("\2\u2ce6\2\u2ced\2\u2cf0\2\u2cf4\2\u2cf5\2\u2d02\2\u2d27")
buf.write("\2\u2d29\2\u2d29\2\u2d2f\2\u2d2f\2\u2d32\2\u2d69\2\u2d71")
buf.write("\2\u2d71\2\u2d82\2\u2d98\2\u2da2\2\u2da8\2\u2daa\2\u2db0")
buf.write("\2\u2db2\2\u2db8\2\u2dba\2\u2dc0\2\u2dc2\2\u2dc8\2\u2dca")
buf.write("\2\u2dd0\2\u2dd2\2\u2dd8\2\u2dda\2\u2de0\2\u2e31\2\u2e31")
buf.write("\2\u3007\2\u3008\2\u3033\2\u3037\2\u303d\2\u303e\2\u3043")
buf.write("\2\u3098\2\u309f\2\u30a1\2\u30a3\2\u30fc\2\u30fe\2\u3101")
buf.write("\2\u3107\2\u3130\2\u3133\2\u3190\2\u31a2\2\u31bc\2\u31f2")
buf.write("\2\u3201\2\u3402\2\u4db7\2\u4e02\2\u9fec\2\ua002\2\ua48e")
buf.write("\2\ua4d2\2\ua4ff\2\ua502\2\ua60e\2\ua612\2\ua621\2\ua62c")
buf.write("\2\ua62d\2\ua642\2\ua670\2\ua681\2\ua69f\2\ua6a2\2\ua6e7")
buf.write("\2\ua719\2\ua721\2\ua724\2\ua78a\2\ua78d\2\ua7b0\2\ua7b2")
buf.write("\2\ua7b9\2\ua7f9\2\ua803\2\ua805\2\ua807\2\ua809\2\ua80c")
buf.write("\2\ua80e\2\ua824\2\ua842\2\ua875\2\ua884\2\ua8b5\2\ua8f4")
buf.write("\2\ua8f9\2\ua8fd\2\ua8fd\2\ua8ff\2\ua8ff\2\ua90c\2\ua927")
buf.write("\2\ua932\2\ua948\2\ua962\2\ua97e\2\ua986\2\ua9b4\2\ua9d1")
buf.write("\2\ua9d1\2\ua9e2\2\ua9e6\2\ua9e8\2\ua9f1\2\ua9fc\2\uaa00")
buf.write("\2\uaa02\2\uaa2a\2\uaa42\2\uaa44\2\uaa46\2\uaa4d\2\uaa62")
buf.write("\2\uaa78\2\uaa7c\2\uaa7c\2\uaa80\2\uaab1\2\uaab3\2\uaab3")
buf.write("\2\uaab7\2\uaab8\2\uaabb\2\uaabf\2\uaac2\2\uaac2\2\uaac4")
buf.write("\2\uaac4\2\uaadd\2\uaadf\2\uaae2\2\uaaec\2\uaaf4\2\uaaf6")
buf.write("\2\uab03\2\uab08\2\uab0b\2\uab10\2\uab13\2\uab18\2\uab22")
buf.write("\2\uab28\2\uab2a\2\uab30\2\uab32\2\uab5c\2\uab5e\2\uab67")
buf.write("\2\uab72\2\uabe4\2\uac02\2\ud7a5\2\ud7b2\2\ud7c8\2\ud7cd")
buf.write("\2\ud7fd\2\uf902\2\ufa6f\2\ufa72\2\ufadb\2\ufb02\2\ufb08")
buf.write("\2\ufb15\2\ufb19\2\ufb1f\2\ufb1f\2\ufb21\2\ufb2a\2\ufb2c")
buf.write("\2\ufb38\2\ufb3a\2\ufb3e\2\ufb40\2\ufb40\2\ufb42\2\ufb43")
buf.write("\2\ufb45\2\ufb46\2\ufb48\2\ufbb3\2\ufbd5\2\ufd3f\2\ufd52")
buf.write("\2\ufd91\2\ufd94\2\ufdc9\2\ufdf2\2\ufdfd\2\ufe72\2\ufe76")
buf.write("\2\ufe78\2\ufefe\2\uff23\2\uff3c\2\uff43\2\uff5c\2\uff68")
buf.write("\2\uffc0\2\uffc4\2\uffc9\2\uffcc\2\uffd1\2\uffd4\2\uffd9")
buf.write("\2\uffdc\2\uffde\2\2\3\r\3\17\3(\3*\3<\3>\3?\3A\3O\3R")
buf.write("\3_\3\u0082\3\u00fc\3\u0282\3\u029e\3\u02a2\3\u02d2\3")
buf.write("\u0302\3\u0321\3\u032f\3\u0342\3\u0344\3\u034b\3\u0352")
buf.write("\3\u0377\3\u0382\3\u039f\3\u03a2\3\u03c5\3\u03ca\3\u03d1")
buf.write("\3\u0402\3\u049f\3\u04b2\3\u04d5\3\u04da\3\u04fd\3\u0502")
buf.write("\3\u0529\3\u0532\3\u0565\3\u0602\3\u0738\3\u0742\3\u0757")
buf.write("\3\u0762\3\u0769\3\u0802\3\u0807\3\u080a\3\u080a\3\u080c")
buf.write("\3\u0837\3\u0839\3\u083a\3\u083e\3\u083e\3\u0841\3\u0857")
buf.write("\3\u0862\3\u0878\3\u0882\3\u08a0\3\u08e2\3\u08f4\3\u08f6")
buf.write("\3\u08f7\3\u0902\3\u0917\3\u0922\3\u093b\3\u0982\3\u09b9")
buf.write("\3\u09c0\3\u09c1\3\u0a02\3\u0a02\3\u0a12\3\u0a15\3\u0a17")
buf.write("\3\u0a19\3\u0a1b\3\u0a35\3\u0a62\3\u0a7e\3\u0a82\3\u0a9e")
buf.write("\3\u0ac2\3\u0ac9\3\u0acb\3\u0ae6\3\u0b02\3\u0b37\3\u0b42")
buf.write("\3\u0b57\3\u0b62\3\u0b74\3\u0b82\3\u0b93\3\u0c02\3\u0c4a")
buf.write("\3\u0c82\3\u0cb4\3\u0cc2\3\u0cf4\3\u1005\3\u1039\3\u1085")
buf.write("\3\u10b1\3\u10d2\3\u10ea\3\u1105\3\u1128\3\u1152\3\u1174")
buf.write("\3\u1178\3\u1178\3\u1185\3\u11b4\3\u11c3\3\u11c6\3\u11dc")
buf.write("\3\u11dc\3\u11de\3\u11de\3\u1202\3\u1213\3\u1215\3\u122d")
buf.write("\3\u1282\3\u1288\3\u128a\3\u128a\3\u128c\3\u128f\3\u1291")
buf.write("\3\u129f\3\u12a1\3\u12aa\3\u12b2\3\u12e0\3\u1307\3\u130e")
buf.write("\3\u1311\3\u1312\3\u1315\3\u132a\3\u132c\3\u1332\3\u1334")
buf.write("\3\u1335\3\u1337\3\u133b\3\u133f\3\u133f\3\u1352\3\u1352")
buf.write("\3\u135f\3\u1363\3\u1402\3\u1436\3\u1449\3\u144c\3\u1482")
buf.write("\3\u14b1\3\u14c6\3\u14c7\3\u14c9\3\u14c9\3\u1582\3\u15b0")
buf.write("\3\u15da\3\u15dd\3\u1602\3\u1631\3\u1646\3\u1646\3\u1682")
buf.write("\3\u16ac\3\u1702\3\u171b\3\u18a2\3\u18e1\3\u1901\3\u1901")
buf.write("\3\u1a02\3\u1a02\3\u1a0d\3\u1a34\3\u1a3c\3\u1a3c\3\u1a52")
buf.write("\3\u1a52\3\u1a5e\3\u1a85\3\u1a88\3\u1a8b\3\u1ac2\3\u1afa")
buf.write("\3\u1c02\3\u1c0a\3\u1c0c\3\u1c30\3\u1c42\3\u1c42\3\u1c74")
buf.write("\3\u1c91\3\u1d02\3\u1d08\3\u1d0a\3\u1d0b\3\u1d0d\3\u1d32")
buf.write("\3\u1d48\3\u1d48\3\u2002\3\u239b\3\u2482\3\u2545\3\u3002")
buf.write("\3\u3430\3\u4402\3\u4648\3\u6802\3\u6a3a\3\u6a42\3\u6a60")
buf.write("\3\u6ad2\3\u6aef\3\u6b02\3\u6b31\3\u6b42\3\u6b45\3\u6b65")
buf.write("\3\u6b79\3\u6b7f\3\u6b91\3\u6f02\3\u6f46\3\u6f52\3\u6f52")
buf.write("\3\u6f95\3\u6fa1\3\u6fe2\3\u6fe3\3\u7002\3\u87ee\3\u8802")
buf.write("\3\u8af4\3\ub002\3\ub120\3\ub172\3\ub2fd\3\ubc02\3\ubc6c")
buf.write("\3\ubc72\3\ubc7e\3\ubc82\3\ubc8a\3\ubc92\3\ubc9b\3\ud402")
buf.write("\3\ud456\3\ud458\3\ud49e\3\ud4a0\3\ud4a1\3\ud4a4\3\ud4a4")
buf.write("\3\ud4a7\3\ud4a8\3\ud4ab\3\ud4ae\3\ud4b0\3\ud4bb\3\ud4bd")
buf.write("\3\ud4bd\3\ud4bf\3\ud4c5\3\ud4c7\3\ud507\3\ud509\3\ud50c")
buf.write("\3\ud50f\3\ud516\3\ud518\3\ud51e\3\ud520\3\ud53b\3\ud53d")
buf.write("\3\ud540\3\ud542\3\ud546\3\ud548\3\ud548\3\ud54c\3\ud552")
buf.write("\3\ud554\3\ud6a7\3\ud6aa\3\ud6c2\3\ud6c4\3\ud6dc\3\ud6de")
buf.write("\3\ud6fc\3\ud6fe\3\ud716\3\ud718\3\ud736\3\ud738\3\ud750")
buf.write("\3\ud752\3\ud770\3\ud772\3\ud78a\3\ud78c\3\ud7aa\3\ud7ac")
buf.write("\3\ud7c4\3\ud7c6\3\ud7cd\3\ue802\3\ue8c6\3\ue902\3\ue945")
buf.write("\3\uee02\3\uee05\3\uee07\3\uee21\3\uee23\3\uee24\3\uee26")
buf.write("\3\uee26\3\uee29\3\uee29\3\uee2b\3\uee34\3\uee36\3\uee39")
buf.write("\3\uee3b\3\uee3b\3\uee3d\3\uee3d\3\uee44\3\uee44\3\uee49")
buf.write("\3\uee49\3\uee4b\3\uee4b\3\uee4d\3\uee4d\3\uee4f\3\uee51")
buf.write("\3\uee53\3\uee54\3\uee56\3\uee56\3\uee59\3\uee59\3\uee5b")
buf.write("\3\uee5b\3\uee5d\3\uee5d\3\uee5f\3\uee5f\3\uee61\3\uee61")
buf.write("\3\uee63\3\uee64\3\uee66\3\uee66\3\uee69\3\uee6c\3\uee6e")
buf.write("\3\uee74\3\uee76\3\uee79\3\uee7b\3\uee7e\3\uee80\3\uee80")
buf.write("\3\uee82\3\uee8b\3\uee8d\3\uee9d\3\ueea3\3\ueea5\3\ueea7")
buf.write("\3\ueeab\3\ueead\3\ueebd\3\2\4\ua6d8\4\ua702\4\ub736\4")
buf.write("\ub742\4\ub81f\4\ub822\4\ucea3\4\uceb2\4\uebe2\4\uf802")
buf.write("\4\ufa1f\4\u0109\2\u0302\2\u0371\2\u0485\2\u048b\2\u0593")
buf.write("\2\u05bf\2\u05c1\2\u05c1\2\u05c3\2\u05c4\2\u05c6\2\u05c7")
buf.write("\2\u05c9\2\u05c9\2\u0612\2\u061c\2\u064d\2\u0661\2\u0672")
buf.write("\2\u0672\2\u06d8\2\u06de\2\u06e1\2\u06e6\2\u06e9\2\u06ea")
buf.write("\2\u06ec\2\u06ef\2\u0713\2\u0713\2\u0732\2\u074c\2\u07a8")
buf.write("\2\u07b2\2\u07ed\2\u07f5\2\u0818\2\u081b\2\u081d\2\u0825")
buf.write("\2\u0827\2\u0829\2\u082b\2\u082f\2\u085b\2\u085d\2\u08d6")
buf.write("\2\u08e3\2\u08e5\2\u0905\2\u093c\2\u093e\2\u0940\2\u0951")
buf.write("\2\u0953\2\u0959\2\u0964\2\u0965\2\u0983\2\u0985\2\u09be")
buf.write("\2\u09be\2\u09c0\2\u09c6\2\u09c9\2\u09ca\2\u09cd\2\u09cf")
buf.write("\2\u09d9\2\u09d9\2\u09e4\2\u09e5\2\u0a03\2\u0a05\2\u0a3e")
buf.write("\2\u0a3e\2\u0a40\2\u0a44\2\u0a49\2\u0a4a\2\u0a4d\2\u0a4f")
buf.write("\2\u0a53\2\u0a53\2\u0a72\2\u0a73\2\u0a77\2\u0a77\2\u0a83")
buf.write("\2\u0a85\2\u0abe\2\u0abe\2\u0ac0\2\u0ac7\2\u0ac9\2\u0acb")
buf.write("\2\u0acd\2\u0acf\2\u0ae4\2\u0ae5\2\u0afc\2\u0b01\2\u0b03")
buf.write("\2\u0b05\2\u0b3e\2\u0b3e\2\u0b40\2\u0b46\2\u0b49\2\u0b4a")
buf.write("\2\u0b4d\2\u0b4f\2\u0b58\2\u0b59\2\u0b64\2\u0b65\2\u0b84")
buf.write("\2\u0b84\2\u0bc0\2\u0bc4\2\u0bc8\2\u0bca\2\u0bcc\2\u0bcf")
buf.write("\2\u0bd9\2\u0bd9\2\u0c02\2\u0c05\2\u0c40\2\u0c46\2\u0c48")
buf.write("\2\u0c4a\2\u0c4c\2\u0c4f\2\u0c57\2\u0c58\2\u0c64\2\u0c65")
buf.write("\2\u0c83\2\u0c85\2\u0cbe\2\u0cbe\2\u0cc0\2\u0cc6\2\u0cc8")
buf.write("\2\u0cca\2\u0ccc\2\u0ccf\2\u0cd7\2\u0cd8\2\u0ce4\2\u0ce5")
buf.write("\2\u0d02\2\u0d05\2\u0d3d\2\u0d3e\2\u0d40\2\u0d46\2\u0d48")
buf.write("\2\u0d4a\2\u0d4c\2\u0d4f\2\u0d59\2\u0d59\2\u0d64\2\u0d65")
buf.write("\2\u0d84\2\u0d85\2\u0dcc\2\u0dcc\2\u0dd1\2\u0dd6\2\u0dd8")
buf.write("\2\u0dd8\2\u0dda\2\u0de1\2\u0df4\2\u0df5\2\u0e33\2\u0e33")
buf.write("\2\u0e36\2\u0e3c\2\u0e49\2\u0e50\2\u0eb3\2\u0eb3\2\u0eb6")
buf.write("\2\u0ebb\2\u0ebd\2\u0ebe\2\u0eca\2\u0ecf\2\u0f1a\2\u0f1b")
buf.write("\2\u0f37\2\u0f37\2\u0f39\2\u0f39\2\u0f3b\2\u0f3b\2\u0f40")
buf.write("\2\u0f41\2\u0f73\2\u0f86\2\u0f88\2\u0f89\2\u0f8f\2\u0f99")
buf.write("\2\u0f9b\2\u0fbe\2\u0fc8\2\u0fc8\2\u102d\2\u1040\2\u1058")
buf.write("\2\u105b\2\u1060\2\u1062\2\u1064\2\u1066\2\u1069\2\u106f")
buf.write("\2\u1073\2\u1076\2\u1084\2\u108f\2\u1091\2\u1091\2\u109c")
buf.write("\2\u109f\2\u135f\2\u1361\2\u1714\2\u1716\2\u1734\2\u1736")
buf.write("\2\u1754\2\u1755\2\u1774\2\u1775\2\u17b6\2\u17d5\2\u17df")
buf.write("\2\u17df\2\u180d\2\u180f\2\u1887\2\u1888\2\u18ab\2\u18ab")
buf.write("\2\u1922\2\u192d\2\u1932\2\u193d\2\u1a19\2\u1a1d\2\u1a57")
buf.write("\2\u1a60\2\u1a62\2\u1a7e\2\u1a81\2\u1a81\2\u1ab2\2\u1ac0")
buf.write("\2\u1b02\2\u1b06\2\u1b36\2\u1b46\2\u1b6d\2\u1b75\2\u1b82")
buf.write("\2\u1b84\2\u1ba3\2\u1baf\2\u1be8\2\u1bf5\2\u1c26\2\u1c39")
buf.write("\2\u1cd2\2\u1cd4\2\u1cd6\2\u1cea\2\u1cef\2\u1cef\2\u1cf4")
buf.write("\2\u1cf6\2\u1cf9\2\u1cfb\2\u1dc2\2\u1dfb\2\u1dfd\2\u1e01")
buf.write("\2\u20d2\2\u20f2\2\u2cf1\2\u2cf3\2\u2d81\2\u2d81\2\u2de2")
buf.write("\2\u2e01\2\u302c\2\u3031\2\u309b\2\u309c\2\ua671\2\ua674")
buf.write("\2\ua676\2\ua67f\2\ua6a0\2\ua6a1\2\ua6f2\2\ua6f3\2\ua804")
buf.write("\2\ua804\2\ua808\2\ua808\2\ua80d\2\ua80d\2\ua825\2\ua829")
buf.write("\2\ua882\2\ua883\2\ua8b6\2\ua8c7\2\ua8e2\2\ua8f3\2\ua928")
buf.write("\2\ua92f\2\ua949\2\ua955\2\ua982\2\ua985\2\ua9b5\2\ua9c2")
buf.write("\2\ua9e7\2\ua9e7\2\uaa2b\2\uaa38\2\uaa45\2\uaa45\2\uaa4e")
buf.write("\2\uaa4f\2\uaa7d\2\uaa7f\2\uaab2\2\uaab2\2\uaab4\2\uaab6")
buf.write("\2\uaab9\2\uaaba\2\uaac0\2\uaac1\2\uaac3\2\uaac3\2\uaaed")
buf.write("\2\uaaf1\2\uaaf7\2\uaaf8\2\uabe5\2\uabec\2\uabee\2\uabef")
buf.write("\2\ufb20\2\ufb20\2\ufe02\2\ufe11\2\ufe22\2\ufe31\2\u01ff")
buf.write("\3\u01ff\3\u02e2\3\u02e2\3\u0378\3\u037c\3\u0a03\3\u0a05")
buf.write("\3\u0a07\3\u0a08\3\u0a0e\3\u0a11\3\u0a3a\3\u0a3c\3\u0a41")
buf.write("\3\u0a41\3\u0ae7\3\u0ae8\3\u1002\3\u1004\3\u103a\3\u1048")
buf.write("\3\u1081\3\u1084\3\u10b2\3\u10bc\3\u1102\3\u1104\3\u1129")
buf.write("\3\u1136\3\u1175\3\u1175\3\u1182\3\u1184\3\u11b5\3\u11c2")
buf.write("\3\u11cc\3\u11ce\3\u122e\3\u1239\3\u1240\3\u1240\3\u12e1")
buf.write("\3\u12ec\3\u1302\3\u1305\3\u133e\3\u133e\3\u1340\3\u1346")
buf.write("\3\u1349\3\u134a\3\u134d\3\u134f\3\u1359\3\u1359\3\u1364")
buf.write("\3\u1365\3\u1368\3\u136e\3\u1372\3\u1376\3\u1437\3\u1448")
buf.write("\3\u14b2\3\u14c5\3\u15b1\3\u15b7\3\u15ba\3\u15c2\3\u15de")
buf.write("\3\u15df\3\u1632\3\u1642\3\u16ad\3\u16b9\3\u171f\3\u172d")
buf.write("\3\u1a03\3\u1a0c\3\u1a35\3\u1a3b\3\u1a3d\3\u1a40\3\u1a49")
buf.write("\3\u1a49\3\u1a53\3\u1a5d\3\u1a8c\3\u1a9b\3\u1c31\3\u1c38")
buf.write("\3\u1c3a\3\u1c41\3\u1c94\3\u1ca9\3\u1cab\3\u1cb8\3\u1d33")
buf.write("\3\u1d38\3\u1d3c\3\u1d3c\3\u1d3e\3\u1d3f\3\u1d41\3\u1d47")
buf.write("\3\u1d49\3\u1d49\3\u6af2\3\u6af6\3\u6b32\3\u6b38\3\u6f53")
buf.write("\3\u6f80\3\u6f91\3\u6f94\3\ubc9f\3\ubca0\3\ud167\3\ud16b")
buf.write("\3\ud16f\3\ud174\3\ud17d\3\ud184\3\ud187\3\ud18d\3\ud1ac")
buf.write("\3\ud1af\3\ud244\3\ud246\3\uda02\3\uda38\3\uda3d\3\uda6e")
buf.write("\3\uda77\3\uda77\3\uda86\3\uda86\3\uda9d\3\udaa1\3\udaa3")
buf.write("\3\udab1\3\ue002\3\ue008\3\ue00a\3\ue01a\3\ue01d\3\ue023")
buf.write("\3\ue025\3\ue026\3\ue028\3\ue02c\3\ue8d2\3\ue8d8\3\ue946")
buf.write("\3\ue94c\3\u0102\20\u01f1\209\2\62\2;\2\u0662\2\u066b")
buf.write("\2\u06f2\2\u06fb\2\u07c2\2\u07cb\2\u0968\2\u0971\2\u09e8")
buf.write("\2\u09f1\2\u0a68\2\u0a71\2\u0ae8\2\u0af1\2\u0b68\2\u0b71")
buf.write("\2\u0be8\2\u0bf1\2\u0c68\2\u0c71\2\u0ce8\2\u0cf1\2\u0d68")
buf.write("\2\u0d71\2\u0de8\2\u0df1\2\u0e52\2\u0e5b\2\u0ed2\2\u0edb")
buf.write("\2\u0f22\2\u0f2b\2\u1042\2\u104b\2\u1092\2\u109b\2\u17e2")
buf.write("\2\u17eb\2\u1812\2\u181b\2\u1948\2\u1951\2\u19d2\2\u19db")
buf.write("\2\u1a82\2\u1a8b\2\u1a92\2\u1a9b\2\u1b52\2\u1b5b\2\u1bb2")
buf.write("\2\u1bbb\2\u1c42\2\u1c4b\2\u1c52\2\u1c5b\2\ua622\2\ua62b")
buf.write("\2\ua8d2\2\ua8db\2\ua902\2\ua90b\2\ua9d2\2\ua9db\2\ua9f2")
buf.write("\2\ua9fb\2\uaa52\2\uaa5b\2\uabf2\2\uabfb\2\uff12\2\uff1b")
buf.write("\2\u04a2\3\u04ab\3\u1068\3\u1071\3\u10f2\3\u10fb\3\u1138")
buf.write("\3\u1141\3\u11d2\3\u11db\3\u12f2\3\u12fb\3\u1452\3\u145b")
buf.write("\3\u14d2\3\u14db\3\u1652\3\u165b\3\u16c2\3\u16cb\3\u1732")
buf.write("\3\u173b\3\u18e2\3\u18eb\3\u1c52\3\u1c5b\3\u1d52\3\u1d5b")
buf.write("\3\u6a62\3\u6a6b\3\u6b52\3\u6b5b\3\ud7d0\3\ud801\3\ue952")
buf.write("\3\ue95b\3\u00da\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2")
buf.write("\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21")
buf.write("\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3")
buf.write("\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2")
buf.write("\2\2+\3\2\2\2\3-\3\2\2\2\5/\3\2\2\2\7\61\3\2\2\2\tI\3")
buf.write("\2\2\2\13K\3\2\2\2\r}\3\2\2\2\17\177\3\2\2\2\21\u0086")
buf.write("\3\2\2\2\23\u0089\3\2\2\2\25\u008e\3\2\2\2\27\u0093\3")
buf.write("\2\2\2\31\u009a\3\2\2\2\33\u00a2\3\2\2\2\35\u00a9\3\2")
buf.write("\2\2\37\u00ad\3\2\2\2!\u00af\3\2\2\2#\u00b1\3\2\2\2%\u00b8")
buf.write("\3\2\2\2\'\u00ba\3\2\2\2)\u00bc\3\2\2\2+\u00bf\3\2\2\2")
buf.write("-.\7$\2\2.\4\3\2\2\2/\60\7)\2\2\60\6\3\2\2\2\61\62\7<")
buf.write("\2\2\62\b\3\2\2\2\63\66\5\23\n\2\64\65\7/\2\2\65\67\5")
buf.write("\33\16\2\66\64\3\2\2\2\678\3\2\2\28\66\3\2\2\289\3\2\2")
buf.write("\29J\3\2\2\2:;\5\33\16\2;<\7/\2\2<>\3\2\2\2=:\3\2\2\2")
buf.write(">?\3\2\2\2?=\3\2\2\2?@\3\2\2\2@A\3\2\2\2AF\5\23\n\2BC")
buf.write("\7/\2\2CE\5\33\16\2DB\3\2\2\2EH\3\2\2\2FD\3\2\2\2FG\3")
buf.write("\2\2\2GJ\3\2\2\2HF\3\2\2\2I\63\3\2\2\2I=\3\2\2\2J\n\3")
buf.write("\2\2\2KO\5\33\16\2LM\5\'\24\2MN\5\33\16\2NP\3\2\2\2OL")
buf.write("\3\2\2\2PQ\3\2\2\2QO\3\2\2\2QR\3\2\2\2R\f\3\2\2\2ST\5")
buf.write("\37\20\2TU\t\2\2\2U[\5!\21\2VY\7/\2\2WZ\5\23\n\2XZ\5\25")
buf.write("\13\2YW\3\2\2\2YX\3\2\2\2Z\\\3\2\2\2[V\3\2\2\2\\]\3\2")
buf.write("\2\2][\3\2\2\2]^\3\2\2\2^~\3\2\2\2_b\5\23\n\2`b\5\25\13")
buf.write("\2a_\3\2\2\2a`\3\2\2\2by\3\2\2\2cm\7/\2\2dn\5\23\n\2e")
buf.write("j\5\25\13\2fg\7.\2\2gi\5\25\13\2hf\3\2\2\2il\3\2\2\2j")
buf.write("h\3\2\2\2jk\3\2\2\2kn\3\2\2\2lj\3\2\2\2md\3\2\2\2me\3")
buf.write("\2\2\2nz\3\2\2\2op\5\37\20\2pq\5\25\13\2qv\5!\21\2rs\7")
buf.write(".\2\2su\5\25\13\2tr\3\2\2\2ux\3\2\2\2vt\3\2\2\2vw\3\2")
buf.write("\2\2wz\3\2\2\2xv\3\2\2\2yc\3\2\2\2yo\3\2\2\2z{\3\2\2\2")
buf.write("{y\3\2\2\2{|\3\2\2\2|~\3\2\2\2}S\3\2\2\2}a\3\2\2\2~\16")
buf.write("\3\2\2\2\177\u0082\5\23\n\2\u0080\u0081\7)\2\2\u0081\u0083")
buf.write("\5\23\n\2\u0082\u0080\3\2\2\2\u0083\u0084\3\2\2\2\u0084")
buf.write("\u0082\3\2\2\2\u0084\u0085\3\2\2\2\u0085\20\3\2\2\2\u0086")
buf.write("\u0087\7/\2\2\u0087\22\3\2\2\2\u0088\u008a\5#\22\2\u0089")
buf.write("\u0088\3\2\2\2\u008a\u008b\3\2\2\2\u008b\u0089\3\2\2\2")
buf.write("\u008b\u008c\3\2\2\2\u008c\24\3\2\2\2\u008d\u008f\5%\23")
buf.write("\2\u008e\u008d\3\2\2\2\u008f\u0090\3\2\2\2\u0090\u008e")
buf.write("\3\2\2\2\u0090\u0091\3\2\2\2\u0091\26\3\2\2\2\u0092\u0094")
buf.write("\5#\22\2\u0093\u0092\3\2\2\2\u0094\u0095\3\2\2\2\u0095")
buf.write("\u0093\3\2\2\2\u0095\u0096\3\2\2\2\u0096\u0097\3\2\2\2")
buf.write("\u0097\u0098\7\60\2\2\u0098\30\3\2\2\2\u0099\u009b\5\33")
buf.write("\16\2\u009a\u0099\3\2\2\2\u009b\u009c\3\2\2\2\u009c\u009a")
buf.write("\3\2\2\2\u009c\u009d\3\2\2\2\u009d\u009e\3\2\2\2\u009e")
buf.write("\u009f\t\3\2\2\u009f\32\3\2\2\2\u00a0\u00a3\5#\22\2\u00a1")
buf.write("\u00a3\5%\23\2\u00a2\u00a0\3\2\2\2\u00a2\u00a1\3\2\2\2")
buf.write("\u00a3\u00a4\3\2\2\2\u00a4\u00a2\3\2\2\2\u00a4\u00a5\3")
buf.write("\2\2\2\u00a5\34\3\2\2\2\u00a6\u00aa\5#\22\2\u00a7\u00aa")
buf.write("\5%\23\2\u00a8\u00aa\5)\25\2\u00a9\u00a6\3\2\2\2\u00a9")
buf.write("\u00a7\3\2\2\2\u00a9\u00a8\3\2\2\2\u00aa\u00ab\3\2\2\2")
buf.write("\u00ab\u00a9\3\2\2\2\u00ab\u00ac\3\2\2\2\u00ac\36\3\2")
buf.write("\2\2\u00ad\u00ae\7*\2\2\u00ae \3\2\2\2\u00af\u00b0\7+")
buf.write("\2\2\u00b0\"\3\2\2\2\u00b1\u00b5\t\6\2\2\u00b2\u00b4\t")
buf.write("\7\2\2\u00b3\u00b2\3\2\2\2\u00b4\u00b7\3\2\2\2\u00b5\u00b3")
buf.write("\3\2\2\2\u00b5\u00b6\3\2\2\2\u00b6$\3\2\2\2\u00b7\u00b5")
buf.write("\3\2\2\2\u00b8\u00b9\t\b\2\2\u00b9&\3\2\2\2\u00ba\u00bb")
buf.write("\4.\61\2\u00bb(\3\2\2\2\u00bc\u00bd\t\4\2\2\u00bd*\3\2")
buf.write("\2\2\u00be\u00c0\t\5\2\2\u00bf\u00be\3\2\2\2\u00c0\u00c1")
buf.write("\3\2\2\2\u00c1\u00bf\3\2\2\2\u00c1\u00c2\3\2\2\2\u00c2")
buf.write("\u00c3\3\2\2\2\u00c3\u00c4\b\26\2\2\u00c4,\3\2\2\2\34")
buf.write("\28?FIQY]ajmvy{}\u0084\u008b\u0090\u0095\u009c\u00a2\u00a4")
buf.write("\u00a9\u00ab\u00b5\u00c1\3\b\2\2")
return buf.getvalue()
class ScicopiaLexer(Lexer):
    """Lexer generated by ANTLR 4.9.2 from Scicopia.g4 — do not edit by hand."""

    # Augmented transition network, deserialized once at class-definition
    # time and shared by every lexer instance.
    atn = ATNDeserializer().deserialize(serializedATN())

    # One DFA per ATN decision state, shared across instances.
    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants; values must match the serialized ATN above.
    T__0 = 1
    T__1 = 2
    T__2 = 3
    DASH = 4
    NUM = 5
    COMPOUND = 6
    APOSTROPHE = 7
    NOT = 8
    ALPHA = 9
    DIGITS = 10
    ABBREV = 11
    CHARGED = 12
    ALPHANUM = 13
    STRING = 14
    LPAR = 15
    RPAR = 16
    WHITESPACE = 17

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    # Literal token spellings, indexed by token type (index 0 is unused).
    literalNames = [ "<INVALID>",
            "'\"'", "'''", "':'", "'-'", "'('", "')'" ]

    # Symbolic token names, indexed by token type.
    symbolicNames = [ "<INVALID>",
            "DASH", "NUM", "COMPOUND", "APOSTROPHE", "NOT", "ALPHA", "DIGITS",
            "ABBREV", "CHARGED", "ALPHANUM", "STRING", "LPAR", "RPAR", "WHITESPACE" ]

    # Rule names include fragment rules (LETTER, DIGIT, PCT, ASCII) that do
    # not emit tokens of their own.
    ruleNames = [ "T__0", "T__1", "T__2", "DASH", "NUM", "COMPOUND", "APOSTROPHE",
                  "NOT", "ALPHA", "DIGITS", "ABBREV", "CHARGED", "ALPHANUM",
                  "STRING", "LPAR", "RPAR", "LETTER", "DIGIT", "PCT", "ASCII",
                  "WHITESPACE" ]

    grammarFileName = "Scicopia.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        """Create the lexer; `input` is an antlr4 input stream, `output` the error sink."""
        super().__init__(input, output)
        # Refuse to run against an incompatible antlr4 runtime version.
        self.checkVersion("4.9.2")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
| 68.899263
| 103
| 0.61415
| 6,062
| 28,042
| 2.836028
| 0.307489
| 0.158213
| 0.098418
| 0.025593
| 0.065437
| 0.03839
| 0.020126
| 0.017973
| 0.014367
| 0.014367
| 0
| 0.349965
| 0.125918
| 28,042
| 406
| 104
| 69.068966
| 0.351434
| 0.001462
| 0
| 0
| 1
| 0.853093
| 0.681894
| 0.671786
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005155
| false
| 0
| 0.012887
| 0
| 0.087629
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bc031334e22f739f970afae8284b1de7a3fbf622
| 3,772
|
py
|
Python
|
src/models/lla7.py
|
Neuralwood-Net/face-recognizer-9000
|
b7804355927540bf07ce70cfe44dac6988a9b8cc
|
[
"MIT"
] | null | null | null |
src/models/lla7.py
|
Neuralwood-Net/face-recognizer-9000
|
b7804355927540bf07ce70cfe44dac6988a9b8cc
|
[
"MIT"
] | null | null | null |
src/models/lla7.py
|
Neuralwood-Net/face-recognizer-9000
|
b7804355927540bf07ce70cfe44dac6988a9b8cc
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class FleetwoodNet7V1(nn.Module):
    """VGG-style CNN for single-channel images.

    Four 3x3 convolution stages (64 -> 128 -> 256 -> 256 channels) with ReLU
    and three 2x2 max-pools, an adaptive average pool to a fixed 8x8 grid,
    and a two-hidden-layer MLP head with dropout.

    Args:
        num_classes: size of the final output layer.
        init_weights: when True (default), apply explicit Kaiming/normal
            initialization via ``_initialize_weights``.
    """

    def __init__(
        self,
        num_classes,
        init_weights=True,
    ):
        super().__init__()

        # Feature extractor built stage by stage: conv -> ReLU (-> maxpool).
        stages = (
            (1, 64, True),
            (64, 128, True),
            (128, 256, False),
            (256, 256, True),
        )
        layers = []
        for in_ch, out_ch, pool in stages:
            layers.append(nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1))
            layers.append(nn.ReLU(inplace=True))
            if pool:
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        self.features = nn.Sequential(*layers)

        # Fix the spatial size at 8x8 regardless of input resolution.
        self.avgpool = nn.AdaptiveAvgPool2d((8, 8))

        # Classification head: 4096 -> 4096 -> num_classes with dropout.
        self.classifier = nn.Sequential(
            nn.Linear(256 * 8 * 8, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )

        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        """Return class logits of shape (batch, num_classes) for input x."""
        feats = self.features(x)
        pooled = self.avgpool(feats)
        flat = torch.flatten(pooled, 1)
        return self.classifier(flat)

    def _initialize_weights(self):
        """Kaiming-normal init for convs, N(0, 0.01) for linears, zero biases."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                # No BatchNorm layers are registered above; kept for parity
                # with the sibling architectures that do use them.
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
class FleetwoodNet9V2(nn.Module):
    """Deeper VGG-style CNN for single-channel images, with batch norm.

    Six 3x3 convolution layers (128 -> 128 -> 256 -> 256 -> 512 -> 512
    channels), each followed by BatchNorm2d and ReLU, with three 2x2
    max-pools; an adaptive average pool to 8x8; and the same
    4096 -> 4096 -> num_classes dropout MLP head as FleetwoodNet7V1.

    Args:
        num_classes: size of the final output layer.
        init_weights: when True, apply the same explicit initialization as
            FleetwoodNet7V1. Defaults to False so existing callers keep the
            PyTorch default initialization they had before this option.
    """

    def __init__(
        self,
        num_classes,
        init_weights=False,
    ):
        super().__init__()
        # Convolutional Layers
        self.features = nn.Sequential(
            nn.Conv2d(1, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(256, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Fix the spatial size at 8x8 regardless of input resolution.
        self.avgpool = nn.AdaptiveAvgPool2d((8, 8))
        # Fully-connected classification layers
        self.classifier = nn.Sequential(
            nn.Linear(512 * 8 * 8, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        # Consistency with FleetwoodNet7V1: optional explicit init.
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        """Return class logits of shape (batch, num_classes) for input x."""
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        """Kaiming-normal init for convs, unit-gamma BatchNorm, N(0, 0.01) linears.

        Mirrors FleetwoodNet7V1._initialize_weights so both models share one
        initialization scheme when opted in.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
| 31.173554
| 86
| 0.526511
| 452
| 3,772
| 4.283186
| 0.165929
| 0.082645
| 0.094008
| 0.122934
| 0.805269
| 0.805269
| 0.78874
| 0.757231
| 0.757231
| 0.757231
| 0
| 0.077456
| 0.349682
| 3,772
| 120
| 87
| 31.433333
| 0.711781
| 0.031018
| 0
| 0.69697
| 0
| 0
| 0.003014
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050505
| false
| 0
| 0.040404
| 0
| 0.131313
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
70b4f2f4ae759f100993ad441e0e904b1a1b8cda
| 202
|
py
|
Python
|
iterm2_images/__init__.py
|
crowsonkb/iterm2-images
|
74839760cc8df7c3688c0611f2838251f1f7ce4c
|
[
"MIT"
] | null | null | null |
iterm2_images/__init__.py
|
crowsonkb/iterm2-images
|
74839760cc8df7c3688c0611f2838251f1f7ce4c
|
[
"MIT"
] | null | null | null |
iterm2_images/__init__.py
|
crowsonkb/iterm2-images
|
74839760cc8df7c3688c0611f2838251f1f7ce4c
|
[
"MIT"
] | null | null | null |
"""Inline images and file transfers for iTerm2."""
from .payloads import FileEsc, ImageLenUnit, ImageDim, ImageEsc
__all__ = ['FileEsc', 'ImageLenUnit', 'ImageDim', 'ImageEsc']
__version__ = '0.1.0'
| 25.25
| 63
| 0.722772
| 23
| 202
| 6
| 0.782609
| 0.275362
| 0.391304
| 0.507246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.128713
| 202
| 7
| 64
| 28.857143
| 0.761364
| 0.217822
| 0
| 0
| 0
| 0
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
cb445ec5444cd991521ff653a8f4d9d4947022a5
| 23
|
py
|
Python
|
__init__.py
|
p3lim/pycaddy
|
06f3f81ac4963ea9f33e4cebfbc26a192797c060
|
[
"Unlicense"
] | null | null | null |
__init__.py
|
p3lim/pycaddy
|
06f3f81ac4963ea9f33e4cebfbc26a192797c060
|
[
"Unlicense"
] | null | null | null |
__init__.py
|
p3lim/pycaddy
|
06f3f81ac4963ea9f33e4cebfbc26a192797c060
|
[
"Unlicense"
] | null | null | null |
from .pycaddy import *
| 11.5
| 22
| 0.73913
| 3
| 23
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cbdd353e24127433a693c7b17b59da30a2b8a103
| 47
|
py
|
Python
|
giant/__init__.py
|
lixar/giant
|
fba966e4389b80b38bee1067ad9173adf4eaa5b5
|
[
"MIT"
] | null | null | null |
giant/__init__.py
|
lixar/giant
|
fba966e4389b80b38bee1067ad9173adf4eaa5b5
|
[
"MIT"
] | 2
|
2016-05-26T14:40:07.000Z
|
2017-04-13T21:07:16.000Z
|
giant/__init__.py
|
lixar/giant
|
fba966e4389b80b38bee1067ad9173adf4eaa5b5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from .giant import giant
| 15.666667
| 24
| 0.744681
| 8
| 47
| 4.375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 47
| 3
| 24
| 15.666667
| 0.853659
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1dc83bf38dea68e5fa59aec50cd42cf81c0bc8e7
| 115
|
py
|
Python
|
Lab1/lab1_boiko.py
|
Nickas47/python_savka
|
31101bba6a7e75bc398136d01e5e0cb9d68df097
|
[
"Apache-2.0"
] | null | null | null |
Lab1/lab1_boiko.py
|
Nickas47/python_savka
|
31101bba6a7e75bc398136d01e5e0cb9d68df097
|
[
"Apache-2.0"
] | null | null | null |
Lab1/lab1_boiko.py
|
Nickas47/python_savka
|
31101bba6a7e75bc398136d01e5e0cb9d68df097
|
[
"Apache-2.0"
] | null | null | null |
print("'Python': \n Lab1 Mykola Boiko, pm-12343")
print('Savka, '*44+'Savka')
print(123.8+((11-21.1/2)/(87-32.2)))
| 28.75
| 49
| 0.617391
| 22
| 115
| 3.227273
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216981
| 0.078261
| 115
| 3
| 50
| 38.333333
| 0.45283
| 0
| 0
| 0
| 0
| 0
| 0.452174
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
1dce902408db5ee1414702b0dd430e71f3feee8c
| 35
|
py
|
Python
|
cytominer_eval/transform/__init__.py
|
michaelbornholdt/cytominer-eval
|
97b471dd4141d29bfcb06921cb1e294596c39ecf
|
[
"BSD-3-Clause"
] | 4
|
2020-06-11T20:31:17.000Z
|
2021-02-12T04:12:43.000Z
|
cytominer_eval/transform/__init__.py
|
michaelbornholdt/cytominer-eval
|
97b471dd4141d29bfcb06921cb1e294596c39ecf
|
[
"BSD-3-Clause"
] | 46
|
2020-06-16T11:31:49.000Z
|
2021-12-07T10:52:00.000Z
|
cytominer_eval/transform/__init__.py
|
michaelbornholdt/cytominer-eval
|
97b471dd4141d29bfcb06921cb1e294596c39ecf
|
[
"BSD-3-Clause"
] | 6
|
2020-06-11T18:36:31.000Z
|
2021-04-15T19:38:52.000Z
|
from .transform import metric_melt
| 17.5
| 34
| 0.857143
| 5
| 35
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1de087e7cf06560132b5a3fa47fcfc082d7d2542
| 552
|
py
|
Python
|
wrappers/python/tests/non_secrets/test_add_wallet_record.py
|
sklump/indy-sdk
|
ee05a89ddf60b42f7483bebf2d89a936e12730df
|
[
"Apache-2.0"
] | 636
|
2017-05-25T07:45:43.000Z
|
2022-03-23T22:30:34.000Z
|
wrappers/python/tests/non_secrets/test_add_wallet_record.py
|
Nick-1979/indy-sdk
|
e5f812e14962f0d51cf96f843033754ff841ce30
|
[
"Apache-2.0"
] | 731
|
2017-05-29T07:15:08.000Z
|
2022-03-31T07:55:58.000Z
|
wrappers/python/tests/non_secrets/test_add_wallet_record.py
|
Nick-1979/indy-sdk
|
e5f812e14962f0d51cf96f843033754ff841ce30
|
[
"Apache-2.0"
] | 904
|
2017-05-25T07:45:49.000Z
|
2022-03-31T07:43:31.000Z
|
import pytest
from indy import error
from tests.non_secrets.common import *
@pytest.mark.asyncio
async def test_add_wallet_record_works(wallet_handle):
await non_secrets.add_wallet_record(wallet_handle, type_, id1, value1, tags1)
@pytest.mark.asyncio
async def test_add_wallet_record_works_for_duplicate(wallet_handle):
await non_secrets.add_wallet_record(wallet_handle, type_, id1, value1, tags1)
with pytest.raises(error.WalletItemAlreadyExists):
await non_secrets.add_wallet_record(wallet_handle, type_, id1, value1, tags1)
| 32.470588
| 85
| 0.811594
| 79
| 552
| 5.316456
| 0.367089
| 0.107143
| 0.178571
| 0.128571
| 0.719048
| 0.719048
| 0.719048
| 0.719048
| 0.719048
| 0.719048
| 0
| 0.018405
| 0.11413
| 552
| 16
| 86
| 34.5
| 0.840491
| 0
| 0
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.272727
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1df03d6f373e17c7e521ba9e893650c959234f49
| 124
|
py
|
Python
|
src/mercs/algo/__init__.py
|
MattiasDC/mercs
|
466962e254c4f56f4a16a31b1a3d7bd893c8e23e
|
[
"MIT"
] | 11
|
2020-01-28T16:15:53.000Z
|
2021-05-20T08:05:42.000Z
|
src/mercs/algo/__init__.py
|
MattiasDC/mercs
|
466962e254c4f56f4a16a31b1a3d7bd893c8e23e
|
[
"MIT"
] | null | null | null |
src/mercs/algo/__init__.py
|
MattiasDC/mercs
|
466962e254c4f56f4a16a31b1a3d7bd893c8e23e
|
[
"MIT"
] | 4
|
2020-02-06T09:02:28.000Z
|
2022-02-14T09:42:04.000Z
|
from .new_prediction import (mi, mrai, it, rw)
from .selection import base_selection_algorithm, random_selection_algorithm
| 31
| 75
| 0.830645
| 17
| 124
| 5.764706
| 0.705882
| 0.367347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104839
| 124
| 3
| 76
| 41.333333
| 0.882883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1df1820777129248ae5138dd09511cef7f75f054
| 6,106
|
py
|
Python
|
pymccrgb/tests/test_core.py
|
stgl/pymccrgb
|
dc8ad2e46cbe6ff8081c32fa11bce68f869baafa
|
[
"MIT"
] | 3
|
2020-11-30T12:49:14.000Z
|
2021-11-12T00:32:32.000Z
|
pymccrgb/tests/test_core.py
|
rmsare/pymccrgb
|
dc8ad2e46cbe6ff8081c32fa11bce68f869baafa
|
[
"MIT"
] | 20
|
2019-06-18T19:10:00.000Z
|
2019-11-14T22:55:10.000Z
|
pymccrgb/tests/test_core.py
|
rmsare/pymccrgb
|
dc8ad2e46cbe6ff8081c32fa11bce68f869baafa
|
[
"MIT"
] | 3
|
2019-06-14T00:39:25.000Z
|
2019-10-30T14:07:33.000Z
|
""" Test Python MCC bindings and MCC-RGB algorithm """
import os
import pytest
import unittest
import numpy as np
from context import pymccrgb
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
TEST_OUTPUT_DIR = os.path.join(os.path.dirname(__file__), "output")
TEST_SCALES = [0.5, 1.0, 1.5]
TEST_TOLS = [0.01, 0.05, 0.3, 0.5, 1.0]
SEED_VALUE = 42
class MCCTestCase(unittest.TestCase):
def setUp(self):
self.data = pymccrgb.ioutils.read_las(
os.path.join(TEST_DATA_DIR, "points_rgb.laz")
)
def _test_classify_ground_mcc(self, scale, tol):
test = pymccrgb.core.classify_ground_mcc(self.data, scale, tol)
true = np.load(
os.path.join(TEST_OUTPUT_DIR, f"classification_mcc_{scale}_{tol}.npy"),
allow_pickle=True,
)
self.assertSequenceEqual(
test.tolist(),
true.tolist(),
f"MCC ground classification is incorrect for scale {scale} and height tolerance {tol}",
)
def test_mcc_classification(self):
for scale in TEST_SCALES:
for tol in TEST_TOLS:
self._test_classify_ground_mcc(scale, tol)
def test_mcc_default(self):
test_points, test_labels = pymccrgb.core.mcc(self.data, verbose=True)
true_points, true_labels = np.load(
os.path.join(TEST_OUTPUT_DIR, f"ground_labels_mcc_default.npy"),
allow_pickle=True,
)
self.assertTrue(
np.allclose(test_points, true_points),
"Ground points are incorrect for default MCC configuration",
)
self.assertSequenceEqual(
test_labels.tolist(),
true_labels.tolist(),
"Classification is incorrect for default MCC configuration",
)
def test_mcc_default_las_codes(self):
test_points, test_labels = pymccrgb.core.mcc(self.data,
verbose=True,
use_las_codes=True)
true_points, true_labels = np.load(
os.path.join(TEST_OUTPUT_DIR, f"ground_labels_mcc_default.npy"),
allow_pickle=True,
)
self.assertTrue(
np.allclose(test_points, true_points),
"Ground points are incorrect for default MCC configuration using LAS codes",
)
true_labels[true_labels == 0] = 4
true_labels[true_labels == 1] = 2
self.assertSequenceEqual(
test_labels.tolist(),
true_labels.tolist(),
"Classification is incorrect for default MCC configuration using LAS codes",
)
class MCCRGBTestCase(unittest.TestCase):
def setUp(self):
self.data = pymccrgb.ioutils.read_las(
os.path.join(TEST_DATA_DIR, "points_rgb.laz")
)
def test_mcc_rgb_default(self):
test_points, test_labels = pymccrgb.core.mcc_rgb(
self.data, seed=SEED_VALUE, verbose=True
)
true_points, true_labels = np.load(
os.path.join(TEST_OUTPUT_DIR, f"ground_labels_mccrgb_default.npy"),
allow_pickle=True,
)
self.assertTrue(
np.allclose(test_points, true_points),
"Ground points are incorrect for default MCC-RGB configuration",
)
self.assertSequenceEqual(
test_labels.tolist(),
true_labels.tolist(),
"Classification is incorrect for default MCC-RGB configuration",
)
def test_mcc_default_las_codes(self):
test_points, test_labels = pymccrgb.core.mcc_rgb(self.data,
seed=SEED_VALUE,
verbose=True,
use_las_codes=True)
true_points, true_labels = np.load(
os.path.join(TEST_OUTPUT_DIR, f"ground_labels_mccrgb_default.npy"),
allow_pickle=True,
)
self.assertTrue(
np.allclose(test_points, true_points),
"Ground points are incorrect for default MCC configuration using LAS codes",
)
true_labels[true_labels == 0] = 4
true_labels[true_labels == 1] = 2
self.assertSequenceEqual(
test_labels.tolist(),
true_labels.tolist(),
"Classification is incorrect for default MCC configuration using LAS codes",
)
def test_mcc_rgb_default_parallel(self):
test_points, test_labels = pymccrgb.core.mcc_rgb(
self.data, seed=SEED_VALUE, n_jobs=2, verbose=True
)
true_points, true_labels = np.load(
os.path.join(TEST_OUTPUT_DIR, f"ground_labels_mccrgb_default.npy"),
allow_pickle=True,
)
self.assertTrue(
np.allclose(test_points, true_points),
"Ground points are incorrect for default MCC-RGB configuration with parallelization",
)
self.assertSequenceEqual(
test_labels.tolist(),
true_labels.tolist(),
"Classification is incorrect for default MCC-RGB configuration with parallelization",
)
def test_mcc_rgb_two_training_tols(self):
test_points, test_labels = pymccrgb.core.mcc_rgb(
self.data,
tols=[1.0, 0.3, 0.3],
scales=[0.5, 1.0, 1.5],
training_tols=[1.0, 0.3],
training_scales=[0.5, 0.5],
seed=SEED_VALUE,
verbose=True,
)
true_points, true_labels = np.load(
os.path.join(TEST_OUTPUT_DIR, f"ground_labels_mccrgb_twotols_1.0_0.3.npy"),
allow_pickle=True,
)
self.assertTrue(
np.allclose(test_points, true_points),
"Ground points are incorrect for MCC-RGB using training tols 1.0 and 0.3",
)
self.assertSequenceEqual(
test_labels.tolist(),
true_labels.tolist(),
"Classification is incorrect for MCC-RGB using training tols 1.0 and 0.3",
)
| 36.130178
| 99
| 0.592368
| 721
| 6,106
| 4.775312
| 0.122053
| 0.058089
| 0.031949
| 0.063898
| 0.821667
| 0.803079
| 0.803079
| 0.796108
| 0.767645
| 0.754865
| 0
| 0.014639
| 0.317557
| 6,106
| 168
| 100
| 36.345238
| 0.811615
| 0.007534
| 0
| 0.513699
| 0
| 0
| 0.195803
| 0.038004
| 0
| 0
| 0
| 0
| 0.089041
| 1
| 0.068493
| false
| 0
| 0.034247
| 0
| 0.116438
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
38467ebf6ab127d5ff882dae11a281a4fa48316b
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/aiohttp/web_protocol.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/aiohttp/web_protocol.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/aiohttp/web_protocol.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/da/71/cd/c7183b31ff193fcf1214fc6704d18bf3398f850a63b59ebc553d0f788a
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.40625
| 0
| 96
| 1
| 96
| 96
| 0.489583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
69c7452188bf1ae7d06aa7f27a1dddb0178c7ffb
| 393
|
py
|
Python
|
metadata_extractor/builders/rdbms/__init__.py
|
pongthep/apache-atlas-external-tools
|
7931375cbd9e4aae1d9f9ced9c77af2e4f9640b4
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-03-25T04:31:22.000Z
|
2021-03-25T04:31:22.000Z
|
metadata_extractor/builders/rdbms/__init__.py
|
pongthep/apache-atlas-external-tools
|
7931375cbd9e4aae1d9f9ced9c77af2e4f9640b4
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-03-25T09:21:08.000Z
|
2021-03-25T15:29:08.000Z
|
metadata_extractor/builders/rdbms/__init__.py
|
pongthep/apache-atlas-external-tools
|
7931375cbd9e4aae1d9f9ced9c77af2e4f9640b4
|
[
"Apache-2.0",
"MIT"
] | 2
|
2021-03-25T09:17:03.000Z
|
2021-12-30T09:52:39.000Z
|
from metadata_extractor.builders.rdbms.mariadb_builder import MaridbBuilder
from metadata_extractor.builders.rdbms.mysql_builder import MysqlBuilder
from metadata_extractor.builders.rdbms.postgresql_builder import PostgresqlBuilder
from metadata_extractor.builders.rdbms.rdbms_builder_abstract import RDBMSBuilder
from metadata_extractor.builders.rdbms.redshift_builder import RedshiftBuilder
| 65.5
| 82
| 0.910941
| 46
| 393
| 7.543478
| 0.369565
| 0.172911
| 0.302594
| 0.417867
| 0.489914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050891
| 393
| 5
| 83
| 78.6
| 0.930295
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
38a703f3eee746762a322da845e67938f8baebe6
| 77
|
py
|
Python
|
src/__init__.py
|
lijiancheng0614/house_finder
|
1fe5dace1b3f0b03b605eac8097d2be9e62ac6e7
|
[
"Apache-2.0"
] | 1
|
2021-08-09T06:07:27.000Z
|
2021-08-09T06:07:27.000Z
|
src/__init__.py
|
lijiancheng0614/house_finder
|
1fe5dace1b3f0b03b605eac8097d2be9e62ac6e7
|
[
"Apache-2.0"
] | null | null | null |
src/__init__.py
|
lijiancheng0614/house_finder
|
1fe5dace1b3f0b03b605eac8097d2be9e62ac6e7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# @Author: lijiancheng0614
# @Date: 2020-03-06
#
| 12.833333
| 26
| 0.558442
| 9
| 77
| 4.777778
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.209677
| 0.194805
| 77
| 5
| 27
| 15.4
| 0.483871
| 0.857143
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
38e2625e2f4278c8552f07d98a010105a02fcfd7
| 210
|
py
|
Python
|
pykotor/resource/formats/ltr/__init__.py
|
NickHugi/PyKotor
|
cab1089f8a8a135861bef45340203718d39f5e1f
|
[
"MIT"
] | 1
|
2022-02-21T15:17:28.000Z
|
2022-02-21T15:17:28.000Z
|
pykotor/resource/formats/ltr/__init__.py
|
NickHugi/PyKotor
|
cab1089f8a8a135861bef45340203718d39f5e1f
|
[
"MIT"
] | 1
|
2022-03-12T16:06:23.000Z
|
2022-03-12T16:06:23.000Z
|
pykotor/resource/formats/ltr/__init__.py
|
NickHugi/PyKotor
|
cab1089f8a8a135861bef45340203718d39f5e1f
|
[
"MIT"
] | null | null | null |
from pykotor.resource.formats.ltr.data import LTR, LTRBlock
from pykotor.resource.formats.ltr.auto import load_ltr, write_ltr
from pykotor.resource.formats.ltr.io_binary import LTRBinaryReader, LTRBinaryWriter
| 52.5
| 83
| 0.857143
| 30
| 210
| 5.9
| 0.5
| 0.186441
| 0.322034
| 0.440678
| 0.491525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 210
| 3
| 84
| 70
| 0.907692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2a075f781ec6737f554732b4173499f8744f5660
| 30
|
py
|
Python
|
test/test_xndtools.py
|
xnd-project/xndtools
|
9478f31954091d861ce538ba278f7f888e23d19b
|
[
"BSD-3-Clause"
] | 3
|
2019-11-12T16:01:26.000Z
|
2020-06-27T19:27:27.000Z
|
test/test_xndtools.py
|
xnd-project/xndtools
|
9478f31954091d861ce538ba278f7f888e23d19b
|
[
"BSD-3-Clause"
] | 4
|
2018-04-25T17:12:43.000Z
|
2018-08-23T18:17:24.000Z
|
test/test_xndtools.py
|
xnd-project/xndtools
|
9478f31954091d861ce538ba278f7f888e23d19b
|
[
"BSD-3-Clause"
] | 6
|
2018-05-04T08:10:40.000Z
|
2019-03-19T10:00:21.000Z
|
import xndtools # noqa: F401
| 15
| 29
| 0.733333
| 4
| 30
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0.2
| 30
| 1
| 30
| 30
| 0.791667
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2a112d86cfad35bb4d78b8783af49e868b79699a
| 3,139
|
py
|
Python
|
ckanext/datajson/test-inventory/test_wrappers.py
|
OpenGov-OpenData/ckanext-datajson
|
38af8a777052472d15a2a2a728776d0860eec4bb
|
[
"CC0-1.0"
] | 1
|
2020-05-13T12:20:17.000Z
|
2020-05-13T12:20:17.000Z
|
ckanext/datajson/test-inventory/test_wrappers.py
|
OpenGov-OpenData/ckanext-datajson
|
38af8a777052472d15a2a2a728776d0860eec4bb
|
[
"CC0-1.0"
] | 2
|
2018-12-12T22:01:53.000Z
|
2022-02-01T19:48:14.000Z
|
ckanext/datajson/test-inventory/test_wrappers.py
|
OpenGov-OpenData/ckanext-datajson
|
38af8a777052472d15a2a2a728776d0860eec4bb
|
[
"CC0-1.0"
] | 1
|
2018-07-19T18:06:30.000Z
|
2018-07-19T18:06:30.000Z
|
from ckanext.datajson.package2pod import Wrappers
class TestCatalogDateWrapper(object):
def test_valid_dcat_issued_date(self):
Wrappers.pkg = {
"title": "Test Dataset",
"name": "test-dataset",
"metadata_created": "2021-03-26T00:45:51.542432",
"metadata_modified": "2021-03-26T00:45:51.542439",
"extras": [
{
"key": "dcat_issued",
"value": "2019-10-17T23:04:32.000Z"
},
{
"key": "dcat_modified",
"value": "2021-03-20T00:14:12.000Z"
}
]
}
Wrappers.current_field_map = {
"field": "metadata_created",
"wrapper": "get_catalog_date"
}
issued_date = Wrappers.get_catalog_date(Wrappers.pkg.get('metadata_created'))
assert issued_date == "2019-10-17T23:04:32.000Z"
def test_valid_dcat_modified_date(self):
Wrappers.pkg = {
"title": "Test Dataset",
"name": "test-dataset",
"metadata_created": "2021-03-26T00:45:51.542432",
"metadata_modified": "2021-03-26T00:45:51.542439",
"extras": [
{
"key": "dcat_issued",
"value": "2019-10-17T23:04:32.000Z"
},
{
"key": "dcat_modified",
"value": "2021-03-20T00:14:12.000Z"
}
]
}
Wrappers.current_field_map = {
"field": "metadata_modified",
"wrapper": "get_catalog_date"
}
modified_date = Wrappers.get_catalog_date(Wrappers.pkg.get('metadata_modified'))
assert modified_date == "2021-03-20T00:14:12.000Z"
def test_dcat_modified_only_field(self):
Wrappers.pkg = {
"title": "Test Dataset",
"name": "test-dataset",
"metadata_created": "2021-03-26T00:45:51.542432",
"metadata_modified": "2021-03-26T00:45:51.542439",
"extras": [
{
"key": "dcat_modified",
"value": "2021-03-20T00:14:12.000Z"
}
]
}
Wrappers.current_field_map = {
"field": "metadata_modified",
"wrapper": "get_catalog_date"
}
modified_date = Wrappers.get_catalog_date(Wrappers.pkg.get('metadata_modified'))
assert modified_date == "2021-03-26T00:45:51.542439"
def test_no_dcat_in_extras(self):
Wrappers.pkg = {
"title": "Test Dataset",
"name": "test-dataset",
"metadata_created": "2021-03-26T00:45:51.542432",
"metadata_modified": "2021-03-26T00:45:51.542439",
"extras": []
}
Wrappers.current_field_map = {
"field": "metadata_modified",
"wrapper": "get_catalog_date"
}
modified_date = Wrappers.get_catalog_date(Wrappers.pkg.get('metadata_modified'))
assert modified_date == "2021-03-26T00:45:51.542439"
| 34.494505
| 88
| 0.510354
| 311
| 3,139
| 4.92926
| 0.167203
| 0.054795
| 0.071755
| 0.084801
| 0.85062
| 0.85062
| 0.829746
| 0.829746
| 0.829746
| 0.798434
| 0
| 0.158181
| 0.355527
| 3,139
| 90
| 89
| 34.877778
| 0.599605
| 0
| 0
| 0.604938
| 0
| 0
| 0.338643
| 0.136349
| 0
| 0
| 0
| 0
| 0.049383
| 1
| 0.049383
| false
| 0
| 0.012346
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2a6c9305c342ccaaed4699ef3ed3bdab4e59313d
| 35
|
py
|
Python
|
tests/tests/base_tests/__init__.py
|
vselitsky/aws-device-farm-python-android
|
86182ec2fae531f7376fc4b7261529700d67eb0f
|
[
"Apache-2.0"
] | 23
|
2018-02-06T02:08:17.000Z
|
2022-02-02T08:37:51.000Z
|
tests/tests/base_tests/__init__.py
|
vselitsky/aws-device-farm-python-android
|
86182ec2fae531f7376fc4b7261529700d67eb0f
|
[
"Apache-2.0"
] | 1
|
2018-06-01T02:31:33.000Z
|
2021-06-26T11:34:10.000Z
|
tests/tests/base_tests/__init__.py
|
vselitsky/aws-device-farm-python-android
|
86182ec2fae531f7376fc4b7261529700d67eb0f
|
[
"Apache-2.0"
] | 19
|
2018-02-06T02:08:21.000Z
|
2022-01-28T02:13:32.000Z
|
from native_test import NativeTest
| 17.5
| 34
| 0.885714
| 5
| 35
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2ad1448b3e66b7ea90735e9321d3e391bc32c3b9
| 536
|
py
|
Python
|
app/src/repositories/categoria_produto_repository.py
|
Leorfk/gelado-api
|
23c62598a90229d4b9c0f61562ca71de6e8182a7
|
[
"MIT"
] | null | null | null |
app/src/repositories/categoria_produto_repository.py
|
Leorfk/gelado-api
|
23c62598a90229d4b9c0f61562ca71de6e8182a7
|
[
"MIT"
] | null | null | null |
app/src/repositories/categoria_produto_repository.py
|
Leorfk/gelado-api
|
23c62598a90229d4b9c0f61562ca71de6e8182a7
|
[
"MIT"
] | null | null | null |
from models.categoria_produto_model import Categoria_Produto
class CategoriaProdutoRepository:
def __init__(self, database: Categoria_Produto) -> None:
self.__categoria_produto_model = database
def create_categoria_produto(self):
pass
def delete_categoria_produto(self, id_categoria_produto: int):
pass
def update_categoria_produto(self):
pass
def get_all_categoria_produto(self):
pass
def get_categoria_produto_by_id(self, id_categoria_produto: int):
pass
| 24.363636
| 69
| 0.733209
| 63
| 536
| 5.777778
| 0.349206
| 0.483516
| 0.21978
| 0.197802
| 0.398352
| 0.324176
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210821
| 536
| 21
| 70
| 25.52381
| 0.86052
| 0
| 0
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.357143
| 0.071429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
2ae013924af934718ba99f38e4b14992a674ee8b
| 25
|
py
|
Python
|
core/game/__init__.py
|
V1ckeyR/snake_snake
|
b22fed3ce48319bb8badf91f02ff205e7f853e6d
|
[
"MIT"
] | 1
|
2021-12-26T21:46:06.000Z
|
2021-12-26T21:46:06.000Z
|
arrow/mappers/fields/__init__.py
|
effordsbeard/arrowstack
|
33f2eff3be07cf65e38610f0701743e775c1bbc6
|
[
"MIT"
] | null | null | null |
arrow/mappers/fields/__init__.py
|
effordsbeard/arrowstack
|
33f2eff3be07cf65e38610f0701743e775c1bbc6
|
[
"MIT"
] | 1
|
2021-12-11T09:11:14.000Z
|
2021-12-11T09:11:14.000Z
|
from .field import Field
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2afc89eedac655f72de7cdc4badf3469a55e3218
| 23
|
py
|
Python
|
gfapy/line/group/path/__init__.py
|
ujjwalsh/gfapy
|
891ef3df695f20c67809e5a54549c876d90690b4
|
[
"ISC"
] | 44
|
2017-03-18T08:08:04.000Z
|
2021-11-10T16:11:15.000Z
|
gfapy/line/group/path/__init__.py
|
ujjwalsh/gfapy
|
891ef3df695f20c67809e5a54549c876d90690b4
|
[
"ISC"
] | 22
|
2017-04-04T21:20:31.000Z
|
2022-03-09T19:05:30.000Z
|
gfapy/line/group/path/__init__.py
|
ujjwalsh/gfapy
|
891ef3df695f20c67809e5a54549c876d90690b4
|
[
"ISC"
] | 5
|
2017-07-07T02:56:56.000Z
|
2020-09-30T20:10:49.000Z
|
from .path import Path
| 11.5
| 22
| 0.782609
| 4
| 23
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2dadd7981d130b906b6d2d6a231d3d193fec5cd1
| 91
|
py
|
Python
|
Chapter10/word_suffix/main.py
|
adamvm/Mastering-RUST-Second-Edition
|
ff65aadd16b3857f0ffe13644ac5906e065fd3e9
|
[
"MIT"
] | 102
|
2018-10-13T04:52:46.000Z
|
2022-03-25T05:36:44.000Z
|
Chapter10/word_suffix/main.py
|
adamvm/Mastering-RUST-Second-Edition
|
ff65aadd16b3857f0ffe13644ac5906e065fd3e9
|
[
"MIT"
] | 11
|
2019-07-27T11:35:35.000Z
|
2022-02-26T12:37:13.000Z
|
Chapter10/word_suffix/main.py
|
adamvm/Mastering-RUST-Second-Edition
|
ff65aadd16b3857f0ffe13644ac5906e065fd3e9
|
[
"MIT"
] | 37
|
2018-10-13T04:52:31.000Z
|
2022-03-15T13:12:33.000Z
|
import word_suffix
print(word_suffix.find_words("Baz, Jazz, Mash, Splash, Squash", "sh"))
| 22.75
| 70
| 0.747253
| 14
| 91
| 4.642857
| 0.857143
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098901
| 91
| 3
| 71
| 30.333333
| 0.792683
| 0
| 0
| 0
| 0
| 0
| 0.362637
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
2dd15fda2a3f83b568865ac2b5c22d21d936fa77
| 44
|
py
|
Python
|
Controllers/__init__.py
|
Bramsnoek/AutomatedKeggBlast
|
7458f5be224aac96008a6fc327789b152d49b248
|
[
"MIT"
] | null | null | null |
Controllers/__init__.py
|
Bramsnoek/AutomatedKeggBlast
|
7458f5be224aac96008a6fc327789b152d49b248
|
[
"MIT"
] | null | null | null |
Controllers/__init__.py
|
Bramsnoek/AutomatedKeggBlast
|
7458f5be224aac96008a6fc327789b152d49b248
|
[
"MIT"
] | null | null | null |
from .blastcontroller import BlastController
| 44
| 44
| 0.909091
| 4
| 44
| 10
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 44
| 1
| 44
| 44
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.