hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9d2c47d2a4a53ec7d5d7c11de24ee08281811ee8
| 365
|
py
|
Python
|
dvgutils/modules/video_capture/__init__.py
|
jagin/dvg-utils
|
a7d19ead75398b09a9f1e146464cf4227f06a476
|
[
"MIT"
] | 7
|
2020-09-02T08:39:22.000Z
|
2021-10-13T18:13:04.000Z
|
dvgutils/modules/video_capture/__init__.py
|
jagin/dvg-utils
|
a7d19ead75398b09a9f1e146464cf4227f06a476
|
[
"MIT"
] | null | null | null |
dvgutils/modules/video_capture/__init__.py
|
jagin/dvg-utils
|
a7d19ead75398b09a9f1e146464cf4227f06a476
|
[
"MIT"
] | null | null | null |
from .file_video_capture import FileVideoCapture, FileVideoCaptureThreaded
from .camera_video_capture import CameraVideoCapture, CameraVideoCaptureThreaded
from .pi_camera_video_capture import PiCameraVideoCapture, PiCameraVideoCaptureThreaded
from .stream_video_capture import StreamVideoCapture, StreamVideoCaptureThreaded
from .video_capture import VideoCapture
| 60.833333
| 87
| 0.909589
| 34
| 365
| 9.470588
| 0.5
| 0.186335
| 0.279503
| 0.149068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065753
| 365
| 5
| 88
| 73
| 0.944282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
9d4109a4e54c99a3c58a624abe5845acb0c9b809
| 367
|
py
|
Python
|
TeddyMark/text.py
|
leosavioli2019/TeddyMark
|
fed3963dd0bd5e84dbe603ed380a893576f674d4
|
[
"MIT"
] | 1
|
2021-04-10T15:09:17.000Z
|
2021-04-10T15:09:17.000Z
|
TeddyMark/text.py
|
leosavioli2019/TeddyMark
|
fed3963dd0bd5e84dbe603ed380a893576f674d4
|
[
"MIT"
] | null | null | null |
TeddyMark/text.py
|
leosavioli2019/TeddyMark
|
fed3963dd0bd5e84dbe603ed380a893576f674d4
|
[
"MIT"
] | null | null | null |
def copright():
print("TeddyMark, By Leonard Fuzzo Savioli")
print("©TeddyMark Lang")
print("Type compile to compile your program")
print("--------------------------")
def imperative():
print("TeddyMark imperative mode")
print("Coprighty ©TeddyMark Lang")
print("--------------------------")
def license():
print(open("license.txt", 'r').read())
| 28.230769
| 47
| 0.577657
| 40
| 367
| 5.35
| 0.575
| 0.130841
| 0.130841
| 0.17757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13624
| 367
| 13
| 48
| 28.230769
| 0.66877
| 0
| 0
| 0.181818
| 0
| 0
| 0.561798
| 0.146067
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| true
| 0
| 0
| 0
| 0.272727
| 0.727273
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
9d482fc1044c74695b74e20854ba3cb2254020db
| 158
|
py
|
Python
|
excript/lista3/q2.py
|
victorers1/anotacoes_curso_python
|
c4ef56bcfc7e3baa3944fc2962e8217c6d720b0e
|
[
"MIT"
] | null | null | null |
excript/lista3/q2.py
|
victorers1/anotacoes_curso_python
|
c4ef56bcfc7e3baa3944fc2962e8217c6d720b0e
|
[
"MIT"
] | null | null | null |
excript/lista3/q2.py
|
victorers1/anotacoes_curso_python
|
c4ef56bcfc7e3baa3944fc2962e8217c6d720b0e
|
[
"MIT"
] | null | null | null |
inf = int(input("Digiteo limite inferior do intervalo: "))
sup = int(input("Digiteo limite superior do intervalo: "))
for i in range(inf,sup+1):
print(i)
| 31.6
| 58
| 0.696203
| 25
| 158
| 4.4
| 0.64
| 0.145455
| 0.272727
| 0.381818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007519
| 0.158228
| 158
| 5
| 59
| 31.6
| 0.819549
| 0
| 0
| 0
| 0
| 0
| 0.477987
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
19cb2cf6c18b2c229a9b566dade51713ff1b49b4
| 60
|
py
|
Python
|
talon_draft_window/__init__.py
|
CameronSBell/knausj_talon
|
3e57e0165257cf07b0e21880d44a91e79cb3ef16
|
[
"MIT"
] | 298
|
2020-02-23T03:00:51.000Z
|
2022-03-30T02:11:00.000Z
|
talon_draft_window/__init__.py
|
CameronSBell/knausj_talon
|
3e57e0165257cf07b0e21880d44a91e79cb3ef16
|
[
"MIT"
] | 521
|
2020-02-21T18:21:17.000Z
|
2022-03-31T16:40:34.000Z
|
talon_draft_window/__init__.py
|
CameronSBell/knausj_talon
|
3e57e0165257cf07b0e21880d44a91e79cb3ef16
|
[
"MIT"
] | 499
|
2020-03-07T05:43:52.000Z
|
2022-03-28T12:24:54.000Z
|
# Used so we can use pytest to run this sub-package's tests
| 30
| 59
| 0.75
| 13
| 60
| 3.461538
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 60
| 1
| 60
| 60
| 0.9375
| 0.95
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
19dc18630ffaa09ee1ba5d3eb7ab3fd90a153870
| 446
|
py
|
Python
|
Session/tf_Session.py
|
Asurada2015/TFAPI_translation
|
1c8d9432b0b8a21c2bb5670b25456d095d0a1ecf
|
[
"Apache-2.0"
] | 7
|
2017-10-19T13:59:24.000Z
|
2019-11-26T03:40:08.000Z
|
Session/tf_Session.py
|
Asurada2015/TFAPI_translation
|
1c8d9432b0b8a21c2bb5670b25456d095d0a1ecf
|
[
"Apache-2.0"
] | null | null | null |
Session/tf_Session.py
|
Asurada2015/TFAPI_translation
|
1c8d9432b0b8a21c2bb5670b25456d095d0a1ecf
|
[
"Apache-2.0"
] | 5
|
2018-08-22T02:57:03.000Z
|
2020-03-05T07:14:21.000Z
|
"""Session类负责数据流图的执行,构造方法为 def __init__(self, target='', graph=None, config=None):
target指定了所要使用的执行引擎,对于大多数的应用该参数取为默认的空白字符串,
在分布式设置中选择使用Session对象时,该参数用于连接不同的tf.train.Server实例
graph参数指定了将要在Session对象中加载的Graph对象,其默认值为None,表示将使用当前默认的数据流图.
当时用多个数据流图时,最好的方式是显式的传入你希望运行的Graph对象(而非在于一个with语句快中创建Session对象)
config参数允许用户指定配置的Session对象所需的选项,如限制CPU或GPU的使用数目,为数据流图设置优化参数及日志选项
在典型的Tensorflow程序中,创建Session对象无需改变任何默认构造参数"""
import tensorflow as tf
tf.Session()
| 40.545455
| 82
| 0.881166
| 33
| 446
| 11.787879
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040359
| 446
| 11
| 83
| 40.545455
| 0.908879
| 0.903587
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
19e075f2b5bb10b649b4a52784ecfe9784266d24
| 111
|
py
|
Python
|
goldenmask/__main__.py
|
youngquan/goldenmask
|
5b16eea94df7ddd988fae0c1a4e265b16af9ded2
|
[
"Apache-2.0"
] | 6
|
2020-04-28T18:13:54.000Z
|
2021-12-23T18:26:30.000Z
|
goldenmask/__main__.py
|
youngquan/goldenmask
|
5b16eea94df7ddd988fae0c1a4e265b16af9ded2
|
[
"Apache-2.0"
] | 1
|
2020-04-28T18:08:46.000Z
|
2020-04-30T12:49:46.000Z
|
goldenmask/__main__.py
|
youngquan/goldenmask
|
5b16eea94df7ddd988fae0c1a4e265b16af9ded2
|
[
"Apache-2.0"
] | 1
|
2020-08-18T21:03:39.000Z
|
2020-08-18T21:03:39.000Z
|
import sys
if "__name__" == "__main__":
from goldenmask.cli import goldenmask
sys.exit(goldenmask())
| 15.857143
| 41
| 0.693694
| 13
| 111
| 5.307692
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189189
| 111
| 6
| 42
| 18.5
| 0.766667
| 0
| 0
| 0
| 0
| 0
| 0.144144
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
c22479a66019470b19d6bc398c914f15896d73e9
| 166
|
py
|
Python
|
gfypy/http/__init__.py
|
badmagick329/gfypy
|
7b2cce3a8945567078c6529887e82972ff913275
|
[
"MIT"
] | 7
|
2020-04-27T14:56:21.000Z
|
2022-01-17T00:02:30.000Z
|
gfypy/http/__init__.py
|
badmagick329/gfypy
|
7b2cce3a8945567078c6529887e82972ff913275
|
[
"MIT"
] | 5
|
2020-05-16T15:39:46.000Z
|
2021-04-01T18:14:52.000Z
|
gfypy/http/__init__.py
|
badmagick329/gfypy
|
7b2cce3a8945567078c6529887e82972ff913275
|
[
"MIT"
] | 2
|
2020-05-19T00:35:18.000Z
|
2021-12-13T03:15:23.000Z
|
try:
from .async_http import AsyncHttpClient
except ImportError:
pass
from .sync_http import SyncHttpClient
__all__ = ["AsyncHttpClient", "SyncHttpClient"]
| 18.444444
| 47
| 0.771084
| 17
| 166
| 7.176471
| 0.705882
| 0.163934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156627
| 166
| 8
| 48
| 20.75
| 0.871429
| 0
| 0
| 0
| 0
| 0
| 0.174699
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 4
|
dfa7232a453832c826b423d090d13e985d3b2626
| 72
|
py
|
Python
|
locale/pot/api/plotting/_autosummary/pyvista-themes-_DepthPeelingConfig-occlusion_ratio-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 4
|
2020-08-07T08:19:19.000Z
|
2020-12-04T09:51:11.000Z
|
locale/pot/api/plotting/_autosummary/pyvista-themes-_DepthPeelingConfig-occlusion_ratio-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 19
|
2020-08-06T00:24:30.000Z
|
2022-03-30T19:22:24.000Z
|
locale/pot/api/plotting/_autosummary/pyvista-themes-_DepthPeelingConfig-occlusion_ratio-1.py
|
tkoyama010/pyvista-doc-translations
|
23bb813387b7f8bfe17e86c2244d5dd2243990db
|
[
"MIT"
] | 1
|
2021-03-09T07:50:40.000Z
|
2021-03-09T07:50:40.000Z
|
import pyvista
pyvista.global_theme.depth_peeling.occlusion_ratio = 0.0
| 24
| 56
| 0.861111
| 11
| 72
| 5.363636
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029851
| 0.069444
| 72
| 2
| 57
| 36
| 0.850746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
dfac844cf046f2db92975647024eb03808fd6c5c
| 276
|
py
|
Python
|
Ago-Dic-2019/Jorge Alberto Hernandez Sanchez/Practicas/Practica1/3.8_Seeing_The_World.py
|
Arbupa/DAS_Sistemas
|
52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1
|
[
"MIT"
] | 41
|
2017-09-26T09:36:32.000Z
|
2022-03-19T18:05:25.000Z
|
Ago-Dic-2019/Jorge Alberto Hernandez Sanchez/Practicas/Practica1/3.8_Seeing_The_World.py
|
Arbupa/DAS_Sistemas
|
52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1
|
[
"MIT"
] | 67
|
2017-09-11T05:06:12.000Z
|
2022-02-14T04:44:04.000Z
|
Ago-Dic-2019/Jorge Alberto Hernandez Sanchez/Practicas/Practica1/3.8_Seeing_The_World.py
|
Arbupa/DAS_Sistemas
|
52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1
|
[
"MIT"
] | 210
|
2017-09-01T00:10:08.000Z
|
2022-03-19T18:05:12.000Z
|
lugares = ["Tokio", "New York", "San Francisco", "Monterrey", "Las Vegas"]
print(lugares)
print(sorted(lugares))
print(lugares)
lugares.reverse()
print(lugares)
lugares.reverse()
print(lugares)
lugares.sort()
print(lugares)
lugares.sort(reverse=True)
print(lugares)
| 12
| 74
| 0.721014
| 34
| 276
| 5.852941
| 0.411765
| 0.361809
| 0.38191
| 0.261307
| 0.356784
| 0.356784
| 0.356784
| 0
| 0
| 0
| 0
| 0
| 0.112319
| 276
| 23
| 75
| 12
| 0.812245
| 0
| 0
| 0.666667
| 0
| 0
| 0.158845
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.583333
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
dfc18bc3fc77e0069024017e05938621dfaa37b7
| 87
|
py
|
Python
|
xiamiu/apps.py
|
Vida42/xiamiu
|
9249a20746d1da050546e3fcdfafbc5ff49ab4d0
|
[
"Apache-2.0"
] | null | null | null |
xiamiu/apps.py
|
Vida42/xiamiu
|
9249a20746d1da050546e3fcdfafbc5ff49ab4d0
|
[
"Apache-2.0"
] | null | null | null |
xiamiu/apps.py
|
Vida42/xiamiu
|
9249a20746d1da050546e3fcdfafbc5ff49ab4d0
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class XiamiuConfig(AppConfig):
name = 'xiamiu'
| 14.5
| 33
| 0.747126
| 10
| 87
| 6.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 87
| 5
| 34
| 17.4
| 0.902778
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
dfcb01dc4fd2eec4a948686d82a856ca5177d65a
| 1,178
|
py
|
Python
|
tests/test_stats.py
|
mikecokina/pyopentsdb
|
b8d78e8f42aed4ebbd6ac3aff925071de41d6b52
|
[
"MIT"
] | 2
|
2018-05-09T08:34:30.000Z
|
2018-09-25T22:42:09.000Z
|
tests/test_stats.py
|
mikecokina/pyopentsdb
|
b8d78e8f42aed4ebbd6ac3aff925071de41d6b52
|
[
"MIT"
] | 2
|
2018-12-24T10:51:30.000Z
|
2019-01-21T13:55:11.000Z
|
tests/test_stats.py
|
mikecokina/pyopentsdb
|
b8d78e8f42aed4ebbd6ac3aff925071de41d6b52
|
[
"MIT"
] | null | null | null |
import unittest
from unittest import mock
from tests.testutils import get_mock_requests_get
from tests.testutils import GeneralUrlTestCase
class TsdbStatsTestCase(unittest.TestCase):
@mock.patch('requests.Session.get', side_effect=get_mock_requests_get(None))
def test_url_stats(self, _):
GeneralUrlTestCase.test_url(self, "/api/stats/", "stats")
@mock.patch('requests.Session.get', side_effect=get_mock_requests_get(None))
def test_url_jvm_stats(self, _):
GeneralUrlTestCase.test_url(self, "/api/stats/jvm/", "jvm_stats")
@mock.patch('requests.Session.get', side_effect=get_mock_requests_get(None))
def test_url_query_stats(self, _):
GeneralUrlTestCase.test_url(self, "/api/stats/query/", "query_stats")
@mock.patch('requests.Session.get', side_effect=get_mock_requests_get(None))
def test_url_region_clients_stats(self, _):
GeneralUrlTestCase.test_url(self, "/api/stats/region_clients/", "region_clients")
@mock.patch('requests.Session.get', side_effect=get_mock_requests_get(None))
def test_url_threads_stats(self, _):
GeneralUrlTestCase.test_url(self, "/api/stats/threads/", "threads")
| 40.62069
| 89
| 0.750424
| 156
| 1,178
| 5.333333
| 0.179487
| 0.084135
| 0.108173
| 0.129808
| 0.709135
| 0.709135
| 0.709135
| 0.709135
| 0.432692
| 0.432692
| 0
| 0
| 0.122241
| 1,178
| 28
| 90
| 42.071429
| 0.804642
| 0
| 0
| 0.25
| 0
| 0
| 0.198811
| 0.02209
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.2
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
dfccbb4cb8d912cfb7bf230ff8708b5d0629934f
| 167
|
py
|
Python
|
blog/apps.py
|
epm0dev/Lens-dev
|
2f34718020ed15ee9a181181e02f62eb3fbadc3b
|
[
"MIT"
] | null | null | null |
blog/apps.py
|
epm0dev/Lens-dev
|
2f34718020ed15ee9a181181e02f62eb3fbadc3b
|
[
"MIT"
] | null | null | null |
blog/apps.py
|
epm0dev/Lens-dev
|
2f34718020ed15ee9a181181e02f62eb3fbadc3b
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
# A configuration class for the blog application.
class BlogConfig(AppConfig):
# Set the application's name.
name = 'blog'
| 20.875
| 49
| 0.736527
| 22
| 167
| 5.590909
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191617
| 167
| 7
| 50
| 23.857143
| 0.911111
| 0.449102
| 0
| 0
| 0
| 0
| 0.044944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
5f0760725b8083cda0c95094541a8ce0c2bfff41
| 81
|
py
|
Python
|
various/import_order/package_example/__init__.py
|
sideroff/python-exercises
|
6a9cc55735d977a71697204c734b3ade84a0c4fd
|
[
"MIT"
] | null | null | null |
various/import_order/package_example/__init__.py
|
sideroff/python-exercises
|
6a9cc55735d977a71697204c734b3ade84a0c4fd
|
[
"MIT"
] | 4
|
2020-03-24T18:00:07.000Z
|
2021-06-02T00:51:22.000Z
|
various/import_order/package_example/__init__.py
|
sideroff/python-exercises
|
6a9cc55735d977a71697204c734b3ade84a0c4fd
|
[
"MIT"
] | null | null | null |
# this is an empty file that tells python that the current directory is a package
| 81
| 81
| 0.802469
| 15
| 81
| 4.333333
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 81
| 1
| 81
| 81
| 0.984848
| 0.975309
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5f21336f4a639b59228bafd7a8604746c31eb375
| 129
|
py
|
Python
|
get_cookies.py
|
laopan2020/epic-games-claimer
|
8251ba0075db8ba6fdf34308edd8f1722f2bdf36
|
[
"MIT"
] | null | null | null |
get_cookies.py
|
laopan2020/epic-games-claimer
|
8251ba0075db8ba6fdf34308edd8f1722f2bdf36
|
[
"MIT"
] | null | null | null |
get_cookies.py
|
laopan2020/epic-games-claimer
|
8251ba0075db8ba6fdf34308edd8f1722f2bdf36
|
[
"MIT"
] | null | null | null |
from epicgames_claimer import login
COOKIES_PATH = "cookies.json"
if __name__ == "__main__":
login(COOKIES_PATH)
| 14.333333
| 36
| 0.697674
| 15
| 129
| 5.266667
| 0.733333
| 0.303797
| 0.405063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217054
| 129
| 8
| 37
| 16.125
| 0.782178
| 0
| 0
| 0
| 0
| 0
| 0.165289
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
a028029830c0a21ab92c825687fb4ac65db3977e
| 67
|
py
|
Python
|
setup.py
|
JamesOwers/corrupted_midi_dataset
|
f827c1b6dc3cb51349fb1f6b5206152aed8c21c6
|
[
"MIT"
] | 29
|
2019-07-26T00:11:28.000Z
|
2021-10-22T03:49:29.000Z
|
setup.py
|
JamesOwers/corrupted_midi_dataset
|
f827c1b6dc3cb51349fb1f6b5206152aed8c21c6
|
[
"MIT"
] | 143
|
2019-06-28T10:29:39.000Z
|
2021-08-06T13:36:05.000Z
|
setup.py
|
JamesOwers/double-jig-gen
|
bab19baf80894a3e315fbc30d8aa650b12a32ba1
|
[
"MIT"
] | 3
|
2020-10-10T06:08:43.000Z
|
2021-02-15T09:13:12.000Z
|
"""Script for setuptools."""
import setuptools
setuptools.setup()
| 13.4
| 28
| 0.746269
| 7
| 67
| 7.142857
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104478
| 67
| 4
| 29
| 16.75
| 0.833333
| 0.328358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
a04a85900b51b9516dffb2815a8de6c5420f4e90
| 238
|
py
|
Python
|
MemeVoteBot/Repository/VoteRepository/i_vote_repository.py
|
bufo24/telegram-poll-bot
|
4e8f7b8bedb47aa239132cf2cd043599a562f045
|
[
"MIT"
] | null | null | null |
MemeVoteBot/Repository/VoteRepository/i_vote_repository.py
|
bufo24/telegram-poll-bot
|
4e8f7b8bedb47aa239132cf2cd043599a562f045
|
[
"MIT"
] | 2
|
2021-11-19T20:47:54.000Z
|
2021-11-19T21:04:55.000Z
|
MemeVoteBot/Repository/VoteRepository/i_vote_repository.py
|
bufo24/telegram-poll-bot
|
4e8f7b8bedb47aa239132cf2cd043599a562f045
|
[
"MIT"
] | 1
|
2021-11-15T18:55:13.000Z
|
2021-11-15T18:55:13.000Z
|
from MemeVoteBot.Models.vote import Vote
from MemeVoteBot.Repository.repository import Repository
class IVoteRepository(Repository[Vote]):
def find_votes_by_userid(self, userid: int) -> list[Vote]:
raise NotImplementedError
| 29.75
| 62
| 0.789916
| 28
| 238
| 6.607143
| 0.642857
| 0.162162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134454
| 238
| 7
| 63
| 34
| 0.898058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
a05062cb56fbc8087658c8442744a3e3aa0be2e9
| 244
|
py
|
Python
|
Python_do_zero_Guanabara/02_TratandoDadoseFazendoContas/desafio/08_desafio.py
|
HenriqueSOliver/Projetos_Python
|
f18c5a343ad1b746a12bd372298b2debe9bc65ec
|
[
"MIT"
] | null | null | null |
Python_do_zero_Guanabara/02_TratandoDadoseFazendoContas/desafio/08_desafio.py
|
HenriqueSOliver/Projetos_Python
|
f18c5a343ad1b746a12bd372298b2debe9bc65ec
|
[
"MIT"
] | null | null | null |
Python_do_zero_Guanabara/02_TratandoDadoseFazendoContas/desafio/08_desafio.py
|
HenriqueSOliver/Projetos_Python
|
f18c5a343ad1b746a12bd372298b2debe9bc65ec
|
[
"MIT"
] | null | null | null |
#Escreva um programa que leia um valor em metros e converta em centimetros e milimetros.
m = float(input('Informe uma metragem: '))
print(f'A metragem informada convertida em centimetros é {m*100:.2f}cm e em milimetros ficaria {m*1000:.0f}mm')
| 61
| 111
| 0.762295
| 42
| 244
| 4.428571
| 0.738095
| 0.139785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042857
| 0.139344
| 244
| 4
| 111
| 61
| 0.842857
| 0.356557
| 0
| 0
| 0
| 0.5
| 0.783439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
a054af9562fe88a8d29de5db4cbdaf8f6940b0d8
| 131
|
py
|
Python
|
old/translatebin.py
|
archu2020/python-2
|
19c626ca9fd37168db8a7ac075fd80c8e2971313
|
[
"Apache-2.0"
] | 48
|
2017-12-24T12:19:55.000Z
|
2022-02-26T13:14:27.000Z
|
old/translatebin.py
|
17610178081/python
|
3975c678d985c468deecd03560d882e9d316bb63
|
[
"Apache-2.0"
] | 3
|
2018-12-05T08:48:14.000Z
|
2020-07-29T01:56:16.000Z
|
old/translatebin.py
|
17610178081/python
|
3975c678d985c468deecd03560d882e9d316bb63
|
[
"Apache-2.0"
] | 113
|
2017-08-09T03:10:04.000Z
|
2022-03-26T16:05:01.000Z
|
temp=input("请输入一个十进制数:")
num=int(temp)
print('十进制 -> 二进制 : %d -> ' %num, bin(num))
print('十进制 -> 八进制 : %d -> %o' %(num,num))
| 26.2
| 45
| 0.519084
| 20
| 131
| 3.4
| 0.6
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19084
| 131
| 4
| 46
| 32.75
| 0.641509
| 0
| 0
| 0
| 0
| 0
| 0.393701
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
a06700b01c7d1b3f2463336eceef4fa765432384
| 640
|
py
|
Python
|
python/1018.py
|
LourdesOshiroIgarashi/uri-begginner
|
05f7993dfde2c7cc49e5b74907dee6297c82f447
|
[
"MIT"
] | 3
|
2021-05-17T05:39:08.000Z
|
2021-05-23T05:14:54.000Z
|
python/1018.py
|
LourdesOshiroIgarashi/uri-beginner
|
05f7993dfde2c7cc49e5b74907dee6297c82f447
|
[
"MIT"
] | null | null | null |
python/1018.py
|
LourdesOshiroIgarashi/uri-beginner
|
05f7993dfde2c7cc49e5b74907dee6297c82f447
|
[
"MIT"
] | null | null | null |
valor = int(input())
a = valor // 100 # 100
b = (valor % 100) // 50 # 50
c = ((valor % 100) % 50) // 20 # 20
d = (((valor % 100) % 50) % 20) // 10 # 10
e = (((valor % 100) % 50) % 20) % 10 // 5 # 5
f = ((((valor % 100) % 50) % 20) % 10) % 5 // 2 # 2
g = (((((valor % 100) % 50) % 20) % 10) % 5) % 2 // 1 # 1
print(valor)
print("{} nota(s) de R$ 100,00".format(a))
print("{} nota(s) de R$ 50,00".format(b))
print("{} nota(s) de R$ 20,00".format(c))
print("{} nota(s) de R$ 10,00".format(d))
print("{} nota(s) de R$ 5,00".format(e))
print("{} nota(s) de R$ 2,00".format(f))
print("{} nota(s) de R$ 1,00".format(g))
| 33.684211
| 59
| 0.46875
| 117
| 640
| 2.564103
| 0.205128
| 0.186667
| 0.233333
| 0.28
| 0.506667
| 0.156667
| 0.106667
| 0
| 0
| 0
| 0
| 0.196687
| 0.245313
| 640
| 18
| 60
| 35.555556
| 0.424431
| 0.028125
| 0
| 0
| 0
| 0
| 0.255034
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
a0752034e579fba50856da00334a5ff3a1415f8d
| 1,847
|
py
|
Python
|
Classfication/DoExpAll.py
|
raja21068/Android_Malware_Detection
|
4164eac199c619ab0207f1ad519fd88a2020e356
|
[
"Apache-2.0"
] | 2
|
2020-09-10T03:53:31.000Z
|
2020-10-05T14:36:54.000Z
|
Classfication/DoExpAll.py
|
raja21068/Android_Malware_Detection
|
4164eac199c619ab0207f1ad519fd88a2020e356
|
[
"Apache-2.0"
] | null | null | null |
Classfication/DoExpAll.py
|
raja21068/Android_Malware_Detection
|
4164eac199c619ab0207f1ad519fd88a2020e356
|
[
"Apache-2.0"
] | 2
|
2020-10-31T09:11:45.000Z
|
2021-09-10T00:16:52.000Z
|
import os
import MNN
import DNN
import gc
import time
"""
DNN.main( arffindex=0,
datestr_p= "2017-08-12",
learningRate_p= 0.01,
dropRate_p= 0.2,
AllTorOneF_p= True,
L1_p= 0.0,
L2_p= 0.0,
datestr= "2017-08-12",
learningRate= 0.01,
dropRate= 0.2,
AllTorOneF= True,
L1= 0.0,
L2= 0.0,
initflag= True,
LoadnEval= False,
NoValids= True,
NoCallBack= True,
NoFreeze= True,
noEpoch= 100 )
"""
score = MNN.main( arffindex=0,
datestr_p= "2017-08-12",
learningRate_p= 0.01,
dropRate_p= 0.2,
AllTorOneF_p= True,
L1_p= 0.0,
L2_p= 0.0,
datestr= "2017-08-12",
learningRate= 0.01,
dropRate= 0.2,
AllTorOneF= True,
L1= 0.0,
L2= 0.0,
initflag= True,
LoadnEval= False,
NoValids= True,
NoCallBack= True,
NoFreeze= False,
noEpoch= 100 )
"""
results = list()
for i in range(63):
print(i)
score = MNN.main( arffindex=i,
datestr_p= "2017-08-12",
learningRate_p= 0.01,
dropRate_p= 0.2,
AllTorOneF_p= True,
L1_p= 0.0,
L2_p= 0.0,
datestr= "2017-08-12",
learningRate= 0.01,
dropRate= 0.2,
AllTorOneF= True,
L1= 0.0,
L2= 0.0,
initflag= False,
LoadnEval= True,
NoValids= True,
NoCallBack= True,
NoFreeze= True,
noEpoch= 100 )
# DNN.main( arffindex=i,
# datestr_p= "2017-07-28",
# learningRate_p= 0.01,
# dropRate_p= 0.2,
# AllTorOneF_p= True,
# L1_p= 0.0,
# L2_p= 0.0,
# datestr= "2017-07-28",
# learningRate= 0.01,
# dropRate= 0.2,
# AllTorOneF= True,
# L1= 0.0,
# L2= 0.0,
# initflag= False,
# LoadnEval= True,
# NoValids= False,
# NoCallBack= True,
# NoFreeze= True,
# noEpoch= 100 )
results.append(score)
gc.collect()
time.sleep(20)
print(results)
f = open('genomeMNN.txt','a')
f.write(str(score))
f.close()
"""
| 18.287129
| 31
| 0.585815
| 274
| 1,847
| 3.861314
| 0.193431
| 0.030246
| 0.083176
| 0.113422
| 0.799622
| 0.799622
| 0.727788
| 0.727788
| 0.676749
| 0.676749
| 0
| 0.123636
| 0.25555
| 1,847
| 101
| 32
| 18.287129
| 0.645818
| 0
| 0
| 0
| 0
| 0
| 0.04329
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.208333
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
a093813f6b0e3ec1d6e42d3ad2f4da2b0d61e62b
| 662
|
py
|
Python
|
DynamicETLDashboard/DynamicETL_Validator/APIs/FlaskAPITemplate.py
|
BRutan/DynamicETLDashboard
|
8a40e6f51e53f084d6103ba41cd675916505652f
|
[
"MIT"
] | null | null | null |
DynamicETLDashboard/DynamicETL_Validator/APIs/FlaskAPITemplate.py
|
BRutan/DynamicETLDashboard
|
8a40e6f51e53f084d6103ba41cd675916505652f
|
[
"MIT"
] | null | null | null |
DynamicETLDashboard/DynamicETL_Validator/APIs/FlaskAPITemplate.py
|
BRutan/DynamicETLDashboard
|
8a40e6f51e53f084d6103ba41cd675916505652f
|
[
"MIT"
] | null | null | null |
#####################################
# FlaskAPITemplate.py
#####################################
# Description:
# * Abstract base class that defines
# common functionality interface to convert
# groups of Flask classes into xml templates
# that can be used to create Flask python applications.
from abc import ABC, abstractmethod, abstractproperty
from bs4 import BeautifulSoup as Soup
class FlaskAPITemplate(ABC):
    """Abstract base for converting groups of Flask classes into XML
    templates from which Flask Python applications can be generated.

    Concrete template implementations are expected to subclass this and
    supply the conversion behaviour; the base class itself holds no state.
    """

    def __init__(self):
        # Nothing to initialise at the base level.
        pass
| 28.782609
| 57
| 0.664653
| 74
| 662
| 5.891892
| 0.527027
| 0.055046
| 0.077982
| 0.09633
| 0.665138
| 0.665138
| 0.665138
| 0.665138
| 0.665138
| 0.665138
| 0
| 0.001852
| 0.18429
| 662
| 22
| 58
| 30.090909
| 0.805556
| 0.575529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0.4
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
a09a86198731b64079f445414bcfe2727be30cee
| 105
|
py
|
Python
|
control-api/app/schemas/video_preference.py
|
Towed-ROV/control-api
|
e7061e60fae7f5218cf98e856da369be71fe0740
|
[
"MIT"
] | 1
|
2021-05-10T21:36:59.000Z
|
2021-05-10T21:36:59.000Z
|
control-api/app/schemas/video_preference.py
|
Towed-ROV/control-api
|
e7061e60fae7f5218cf98e856da369be71fe0740
|
[
"MIT"
] | null | null | null |
control-api/app/schemas/video_preference.py
|
Towed-ROV/control-api
|
e7061e60fae7f5218cf98e856da369be71fe0740
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
class VideoPreference(BaseModel):
    """Pydantic model pairing an ``action`` string with a ``display_mode``
    string. Both fields are free-form here; presumably the API layer
    constrains the allowed values — TODO confirm against callers."""
    action: str
    display_mode: str
| 15
| 33
| 0.761905
| 12
| 105
| 6.583333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 105
| 6
| 34
| 17.5
| 0.929412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
a0bbc30bc3eb22b29357cd7e0adf0f6946431a11
| 131
|
py
|
Python
|
grammpy/parsers/__init__.py
|
PatrikValkovic/grammpy
|
8308a1fd349bf9ea0d267360cc9a4ab20d1629e8
|
[
"MIT"
] | 1
|
2021-02-04T12:41:08.000Z
|
2021-02-04T12:41:08.000Z
|
grammpy/parsers/__init__.py
|
PatrikValkovic/grammpy
|
8308a1fd349bf9ea0d267360cc9a4ab20d1629e8
|
[
"MIT"
] | 3
|
2017-07-08T16:28:52.000Z
|
2020-04-23T18:06:24.000Z
|
grammpy/parsers/__init__.py
|
PatrikValkovic/grammpy
|
8308a1fd349bf9ea0d267360cc9a4ab20d1629e8
|
[
"MIT"
] | 1
|
2021-02-04T12:41:10.000Z
|
2021-02-04T12:41:10.000Z
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 09:48
:Licence MIT
Part of grammpy
"""
from .CYK import cyk
| 13.1
| 25
| 0.709924
| 22
| 131
| 4.227273
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 0.152672
| 131
| 10
| 26
| 13.1
| 0.72973
| 0.755725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
39fe6cab1439559c32a906db5e35a19e3aae09d0
| 57
|
py
|
Python
|
lib/dao/rdbms/market.py
|
the-constant/fammelody
|
970fb3a4a5d5d0dffd19f22a8a75cbf226fd57c2
|
[
"Apache-2.0"
] | null | null | null |
lib/dao/rdbms/market.py
|
the-constant/fammelody
|
970fb3a4a5d5d0dffd19f22a8a75cbf226fd57c2
|
[
"Apache-2.0"
] | null | null | null |
lib/dao/rdbms/market.py
|
the-constant/fammelody
|
970fb3a4a5d5d0dffd19f22a8a75cbf226fd57c2
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Nov 11, 2017
@author: the-constant
'''
| 8.142857
| 23
| 0.614035
| 8
| 57
| 4.375
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 0.210526
| 57
| 6
| 24
| 9.5
| 0.644444
| 0.824561
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2602cad63634facf75dcd0c40df8c234fe287111
| 220
|
py
|
Python
|
pythonCodigos/Django/quiz-devpro/quiz/base/admin.py
|
fabiolealsc/estudandoPython
|
be0e9211ba3d596a15a9427b612c537a102b858e
|
[
"MIT"
] | 3
|
2021-09-05T16:50:06.000Z
|
2021-11-08T08:56:51.000Z
|
pythonCodigos/Django/quiz-devpro/quiz/base/admin.py
|
fabiolealsc/estudandoPython
|
be0e9211ba3d596a15a9427b612c537a102b858e
|
[
"MIT"
] | null | null | null |
pythonCodigos/Django/quiz-devpro/quiz/base/admin.py
|
fabiolealsc/estudandoPython
|
be0e9211ba3d596a15a9427b612c537a102b858e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from quiz.base.models import Pergunta
# Register your models here.
# Register Pergunta with the Django admin site.
@admin.register(Pergunta)
class PerguntaAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist for Pergunta.
    list_display = ('id', 'enunciado', 'disponivel')
| 24.444444
| 52
| 0.768182
| 27
| 220
| 6.222222
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122727
| 220
| 8
| 53
| 27.5
| 0.870466
| 0.118182
| 0
| 0
| 0
| 0
| 0.109375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
261d9c3271f7db7efc834dd50ee17f688415f3ff
| 369
|
py
|
Python
|
PyOptimizer/Optimizer.py
|
alesgaco/PyOptimizer
|
0b9f47a72d0288df828ef575eac993c31564d1f1
|
[
"MIT"
] | null | null | null |
PyOptimizer/Optimizer.py
|
alesgaco/PyOptimizer
|
0b9f47a72d0288df828ef575eac993c31564d1f1
|
[
"MIT"
] | null | null | null |
PyOptimizer/Optimizer.py
|
alesgaco/PyOptimizer
|
0b9f47a72d0288df828ef575eac993c31564d1f1
|
[
"MIT"
] | null | null | null |
from PyOptimizer.Model import Model
class Optimizer:
    """Holds a validated Model instance for optimization.

    Invalid arguments are reported with a printed warning and otherwise
    ignored (original best-effort behavior is preserved: ``self.model``
    is simply not set in that case).
    """

    def __init__(self, model):
        # Delegate to setModel so the validation logic lives in one place
        # (the original duplicated the isinstance check verbatim).
        self.setModel(model)

    def setModel(self, model):
        """Assign ``model`` if it is a Model instance; otherwise warn."""
        if isinstance(model, Model):
            self.model = model
        else:
            # Best-effort: warn and continue, do not raise.
            print("Model invalid")
| 23.0625
| 38
| 0.560976
| 39
| 369
| 5.205128
| 0.410256
| 0.17734
| 0.108374
| 0.206897
| 0.689655
| 0.689655
| 0.689655
| 0.689655
| 0.689655
| 0.689655
| 0
| 0
| 0.341463
| 369
| 15
| 39
| 24.6
| 0.835391
| 0
| 0
| 0.714286
| 0
| 0
| 0.070461
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.142857
| 0.071429
| 0
| 0.285714
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
2626a47292ff4e290bdbf9d0c2ca03580c62db66
| 5,371
|
py
|
Python
|
Density.py
|
fireballpoint1/fortranTOpy
|
55843a62c6f0a2f8e2a777ef70193940d3d2d141
|
[
"Apache-2.0"
] | 1
|
2018-08-26T05:10:56.000Z
|
2018-08-26T05:10:56.000Z
|
Density.py
|
fireballpoint1/fortranTOpy
|
55843a62c6f0a2f8e2a777ef70193940d3d2d141
|
[
"Apache-2.0"
] | null | null | null |
Density.py
|
fireballpoint1/fortranTOpy
|
55843a62c6f0a2f8e2a777ef70193940d3d2d141
|
[
"Apache-2.0"
] | 1
|
2018-06-26T18:06:44.000Z
|
2018-06-26T18:06:44.000Z
|
import numpy
import sys
import conf
import math
# Mirror the shared state from the `conf` module into module-level globals.
# DENSITY() re-reads these at entry and writes them back before returning,
# mimicking the COMMON blocks of the original Fortran code.
DEN=conf.DEN
AN1=conf.AN1
AN2=conf.AN2
AN3=conf.AN3
AN4=conf.AN4
AN5=conf.AN5
AN6=conf.AN6
AN=conf.AN
FRAC=conf.FRAC
NGAS=conf.NGAS
NSTEP=conf.NSTEP
NANISO=conf.NANISO
EFINAL=conf.EFINAL
ESTEP=conf.ESTEP
AKT=conf.AKT
ARY=conf.ARY
TEMPC=conf.TEMPC
TORR=conf.TORR
IPEN=conf.IPEN
NGASN=conf.NGASN
BET=conf.BET
GAM=conf.GAM
VC=conf.VC
EMS=conf.EMS
# from test_degrad import *
def DENSITY():
    """Fill the density-effect correction array DEN for the current gas mix.

    Port of a Fortran DENSITY routine: all inputs and outputs are exchanged
    through module-level globals mirrored from the ``conf`` module, which is
    re-read at entry and written back before returning. The Sternheimer
    density-effect parameterization (CBAR, X0, X1, ABAR, AKBAR) is taken
    either from the generic formula (flag==1, single gas) or from per-gas
    tables indexed by NGASN (flag==0).
    """
    #IMPLICIT #real*8 (A-H,O-Z)
    #IMPLICIT #integer*8 (I-N)
    global DEN #=[0 for x in range(20000)]
    global AN1,AN2,AN3,AN4,AN5,AN6,AN,FRAC #=[0 for x in range(6)]
    global NGAS,NSTEP,NANISO,EFINAL,ESTEP,AKT,ARY,TEMPC,TORR,IPEN
    global NGASN #=[0 for x in range[6]]
    global BET#=[0 for x in range(2000)]
    global GAM#=[0 for x in range(20000)]
    global VC,EMS
    ###############################################
    # Refresh local copies of the shared configuration state.
    DEN=conf.DEN
    AN1=conf.AN1
    AN2=conf.AN2
    AN3=conf.AN3
    AN4=conf.AN4
    AN5=conf.AN5
    AN6=conf.AN6
    AN=conf.AN
    FRAC=conf.FRAC
    NGAS=conf.NGAS
    NSTEP=conf.NSTEP
    NANISO=conf.NANISO
    EFINAL=conf.EFINAL
    ESTEP=conf.ESTEP
    AKT=conf.AKT
    ARY=conf.ARY
    TEMPC=conf.TEMPC
    TORR=conf.TORR
    IPEN=conf.IPEN
    NGASN=conf.NGASN
    BET=conf.BET
    GAM=conf.GAM
    VC=conf.VC
    EMS=conf.EMS
    ###############################################
    # 1-based working arrays (index 0 unused, Fortran style). The zeros
    # below are immediately replaced by the table literals, except AND.
    AND=numpy.zeros(6+1)
    EIAV=numpy.zeros(80+1)
    X00=numpy.zeros(80+1)
    X11=numpy.zeros(80+1)
    AKS=numpy.zeros(80+1)
    AAA=numpy.zeros(80+1)
    JELEC=numpy.zeros(80+1)
    # DENSITY EFFECT CONSTANTS
    # EIAV ENERGY IN EV
    # JELEC NUMBER OF ELECTRONS PER ATOM OR MOLECULE
    EIAV=[0,115.0,188.0,41.8,41.8,137.0,352.0,482.0,41.7,45.4,47.1,48.3,85.0,0.0,71.6,95.0,82.0,0.0,0.0,0.0,0.0,19.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,128.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,48.3,*36*[0.0]]
    # print(len(EIAV))
    JELEC=[0,42,18,2,2,10,36,54,10,18,26,34,22,0,10,16,14,0,0,0,0,2,0,0,0,0,0,0,0,0,70,0,0,0,0,0,0,0,0,0,0,0,0,0,34]+36*[0]
    X00=[0,1.70,1.7635,2.2017,2.2017,2.0735,1.7158,1.5630,1.6263,1.5090,1.4339,1.3788,1.6294,0.0,1.7952,1.7541,1.7378,0.0,0.0,0.0,0.0,1.8639,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.6,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.3788,*36*[0.0]]
    X11=[0,4.00,4.4855,3.6122,3.6122,4.6421,5.0748,4.7371,3.9716,3.8726,3.8011,3.7524,4.1825,0.0,4.3437,4.3213,4.1323,0.0,0.0,0.0,0.0,3.2718,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,4.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,3.7524,*36*[0.0]]
    AKS=[0,3.00,2.9618,5.8347,5.8347,3.5771,3.4051,2.7414,3.6257,3.6095,3.5920,3.4884,3.3227,0.0,3.5901,3.2913,3.2125,0.0,0.0,0.0,0.0,5.7273,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,3.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,3.4884,*36*[0.0]]
    AAA=[0,.18551,.19714,.13443,.13443,.08064,.07446,.23314,.09253,0.09627,0.09916,.10852,.11768,0.0,.08101,.11778,.15349,0.0,0.0,0.0,0.0,.14092,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,.177484,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,.10852,*36*[0.0]]
    #
    # Physical constants: pi, electron mass (eV), classical electron radius,
    # inverse fine-structure constant, 0C in K, standard pressure (torr).
    API=numpy.arccos(-1.00)
    EMS=510998.9280
    RE=2.8179403267*(10**-13)
    ALPH=137.035999074
    ABZERO=273.150
    ATMOS=760.00
    #
    # DENSITY EFFECT CALCULATION
    AND[1]=AN1
    AND[2]=AN2
    AND[3]=AN3
    AND[4]=AN4
    AND[5]=AN5
    AND[6]=AN6
    HSUM=0.0
    SUM1=0.0
    SUMDNOM=0.0
    # print(NGAS)
    # Fraction-weighted averages over the gas mixture.
    for L1 in range(1,NGAS+1):
        # print("E",EIAV[int(NGASN[L1])],NGASN[L1])
        SUM1=SUM1+FRAC[L1]*float(JELEC[int(NGASN[L1])])*math.log(EIAV[int(NGASN[L1])])
        SUMDNOM=SUMDNOM+FRAC[L1]*float(JELEC[int(NGASN[L1])])
        HSUM=HSUM+AND[L1]*float(JELEC[int(NGASN[L1])]) #22385
    # Log-weighted mean excitation energy of the mixture.
    EIBAR=math.exp(SUM1/SUMDNOM)
    # PLASMA ENERGY
    HWP1=math.sqrt(4.0*API*HSUM*RE**3)*ALPH*EMS
    #
    DELDEN=math.log(EIBAR/HWP1)
    CBAR=1.0+2.0*DELDEN
    flag=0 #SELF ADDED
    if(NGAS == 1): #22392
        flag=1
    # CALC X0 AND X1
    # NOTE(review): the first branch already consumes CBAR < 10.0, so the
    # following `>= 4.0 and < 10.5` test effectively covers 10.0..10.5 —
    # possibly a transcription slip for `>= 10.0`; behavior is unchanged
    # either way. TODO confirm against the Fortran original.
    if(CBAR < 10.0):
        X0=1.6
        X1=4.0
    elif(CBAR >= 4.0 and CBAR < 10.5) :
        X0=1.7
        X1=4.0
    elif(CBAR >= 10.5 and CBAR < 11.0) :
        X0=1.8
        X1=4.0
    elif(CBAR >= 11.0 and CBAR < 11.5) :
        X0=1.9
        X1=4.0
    elif(CBAR >= 11.5 and CBAR < 12.25) :
        X0=2.0
        X1=4.0
    elif(CBAR >= 12.25 and CBAR < 13.804) :
        X0=2.0
        X1=5.0
    else:
        X0=0.326*CBAR-1.5
        X1=5.0
    # endif
    if(flag==1):
        # Generic parameterization (single gas path).
        AKBAR=3.0
        ABAR=(CBAR-2.0*math.log(10.00)*X0)/((X1-X0)**3)
    elif(flag==0):
        # Table values for the first gas in the mixture.
        AKBAR=AKS[int(NGASN[1])]
        X0=X00[int(NGASN[1])]
        X1=X11[int(NGASN[1])]
        ABAR=AAA[int(NGASN[1])]
    else:
        pass
    # CORRECT X0 AND X1 FOR DENSITY CHANGE FROM 20C AND 760 TORR
    # NB CORRECTION TO CBAR ALREADY DONE
    DCOR=0.5*math.log10(TORR*293.15/(760.0*(TEMPC+ABZERO)))
    X0=X0-DCOR
    X1=X1-DCOR
    # CALCULATE DENSITY CORRECTION FACTOR ARRAY DEN(20000)
    # NOTE(review): BET is commented above as size 2000 but indexed to 20000
    # here — assumes conf supplies a 20001-element BET; verify.
    AFC=2.0*math.log(10.00)
    for I in range(1,20000+1):
        BG=BET[I]*GAM[I]
        X=math.log10(BG)
        if(X < X0):
            DEN[I]=0.0
        elif(X > X0 and X < X1) :
            DEN[I]=ABAR*math.exp(AKBAR*math.log(X1-X))+AFC*X-CBAR
        else:
            DEN[I]=AFC*X-CBAR
        # endif
        # WRITE(6,99) DEN[I]
        # 99 print(' DENSITY CORRECTION=',D12.5)
    # Write the (possibly mutated) shared state back to conf.
    conf.DEN=DEN
    conf.AN1=AN1
    conf.AN2=AN2
    conf.AN3=AN3
    conf.AN4=AN4
    conf.AN5=AN5
    conf.AN6=AN6
    conf.AN=AN
    conf.FRAC=FRAC
    conf.NGAS=NGAS
    conf.NSTEP=NSTEP
    conf.NANISO=NANISO
    conf.EFINAL=EFINAL
    conf.ESTEP=ESTEP
    conf.AKT=AKT
    conf.ARY=ARY
    conf.TEMPC=TEMPC
    conf.TORR=TORR
    conf.IPEN=IPEN
    conf.NGASN=NGASN
    conf.BET=BET
    conf.GAM=GAM
    conf.VC=VC
    conf.EMS=EMS
    return
# end
| 27.685567
| 251
| 0.611246
| 1,245
| 5,371
| 2.636145
| 0.191968
| 0.16819
| 0.223035
| 0.274223
| 0.331505
| 0.31688
| 0.293723
| 0.252285
| 0.24741
| 0.236137
| 0
| 0.245945
| 0.150624
| 5,371
| 193
| 252
| 27.829016
| 0.473477
| 0.135729
| 0
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005952
| false
| 0.005952
| 0.02381
| 0
| 0.035714
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
263f974938517eff4defccb1581e38453ec7e16e
| 281
|
wsgi
|
Python
|
techBoard.wsgi
|
tannercrook/techboard
|
d25e6eeadbfd2cdb3ee35829e09982d7de51b19c
|
[
"MIT"
] | null | null | null |
techBoard.wsgi
|
tannercrook/techboard
|
d25e6eeadbfd2cdb3ee35829e09982d7de51b19c
|
[
"MIT"
] | null | null | null |
techBoard.wsgi
|
tannercrook/techboard
|
d25e6eeadbfd2cdb3ee35829e09982d7de51b19c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# WSGI entry point: exposes the techBoard Flask app as `application`
# (the name mod_wsgi looks up) and routes its logging to stderr.
import sys
import logging
import os
logging.basicConfig(stream=sys.stderr)
# Make the app package importable from its deployment location.
sys.path.insert(0,"/var/www/techBoard/techBoard/")
from techBoard import app as application
# NOTE(review): the session secret key is hard-coded in source control.
# It should be loaded from the environment or a file outside the repo,
# and rotated if this file was ever published.
application.secret_key = '\xfc\x123\xda\x06\xc8o\xf3\x95\x01\xafaU\\\xc1Z\xa4\xa9C\xddo\x020]'
| 28.1
| 94
| 0.779359
| 45
| 281
| 4.844444
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068702
| 0.067616
| 281
| 9
| 95
| 31.222222
| 0.763359
| 0.05694
| 0
| 0
| 0
| 0.142857
| 0.363636
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.571429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
26645e95e3379236eb8bdb95863e4cbc5cdc68c8
| 143
|
py
|
Python
|
Python/Ex005.py
|
renato-rt/Python
|
ba033094e1da5b55cf9ce4c8a5cf2cd90247db36
|
[
"MIT"
] | null | null | null |
Python/Ex005.py
|
renato-rt/Python
|
ba033094e1da5b55cf9ce4c8a5cf2cd90247db36
|
[
"MIT"
] | null | null | null |
Python/Ex005.py
|
renato-rt/Python
|
ba033094e1da5b55cf9ce4c8a5cf2cd90247db36
|
[
"MIT"
] | 1
|
2021-11-30T17:34:33.000Z
|
2021-11-30T17:34:33.000Z
|
# Prompt (the \033[1;33;1m prefix is an ANSI escape selecting bold yellow text)
# and parse the typed value as an integer; raises ValueError on non-numeric input.
n = int(input('\033[1;33;1mDigite um número: '))
# Report the number together with its predecessor (n-1) and successor (n+1).
print('Analisando o valor {} seu antecessor é {} e seu sucessor é {}'.format(n, (n-1), (n+1)))
| 71.5
| 94
| 0.629371
| 26
| 143
| 3.461538
| 0.730769
| 0.044444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 0.13986
| 143
| 2
| 94
| 71.5
| 0.658537
| 0
| 0
| 0
| 0
| 0
| 0.631944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
2666ed6ad75f18099eb912daea8218dd0482480e
| 4,910
|
py
|
Python
|
tests/view/test_publication.py
|
pauloaugusto-dmf/blog_django
|
7374e85dd4f0622aefbbb99d27ceb85f19fd1cd8
|
[
"MIT"
] | 2
|
2021-12-31T22:14:31.000Z
|
2021-12-31T22:14:34.000Z
|
tests/view/test_publication.py
|
pauloaugusto-dmf/blog_django
|
7374e85dd4f0622aefbbb99d27ceb85f19fd1cd8
|
[
"MIT"
] | null | null | null |
tests/view/test_publication.py
|
pauloaugusto-dmf/blog_django
|
7374e85dd4f0622aefbbb99d27ceb85f19fd1cd8
|
[
"MIT"
] | null | null | null |
import pytest
from django.urls import resolve, reverse
pytestmark = pytest.mark.django_db
class TestHomeView:
    """URL wiring and availability checks for the publication home page."""
    class TestPublicationHomeView:
        def test_reverse_resolve(self):
            # reverse() and resolve() must agree on "/" <-> publication:home.
            assert reverse("publication:home") == "/"
            assert resolve("/").view_name == "publication:home"

        def test_status_code(self, client):
            response = client.get(reverse("publication:home"))
            assert response.status_code == 200
class TestTopicViews:
    """URL reversing/resolving and status-code checks for the topic CRUD views.

    ``topic`` and ``client`` are pytest fixtures (presumably defined in a
    conftest elsewhere in the project — not visible here).
    """
    class TestTopicListView:
        def test_reverse_resolve(self):
            assert reverse("publication:list_topic") == "/topic/list/"
            assert resolve("/topic/list/").view_name == "publication:list_topic"

        def test_status_code(self, client):
            response = client.get(reverse("publication:list_topic"))
            assert response.status_code == 200

    class TestTopicCreateView:
        def test_reverse_resolve(self):
            assert reverse("publication:create_topic") == "/topic/create/"
            assert resolve("/topic/create/").view_name == "publication:create_topic"

        def test_status_code(self, client):
            response = client.get(reverse("publication:create_topic"))
            assert response.status_code == 200

    class TestTopicUpdateView:
        def test_reverse_resolve(self, topic):
            url = reverse("publication:update_topic", kwargs={"slug": topic.slug})
            assert url == f"/topic/{topic.slug}/update/"
            view_name = resolve(f"/topic/{topic.slug}/update/").view_name
            assert view_name == "publication:update_topic"

        def test_status_code(self, client, topic):
            response = client.get(
                reverse("publication:update_topic", kwargs={"slug": topic.slug})
            )
            assert response.status_code == 200

    class TestTopicDeleteView:
        def test_reverse_resolve(self, topic):
            url = reverse("publication:delete_topic", kwargs={"slug": topic.slug})
            assert url == f"/topic/{topic.slug}/delete/"
            view_name = resolve(f"/topic/{topic.slug}/delete/").view_name
            assert view_name == "publication:delete_topic"

        def test_status_code(self, client, topic):
            response = client.get(
                reverse("publication:delete_topic", kwargs={"slug": topic.slug})
            )
            assert response.status_code == 200
class TestPostViews:
    """URL reversing/resolving and status-code checks for the post CRUD views.

    ``post`` and ``client`` are pytest fixtures (presumably defined in a
    conftest elsewhere in the project — not visible here).
    """
    class TestPostListView:
        def test_reverse_resolve(self):
            assert reverse("publication:list_post") == "/post/list/"
            assert resolve("/post/list/").view_name == "publication:list_post"

        def test_status_code(self, client):
            response = client.get(reverse("publication:list_post"))
            assert response.status_code == 200

    class TestPostDetailView:
        def test_reverse_resolve(self, post):
            url = reverse("publication:detail_post", kwargs={"slug": post.slug})
            assert url == f"/post/{post.slug}/"
            view_name = resolve(f"/post/{post.slug}/").view_name
            assert view_name == "publication:detail_post"

        def test_status_code(self, client, post):
            response = client.get(
                reverse("publication:detail_post", kwargs={"slug": post.slug})
            )
            assert response.status_code == 200

    class TestPostCreateView:
        def test_reverse_resolve(self):
            assert reverse("publication:create_post") == "/post/create/"
            assert resolve("/post/create/").view_name == "publication:create_post"

        def test_status_code(self, client):
            response = client.get(reverse("publication:create_post"))
            assert response.status_code == 200

    class TestPostUpdateView:
        def test_reverse_resolve(self, post):
            url = reverse("publication:update_post", kwargs={"slug": post.slug})
            assert url == f"/post/{post.slug}/update/"
            view_name = resolve(f"/post/{post.slug}/update/").view_name
            assert view_name == "publication:update_post"

        def test_status_code(self, client, post):
            response = client.get(
                reverse("publication:update_post", kwargs={"slug": post.slug})
            )
            assert response.status_code == 200

    class TestPostDeleteView:
        def test_reverse_resolve(self, post):
            url = reverse("publication:delete_post", kwargs={"slug": post.slug})
            assert url == f"/post/{post.slug}/delete/"
            view_name = resolve(f"/post/{post.slug}/delete/").view_name
            assert view_name == "publication:delete_post"

        def test_status_code(self, client, post):
            response = client.get(
                reverse("publication:delete_post", kwargs={"slug": post.slug})
            )
            assert response.status_code == 200
| 38.968254
| 84
| 0.619756
| 530
| 4,910
| 5.556604
| 0.092453
| 0.047538
| 0.047538
| 0.071307
| 0.832258
| 0.792869
| 0.770798
| 0.66961
| 0.652971
| 0.407131
| 0
| 0.008253
| 0.259674
| 4,910
| 125
| 85
| 39.28
| 0.801926
| 0
| 0
| 0.364583
| 0
| 0
| 0.214868
| 0.168839
| 0
| 0
| 0
| 0
| 0.3125
| 1
| 0.208333
| false
| 0
| 0.020833
| 0
| 0.364583
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
266b756013419bd912bd11931a0266d4710b95af
| 85
|
py
|
Python
|
step_functions/step_functions/settings.example.py
|
thinkAmi-sandbox/AWS_CDK-sample
|
8a5c92dd0aba56d0d5c61873e1d0762a26cd8dd4
|
[
"Unlicense"
] | null | null | null |
step_functions/step_functions/settings.example.py
|
thinkAmi-sandbox/AWS_CDK-sample
|
8a5c92dd0aba56d0d5c61873e1d0762a26cd8dd4
|
[
"Unlicense"
] | null | null | null |
step_functions/step_functions/settings.example.py
|
thinkAmi-sandbox/AWS_CDK-sample
|
8a5c92dd0aba56d0d5c61873e1d0762a26cd8dd4
|
[
"Unlicense"
] | null | null | null |
# ARN of the AWS-provided SciPy layer for Python 3.7 Lambdas.
# This is an example settings file: replace `region` and `account_id`
# with real values before use.
AWS_SCIPY_ARN = 'arn:aws:lambda:region:account_id:layer:AWSLambda-Python37-SciPy1x:2'
| 85
| 85
| 0.835294
| 14
| 85
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048193
| 0.023529
| 85
| 1
| 85
| 85
| 0.771084
| 0
| 0
| 0
| 0
| 0
| 0.77907
| 0.77907
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
13fe772d9f1f29153c6b69b63a77260e462b8a46
| 200
|
py
|
Python
|
Maths/231. Power of Two.py
|
thewires2/Leetcode
|
a37ff81d60dd9195ba637b970b40aabbea5f4680
|
[
"Unlicense"
] | 1
|
2021-06-30T17:51:56.000Z
|
2021-06-30T17:51:56.000Z
|
Maths/231. Power of Two.py
|
thewires2/Leetcode
|
a37ff81d60dd9195ba637b970b40aabbea5f4680
|
[
"Unlicense"
] | null | null | null |
Maths/231. Power of Two.py
|
thewires2/Leetcode
|
a37ff81d60dd9195ba637b970b40aabbea5f4680
|
[
"Unlicense"
] | null | null | null |
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        """Return True iff n is a positive integer power of two.

        A power of two has exactly one set bit, so ``n & (n - 1)`` clears
        it to zero. This replaces the original binary-string round-trip
        (``str(bin(n))[2:].count("1")``) with an O(1) bit trick; zero and
        negative inputs return False exactly as before.
        """
        return n > 0 and n & (n - 1) == 0
| 22.222222
| 43
| 0.475
| 27
| 200
| 3.518519
| 0.703704
| 0.231579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033058
| 0.395
| 200
| 8
| 44
| 25
| 0.752066
| 0
| 0
| 0.25
| 0
| 0
| 0.005
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.625
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
cd3d207fc113c362c45a73dadd987d170ee21cb9
| 32,453
|
py
|
Python
|
src/projects/views.py
|
dianshao-embedded/dianshao
|
8d4c30e152653fe95ad7b004e64f4686785cdf92
|
[
"MIT"
] | 3
|
2022-01-14T01:28:34.000Z
|
2022-01-17T10:58:40.000Z
|
src/projects/views.py
|
dianshao-embedded/dianshao
|
8d4c30e152653fe95ad7b004e64f4686785cdf92
|
[
"MIT"
] | null | null | null |
src/projects/views.py
|
dianshao-embedded/dianshao
|
8d4c30e152653fe95ad7b004e64f4686785cdf92
|
[
"MIT"
] | 3
|
2022-01-16T06:15:20.000Z
|
2022-02-06T07:22:15.000Z
|
from celery import Task
from django.shortcuts import redirect, render
from django.template import context
from django.urls import reverse
from .models import *
from .forms import *
from .tasks import *
from os import path
# Create your views here.
def project(request):
    """List all projects; on POST create one and make its workspace directory."""
    projects = Project.objects.all()
    form = ProjectModelForm()
    form_import = ProjectImportForm()
    if request.method == 'POST':
        form = ProjectModelForm(request.POST)
        if form.is_valid():
            project = form.save(commit=False)
            # Workspace root is fixed for all projects.
            project.project_path = '/home/dianshao/yocto'
            project.save()
            result = shell_cmd_task.delay("mkdir %s/%s" % (project.project_path, project.project_name), project.project_path)
            # NOTE(review): busy-wait polling of the celery result with no
            # sleep — burns CPU and spins forever if the task stays PENDING;
            # result.get(timeout=...) would be the usual replacement. Same
            # pattern recurs in the other views below.
            while 1:
                if (result._get_task_meta())["status"] == 'FAILURE':
                    raise Exception('shell command task error')
                elif (result._get_task_meta())["status"] == 'SUCCESS':
                    break
            return redirect(reverse('projects:initial', args=(project.id,)))
    context = {
        'projects': projects,
        'form': form,
        'form_import': form_import,
    }
    return render(request, 'projects/projects.html', context)
def project_initial(request, project_id):
    """Kick off the asynchronous initialisation task for a project and
    render its progress page."""
    proj = Project.objects.get(id=project_id)
    task = project_initial_task.delay(
        proj.id, proj.project_path, proj.project_version, proj.project_name)
    ctx = {'task_id': task.task_id, 'project_id': project_id}
    return render(request, 'projects/project_initial.html', context=ctx)
def project_import(request):
    """On a valid POST, import an existing project from a URL via celery and
    render the import-progress page."""
    if request.method == 'POST':
        form = ProjectImportForm(request.POST)
        if form.is_valid():
            result = project_import_task.delay('/home/dianshao/yocto',
                form.cleaned_data['name'],
                form.cleaned_data['url'])
            return render(request, 'projects/project_import.html', context={'task_id': result.task_id})
    # NOTE(review): falls off the end (returns None) on GET or when the form
    # is invalid — Django raises a 500 for a None response. Likely needs an
    # explicit fallback render/redirect; intended target template unclear.
def project_delete(request, project_id):
    """On POST, remove the project directory on disk, then delete the DB row."""
    if request.method == 'POST':
        project = Project.objects.get(id=project_id)
        result = shell_cmd_task.delay('rm -rf %s' %
            (path.join(project.project_path, project.project_name)), project.project_path)
        # NOTE(review): busy-wait polling of the celery result (no sleep);
        # spins forever if the task never leaves PENDING.
        while 1:
            if (result._get_task_meta())["status"] == 'FAILURE':
                raise Exception('shell command task error')
            elif (result._get_task_meta())["status"] == 'SUCCESS':
                break
        # DB row is removed only after the on-disk delete succeeded.
        project.delete()
    return redirect('/projects/')
def metas(request, project_id):
    """Render the list of meta-layers attached to one project."""
    layers = MetaLayer.objects.filter(project__id=project_id)
    ctx = {'metas': layers, 'project_id': project_id}
    return render(request, 'projects/metas.html', context=ctx)
def meta_create(request, project_id):
    """Clone a meta-layer for the project via a celery task (POST), or show
    the creation form. Invalid POSTs re-render the bound form."""
    # TODO: add meta-layer to config files
    proj = Project.objects.get(id=project_id)
    form = MetaModelForm(request.POST) if request.method == 'POST' else MetaModelForm()
    if request.method == 'POST' and form.is_valid():
        task = meta_clone_task.delay(form.cleaned_data['name'],
                                     form.cleaned_data['url'],
                                     form.cleaned_data['remote_or_local'],
                                     form.cleaned_data['sub'],
                                     proj.id)
        return render(request, 'projects/meta_create.html',
                      context={'form': form, 'task_id': task.task_id, 'project_id': project_id})
    return render(request, 'projects/meta_create.html', context={'form': form})
def project_export(request, project_id):
    """Start the export task for a project and render its progress page."""
    task = project_export_task.delay(project_id)
    return render(request, 'projects/project_export.html',
                  context={'task_id': task.task_id})
def bitbake(request, project_id):
    """Run a bitbake build for the project via celery (POST), or show the
    build form. Invalid POSTs re-render the bound form."""
    # TODO: free-form bitbake commands are deliberately not exposed; only the
    # myMeta menu build, u-boot build, kernel build and whole-project build
    # are offered. (Translated from the original Chinese note.)
    proj = Project.objects.get(id=project_id)
    form = BuildModelForm(request.POST) if request.method == 'POST' else BuildModelForm()
    if request.method == 'POST' and form.is_valid():
        task = bitbake_progress.delay(proj.project_path,
                                      proj.project_name,
                                      form.cleaned_data['target'],
                                      form.cleaned_data['command'])
        return render(request, 'projects/bitbake_cmd.html',
                      context={'form': form, 'task_id': task.task_id, 'project_id': project_id})
    return render(request, 'projects/bitbake_cmd.html', context={'form': form})
def mymeta(request, project_id):
    """Overview page: the project's own packages, machines and images."""
    ctx = {
        'project_id': project_id,
        'mypackages': MyPackages.objects.filter(project__id=project_id),
        'mymachines': MyMachine.objects.filter(project__id=project_id),
        'myimages': MyImage.objects.filter(project__id=project_id),
    }
    return render(request, 'projects/mymeta.html', ctx)
def mypackages(request, project_id):
    """List the project's custom packages."""
    ctx = {
        'project_id': project_id,
        'mypackages': MyPackages.objects.filter(project__id=project_id),
    }
    return render(request, 'projects/mypackages.html', ctx)
def myimages(request, project_id):
    """List the project's custom images."""
    ctx = {
        'project_id': project_id,
        'myimages': MyImage.objects.filter(project__id=project_id),
    }
    return render(request, 'projects/myimages.html', ctx)
def mypackage_create(request, project_id):
    """Create a custom package for the project (POST), deriving its bitbake
    `inherit` classes and build directory from the submitted options."""
    form = MyPackagesModelForm()
    if request.method == 'POST':
        form = MyPackagesModelForm(request.POST)
        if form.is_valid():
            pkg = form.save(commit=False)
            pkg.project = Project.objects.get(id=project_id)
            # Map the chosen init system onto the matching bitbake class.
            if pkg.initial_method == 'Systemd':
                pkg.inherit = "systemd"
            elif pkg.initial_method == 'System-V':
                pkg.inherit = 'update-rc.d'
            # Go recipes additionally need the goarch class.
            if pkg.language == 'Golang':
                pkg.inherit += " goarch"
            # Git checkouts are unpacked into the WORKDIR/git subdirectory.
            if pkg.donwload_method == 'git':
                pkg.building_directory = "$(WORKDIR)/git"
            pkg.save()
            return redirect(reverse('projects:mypackages', args=(project_id,)))
    return render(request, 'projects/mypackage_create.html', context={'form': form, 'project_id': project_id})
def mypackage_delete(request, project_id, mypackage_id):
    """On POST, remove the package's recipe directory on disk, then delete
    its DB row."""
    if request.method == 'POST':
        project = Project.objects.get(id=project_id)
        package = MyPackages.objects.get(id=mypackage_id)
        result = shell_cmd_task.delay('rm -rf %s' %
            (path.join(project.project_path, project.project_name,
            'meta', package.catagory, package.name)), project.project_path)
        # NOTE(review): busy-wait polling of the celery result (no sleep).
        while 1:
            if (result._get_task_meta())["status"] == 'FAILURE':
                raise Exception('shell command task error')
            elif (result._get_task_meta())["status"] == 'SUCCESS':
                break
        package.delete()
    return redirect(reverse('projects:mypackages', args=(project_id,)))
def mypackage_detail(request, project_id, mypackage_id):
    """Show one package with its tasks, local files and extra macros; on a
    valid POST, update the package in place."""
    project = Project.objects.get(id=project_id)
    mypackage = MyPackages.objects.get(id=mypackage_id)
    form = MyPackagesModelForm(instance=mypackage)
    tasks = Tasks.objects.filter(package__id = mypackage_id).order_by('id')
    localfiles = LocalFile.objects.filter(package__id = mypackage_id).order_by('id')
    extraMarcos = ExtraMarco.objects.filter(package__id = mypackage_id).order_by('id')
    # Context holds the instance-bound form; a failed POST below is not
    # re-rendered with its errors (the pre-built context is used instead).
    context={
        'form': form,
        'tasks': tasks,
        'localfiles': localfiles,
        'extraMarcos': extraMarcos,
        'project_id': project_id,
        'mypackage_id': mypackage_id,
    }
    if request.method == 'POST':
        form = MyPackagesModelForm(request.POST)
        if form.is_valid():
            form_obj = form.save(commit=False)
            form_obj.project = project
            # Force the primary key so save() updates the existing row
            # rather than inserting a new package.
            form_obj.id = mypackage_id
            form_obj.save()
            return redirect(reverse('projects:mypackage_detail', args=(project_id, mypackage_id)))
    return render(request, 'projects/mypackage_detail.html', context)
def task_create(request, project_id, mypackage_id):
    """Attach a new task to a package (POST) or show the creation form."""
    blank_form = TaskModelForm()
    # The rendered context always carries the unbound form — even after an
    # invalid POST, matching the original behavior (errors not redisplayed).
    context = {
        'form': blank_form,
        'project_id': project_id,
    }
    if request.method == 'POST':
        posted = TaskModelForm(request.POST)
        if posted.is_valid():
            new_task = posted.save(commit=False)
            new_task.package = MyPackages.objects.get(id=mypackage_id)
            new_task.save()
            return redirect(reverse('projects:mypackage_detail', args=(project_id, mypackage_id)))
    return render(request, 'projects/task_create.html', context)
def task_delete(request, project_id, mypackage_id, task_id):
    """Delete one task of a package (POST only), then return to the
    package detail page."""
    if request.method == 'POST':
        Tasks.objects.get(id=task_id).delete()
    return redirect(reverse('projects:mypackage_detail', args=(project_id, mypackage_id)))
def install_task_create(request, project_id, mypackage_id):
    """Create the do_install task pair for a package: one task that creates
    the target directory and one that installs a file or copies a directory.

    The original duplicated the whole directory-creation / bookkeeping /
    Tasks.objects.create sequence in both the 'no' and 'yes' branches; only
    the copy command differs, so that is the only branch kept. Values of
    ``is_directory`` other than 'yes'/'no' create nothing, as before.
    """
    form = InstallTaskForm()
    # Rendered context always carries the unbound form (original behavior:
    # a failed POST does not redisplay validation errors).
    context = {
        'form': form,
        'project_id': project_id,
    }
    if request.method == 'POST':
        form = InstallTaskForm(request.POST)
        if form.is_valid():
            package = MyPackages.objects.get(id=mypackage_id)
            task_type = 'do_install'  # renamed from `type` to avoid shadowing the builtin
            subtype = form.cleaned_data['type']
            is_directory = form.cleaned_data['is_directory']
            if is_directory in ('no', 'yes'):
                name = form.cleaned_data['name']
                source_path = form.cleaned_data['source_path']
                install_path = form.cleaned_data['install_path']
                # First task: ensure the install directory exists.
                op1 = ("install -d %s" % install_path)
                desc1 = ("enter %s" % install_path)
                # Second task: install a single file or copy a whole tree.
                if is_directory == 'no':
                    op2 = ("install -m %s %s/%s %s" % (form.cleaned_data['permission'],
                        source_path, name, install_path))
                    desc2 = ("copy file %s to %s" % (name, install_path))
                else:
                    op2 = ("cp -r %s/%s %s" % (source_path, name, install_path))
                    desc2 = ("copy directory %s to %s" % (name, install_path))
                # Record the path (without the ${D} staging prefix) in FILES_${PN}.
                installed_path = install_path.replace("${D}", "")
                package.files_pn.append(installed_path)
                package.save()
                Tasks.objects.create(package=package, type=task_type, subtype=subtype, op=op1, description=desc1)
                Tasks.objects.create(package=package, type=task_type, subtype=subtype, op=op2, description=desc2)
            return redirect(reverse('projects:mypackage_detail', args=(project_id, mypackage_id)))
    return render(request, 'projects/install_task_create.html', context)
def extra_marco_create(request, project_id, mypackage_id):
    """Attach an extra macro to a package (POST) or show the creation form."""
    blank_form = ExtraMarcoModelForm()
    # Context always holds the unbound form, even after an invalid POST
    # (original behavior: validation errors are not redisplayed).
    context = {
        'form': blank_form,
        'project_id': project_id,
    }
    if request.method == 'POST':
        posted = ExtraMarcoModelForm(request.POST)
        if posted.is_valid():
            macro = posted.save(commit=False)
            macro.package = MyPackages.objects.get(id=mypackage_id)
            macro.save()
            return redirect(reverse('projects:mypackage_detail', args=(project_id, mypackage_id)))
    return render(request, 'projects/extra_marco_create.html', context)
def extra_macro_delete(request, project_id, mypackage_id, macro_id):
    """Delete an ExtraMarco on POST, then return to the package detail page."""
    if request.method == 'POST':
        ExtraMarco.objects.get(id=macro_id).delete()
    return redirect(reverse('projects:mypackage_detail', args=(project_id, mypackage_id)))
def file_create(request, project_id, mypackage_id):
    # TODO: add auto install path target
    """Create a new local file for a package via a background Celery task.

    The file content is written on disk by ``bbfile_localfile_create_task``;
    only a placeholder is stored in the database (see TODO below).
    """
    import time  # local import: the file's import header is outside this edit

    mypackage = MyPackages.objects.get(id=mypackage_id)
    project = Project.objects.get(id=project_id)
    if request.method == 'POST':
        # TODO: Do not SAVE file content!
        form = LocalFileModelForm(request.POST)
        if form.is_valid():
            form_obj = form.save(commit=False)
            result = bbfile_localfile_create_task.delay(
                mypackage.name, mypackage.version, mypackage.type,
                path.join(project.project_path, project.project_name),
                form.cleaned_data['name'], form.cleaned_data['content'],
                mypackage_id)
            # Block until the task finishes, via the public `status` property
            # (was the private _get_task_meta()); sleep to avoid busy-spinning
            # the result backend.
            while True:
                status = result.status
                if status == 'FAILURE':
                    raise Exception('shell command task error')
                if status == 'SUCCESS':
                    break
                time.sleep(0.1)
            form_obj.package = mypackage
            form_obj.type = 'New File'
            form_obj.content = 'Do not SAVE file content!'
            form_obj.save()
            return redirect(reverse('projects:mypackage_detail', args=(project_id, mypackage_id)))
    else:
        form = LocalFileModelForm()
    # Context uses the current form so invalid POSTs re-render with errors.
    context = {
        'form': form,
        'project_id': project_id,
        'mypackage_id': mypackage_id,
    }
    return render(request, 'projects/create_file.html', context)
def file_import(request, project_id, mypackage_id):
    """Import an existing file into a package via a background Celery task.

    The on-disk copy is done by ``bbfile_localfile_create_task``; the view
    then records the file as type 'Import File'.
    """
    import time  # local import: the file's import header is outside this edit

    mypackage = MyPackages.objects.get(id=mypackage_id)
    project = Project.objects.get(id=project_id)
    if request.method == 'POST':
        form = LocalFileModelForm(request.POST)
        if form.is_valid():
            form_obj = form.save(commit=False)
            result = bbfile_localfile_create_task.delay(
                mypackage.name, mypackage.version, mypackage.type,
                path.join(project.project_path, project.project_name),
                form.cleaned_data['path'], form.cleaned_data['name'],
                mypackage_id)
            # Block until the task finishes, via the public `status` property
            # (was the private _get_task_meta()); sleep to avoid busy-spinning.
            while True:
                status = result.status
                if status == 'FAILURE':
                    raise Exception('shell command task error')
                if status == 'SUCCESS':
                    break
                time.sleep(0.1)
            form_obj.package = mypackage
            form_obj.type = 'Import File'
            form_obj.save()
            return redirect(reverse('projects:mypackage_detail', args=(project_id, mypackage_id)))
    else:
        form = LocalFileModelForm()
    # Context uses the current form so invalid POSTs re-render with errors.
    context = {
        'form': form,
        'project_id': project_id,
        'mypackage_id': mypackage_id,
    }
    return render(request, 'projects/import_file.html', context)
def file_generate_patch(request, project_id, mypackage_id):
    """Generate a .patch file for a package via ``patch_generator_task``.

    On success a LocalFile record of type 'patch' is created for the
    generated ``<name>.patch`` file.
    """
    import time  # local import: the file's import header is outside this edit

    mypackage = MyPackages.objects.get(id=mypackage_id)
    project = Project.objects.get(id=project_id)
    if request.method == 'POST':
        form = GeneratePatchFileForm(request.POST)
        if form.is_valid():
            result = patch_generator_task.delay(
                form.cleaned_data['name'], form.cleaned_data['path'],
                path.join(project.project_path, project.project_name),
                mypackage.name, mypackage.version, mypackage.type,
                mypackage.catagory,
                form.cleaned_data['old'], form.cleaned_data['new'])
            # Block until the task finishes, via the public `status` property
            # (was the private _get_task_meta()); sleep to avoid busy-spinning.
            while True:
                status = result.status
                if status == 'FAILURE':
                    raise Exception('shell command task error')
                if status == 'SUCCESS':
                    break
                time.sleep(0.1)
            LocalFile.objects.create(package=mypackage,
                                     name=form.cleaned_data['name'] + '.patch',
                                     type='patch')
            return redirect(reverse('projects:mypackage_detail', args=(project_id, mypackage_id)))
    else:
        form = GeneratePatchFileForm()
    # Context uses the current form so invalid POSTs re-render with errors.
    context = {
        'form': form,
        'project_id': project_id,
        'mypackage_id': mypackage_id,
    }
    return render(request, 'projects/generata_patch_file.html', context)
def file_delete(request, project_id, mypackage_id, file_id):
    """Remove a package file from disk (via Celery) and from the database.

    The on-disk `rm` runs first; the database row is only deleted once the
    shell task succeeds, so a failed removal no longer orphans the record
    (previously the row was deleted before the task even started).
    """
    import time  # local import: the file's import header is outside this edit

    package = MyPackages.objects.get(id=mypackage_id)
    project = Project.objects.get(id=project_id)
    local_file = LocalFile.objects.get(id=file_id)  # fetched once; `file` shadowed a builtin
    if request.method == 'POST':
        result = shell_cmd_task.delay(
            'rm %s' % (path.join(project.project_path, project.project_name,
                                 'meta', package.catagory, package.name,
                                 'files', local_file.name)),
            project.project_path)
        # Block until the task finishes, via the public `status` property
        # (was the private _get_task_meta()); sleep to avoid busy-spinning.
        while True:
            status = result.status
            if status == 'FAILURE':
                raise Exception('shell command task error')
            if status == 'SUCCESS':
                break
            time.sleep(0.1)
        local_file.delete()
    return redirect(reverse('projects:mypackage_detail', args=(project_id, mypackage_id)))
def mypackage_bbfile(request, project_id, mypackage_id):
    """Regenerate the package's .bb recipe file via ``bbfile_task_create``."""
    import time  # local import: the file's import header is outside this edit

    project = Project.objects.get(id=project_id)
    mypackage = MyPackages.objects.get(id=mypackage_id)
    result = bbfile_task_create.delay(
        mypackage.name, mypackage.version, mypackage.type,
        path.join(project.project_path, project.project_name),
        mypackage_id)
    # Block until the task finishes, via the public `status` property
    # (was the private _get_task_meta()); sleep to avoid busy-spinning.
    while True:
        status = result.status
        if status == 'FAILURE':
            raise Exception('shell command task error')
        if status == 'SUCCESS':
            break
        time.sleep(0.1)
    return redirect(reverse('projects:mypackage_detail', args=(project_id, mypackage_id)))
def mypackage_bitbake(request, project_id, mypackage_id):
    """Start a bitbake build of the package and render the progress page."""
    project = Project.objects.get(id=project_id)
    mypackage = MyPackages.objects.get(id=mypackage_id)
    result = bitbake_progress.delay(project.project_path,
                                    project.project_name,
                                    mypackage.name, 'build')
    return render(request, 'projects/mypackage_bitbake.html', {
        'task_id': result.task_id,
        'project_id': project_id,
        'mypackage_id': mypackage_id,
        'package_name': mypackage.name,
    })
def mymachine_create(request, project_id):
    """Create a MyMachine for a project, deriving the filesystem from the flash type."""
    # Flash-type → root filesystem; anything unrecognised defaults to ext4.
    fs_by_flash = {'Spi-Nor': 'jffs2', 'Rawnand': 'ubifs'}
    form = MyMachineModelForm()
    if request.method == 'POST':
        form = MyMachineModelForm(request.POST)
        if form.is_valid():
            machine = form.save(commit=False)
            machine.project = Project.objects.get(id=project_id)
            machine.filesystem = fs_by_flash.get(machine.flash, 'ext4')
            machine.save()
            return redirect(reverse('projects:mymeta', args=(project_id,)))
    return render(request, 'projects/mymachine_create.html',
                  context={'form': form, 'project_id': project_id})
def mymachine(request, project_id):
    """Display and update the project's machine configuration."""
    project = Project.objects.get(id=project_id)
    mymachine = MyMachine.objects.get(project__id=project_id)
    if request.method == 'POST':
        form = MyMachineModelForm(request.POST)
        if form.is_valid():
            form_obj = form.save(commit=False)
            form_obj.project = project
            # Pin name and primary key so the POST updates the existing
            # machine row instead of inserting a new one.
            form_obj.name = mymachine.name
            form_obj.id = mymachine.id
            form_obj.save()
            return redirect(reverse('projects:mymachine', args=(project_id,)))
    else:
        form = MyMachineModelForm(instance=mymachine)
    extraMarcos = MachineExtraMarco.objects.filter(machine__id=mymachine.id).order_by('id')
    # Context uses the current form so an invalid POST re-renders with the
    # user's input and validation errors (previously the instance-bound form
    # captured before rebinding was rendered, dropping the errors).
    context = {
        'form': form,
        'extraMarcos': extraMarcos,
        'project_id': project_id,
        'mymachine_id': mymachine.id,
    }
    return render(request, 'projects/mymachine.html', context)
def extra_machine_marco_create(request, project_id, mymachine_id):
    """Attach an extra macro to a machine; redirect to the machine page on success."""
    if request.method == 'POST':
        form = ExtraMachineMarcoModelForm(request.POST)
        if form.is_valid():
            macro = form.save(commit=False)
            macro.machine = MyMachine.objects.get(id=mymachine_id)
            macro.save()
            return redirect(reverse('projects:mymachine', args=(project_id,)))
    else:
        form = ExtraMachineMarcoModelForm()
    # Context uses the current form so invalid POSTs re-render with validation
    # errors (previously the unbound form captured before rebinding was shown).
    context = {
        'form': form,
        'project_id': project_id,
    }
    return render(request, 'projects/extra_machine_marco_create.html', context)
def mymachine_file(request, project_id, mymachine_id):
    """Regenerate the machine configuration file via ``machinefile_create_task``."""
    import time  # local import: the file's import header is outside this edit

    # (The unused `project` lookup was removed.)
    result = machinefile_create_task.delay(mymachine_id)
    # Block until the task finishes, via the public `status` property
    # (was the private _get_task_meta()); sleep to avoid busy-spinning.
    while True:
        status = result.status
        if status == 'FAILURE':
            raise Exception('shell command task error')
        if status == 'SUCCESS':
            break
        time.sleep(0.1)
    return redirect(reverse('projects:mymachine', args=(project_id,)))
def myimage_create(request, project_id):
    """Create a MyImage for a project; redirect to the image list on success."""
    if request.method == 'POST':
        form = MyImageModelForm(request.POST)
        if form.is_valid():
            image = form.save(commit=False)
            image.project = Project.objects.get(id=project_id)
            image.save()
            return redirect(reverse('projects:myimages', args=(project_id,)))
    else:
        form = MyImageModelForm()
    # Context uses the current form so invalid POSTs re-render with validation
    # errors (previously the unbound form captured before rebinding was shown).
    context = {
        'form': form,
        'project_id': project_id
    }
    return render(request, 'projects/myimage_create.html', context)
def myimage_detail(request, project_id, myimage_id):
    """Display and update a MyImage, including its extra macros."""
    project = Project.objects.get(id=project_id)
    myimage = MyImage.objects.get(id=myimage_id)
    if request.method == 'POST':
        form = MyImageModelForm(request.POST)
        if form.is_valid():
            form_obj = form.save(commit=False)
            form_obj.project = project
            # Pin the primary key so the POST updates the existing image row.
            form_obj.id = myimage_id
            form_obj.save()
            return redirect(reverse('projects:myimage_detail', args=(project_id, myimage_id)))
    else:
        form = MyImageModelForm(instance=myimage)
    extraMarcos = MyImageExtraMarco.objects.filter(image__id=myimage_id).order_by('id')
    # Context uses the current form so an invalid POST re-renders with the
    # user's input and validation errors (previously the instance-bound form
    # captured before rebinding was rendered, dropping the errors).
    context = {
        'form': form,
        'extraMarcos': extraMarcos,
        'project_id': project_id,
        'myimage_id': myimage_id,
    }
    return render(request, 'projects/myimage_detail.html', context)
def myimage_delete(request, project_id, myimage_id):
    """Delete an image's .bb file on disk (via Celery), then its DB record.

    The DB record is only deleted after the shell task succeeds, which the
    original ordering already guaranteed.
    """
    import time  # local import: the file's import header is outside this edit

    project = Project.objects.get(id=project_id)
    myimage = MyImage.objects.get(id=myimage_id)
    if request.method == 'POST':
        result = shell_cmd_task.delay(
            'rm %s' % (path.join(project.project_path, project.project_name,
                                 'meta/recipes-core/images', myimage.name + '.bb')),
            project.project_path)
        # Block until the task finishes, via the public `status` property
        # (was the private _get_task_meta()); sleep to avoid busy-spinning.
        while True:
            status = result.status
            if status == 'FAILURE':
                raise Exception('shell command task error')
            if status == 'SUCCESS':
                break
            time.sleep(0.1)
        myimage.delete()
    return redirect(reverse('projects:myimages', args=(project_id,)))
def image_extra_marco_create(request, project_id, myimage_id):
    """Attach an extra macro to an image; redirect to the image detail on success."""
    if request.method == 'POST':
        form = MyImageExtraMarcoModelForm(request.POST)
        if form.is_valid():
            macro = form.save(commit=False)
            macro.image = MyImage.objects.get(id=myimage_id)
            macro.save()
            return redirect(reverse('projects:myimage_detail', args=(project_id, myimage_id)))
    else:
        form = MyImageExtraMarcoModelForm()
    # Context uses the current form so invalid POSTs re-render with validation
    # errors (previously the unbound form captured before rebinding was shown).
    context = {
        'form': form,
        'project_id': project_id,
    }
    return render(request, 'projects/extra_image_marco_create.html', context)
def image_extra_macro_delete(request, project_id, myimage_id, macro_id):
    """Delete a MyImageExtraMarco on POST, then return to the image detail page."""
    if request.method == 'POST':
        MyImageExtraMarco.objects.get(id=macro_id).delete()
    return redirect(reverse('projects:myimage_detail', args=(project_id, myimage_id)))
def myimagepackage_create(request, project_id, myimage_id):
    """Add a package to an image; redirect to the image detail on success."""
    if request.method == 'POST':
        form = MyImagePackageModelForm(request.POST)
        if form.is_valid():
            image_package = form.save(commit=False)
            image_package.image = MyImage.objects.get(id=myimage_id)
            image_package.save()
            return redirect(reverse('projects:myimage_detail', args=(project_id, myimage_id)))
    else:
        form = MyImagePackageModelForm()
    # Context uses the current form so invalid POSTs re-render with validation
    # errors (previously the unbound form captured before rebinding was shown).
    context = {
        'form': form,
        'project_id': project_id,
    }
    return render(request, 'projects/package_import.html', context)
def myimage_file(request, project_id, myimage_id):
    """Regenerate the image recipe file via ``imagefile_create_task``."""
    import time  # local import: the file's import header is outside this edit

    result = imagefile_create_task.delay(myimage_id)
    # Block until the task finishes, via the public `status` property
    # (was the private _get_task_meta()); sleep to avoid busy-spinning.
    while True:
        status = result.status
        if status == 'FAILURE':
            raise Exception('shell command task error')
        if status == 'SUCCESS':
            break
        time.sleep(0.1)
    return redirect(reverse('projects:myimage_detail', args=(project_id, myimage_id)))
def update_generate(request, project_id, myimage_id):
    """Generate the update-bundle recipe, then start its bitbake build.

    Waits for ``updatefile_create_task`` to finish before launching the
    ``update-bundle-<image>`` build and rendering the progress page.
    """
    import time  # local import: the file's import header is outside this edit

    project = Project.objects.get(id=project_id)
    myimage = MyImage.objects.get(id=myimage_id)
    result = updatefile_create_task.delay(myimage_id)
    # Block until the task finishes, via the public `status` property
    # (was the private _get_task_meta()); sleep to avoid busy-spinning.
    while True:
        status = result.status
        if status == 'FAILURE':
            raise Exception('shell command task error')
        if status == 'SUCCESS':
            break
        time.sleep(0.1)
    result = bitbake_progress.delay(project.project_path,
                                    project.project_name,
                                    'update-bundle-' + myimage.name, 'build')
    context = {
        'task_id': result.task_id,
        'project_id': project_id,
        'myimage_id': myimage_id,
        'image_name': myimage.name,
    }
    return render(request, 'projects/image_bitbake.html', context)
def myimage_bitbake(request, project_id, myimage_id):
    """Start a bitbake build of the image and render the progress page."""
    project = Project.objects.get(id=project_id)
    myimage = MyImage.objects.get(id=myimage_id)
    result = bitbake_progress.delay(project.project_path,
                                    project.project_name,
                                    myimage.name, 'build')
    return render(request, 'projects/image_bitbake.html', {
        'task_id': result.task_id,
        'project_id': project_id,
        'myimage_id': myimage_id,
        'image_name': myimage.name,
    })
def myupdate_bitbake(request, project_id, myimage_id):
    """Start a bitbake build of the image's update bundle and render progress."""
    project = Project.objects.get(id=project_id)
    myimage = MyImage.objects.get(id=myimage_id)
    result = bitbake_progress.delay(project.project_path,
                                    project.project_name,
                                    'update-bundle-' + myimage.name, 'build')
    return render(request, 'projects/image_bitbake.html', {
        'task_id': result.task_id,
        'project_id': project_id,
        'myimage_id': myimage_id,
        'image_name': myimage.name,
    })
def myimage_upload(request, project_id, myimage_id):
    """Upload the built image via ``imagefile_upload_task`` and wait for it."""
    import time  # local import: the file's import header is outside this edit

    result = imagefile_upload_task.delay(myimage_id)
    # Block until the task finishes, via the public `status` property
    # (was the private _get_task_meta()); sleep to avoid busy-spinning.
    while True:
        status = result.status
        if status == 'FAILURE':
            raise Exception('task error')
        if status == 'SUCCESS':
            break
        time.sleep(0.1)
    return redirect(reverse('projects:myimage_detail', args=(project_id, myimage_id)))
def myconf_update(request, project_id):
    """Update the project's build configuration via ``config_set_task``."""
    import time  # local import: the file's import header is outside this edit

    if request.method == 'POST':
        form = MyConfForm(request.POST)
        if form.is_valid():
            result = config_set_task.delay(project_id,
                                           form.cleaned_data['machine'],
                                           form.cleaned_data['distro'],
                                           form.cleaned_data['parallel_make'],
                                           form.cleaned_data['max_parallel_threads'])
            # Block until the task finishes, via the public `status` property
            # (was the private _get_task_meta()); sleep to avoid busy-spinning.
            while True:
                status = result.status
                if status == 'FAILURE':
                    raise Exception('shell command task error')
                if status == 'SUCCESS':
                    break
                time.sleep(0.1)
            return redirect(reverse('projects:mymeta', args=(project_id,)))
    else:
        form = MyConfForm()
    # Context uses the current form so invalid POSTs re-render with validation
    # errors (previously the unbound form captured before rebinding was shown).
    context = {
        'form': form,
        'project_id': project_id,
    }
    return render(request, 'projects/myconf_update.html', context)
def add_wks_file(request, project_id, myimage_id):
    """Create a .wks (wic kickstart) file and record it on the image."""
    import time  # local import: the file's import header is outside this edit

    myimage = MyImage.objects.get(id=myimage_id)
    if request.method == 'POST':
        form = LocalFileModelForm(request.POST)
        if form.is_valid():
            result = create_wks_file.delay(project_id,
                                           form.cleaned_data['name'],
                                           form.cleaned_data['content'])
            # Block until the task finishes, via the public `status` property
            # (was the private _get_task_meta()); sleep to avoid busy-spinning.
            while True:
                status = result.status
                if status == 'FAILURE':
                    raise Exception('shell command task error')
                if status == 'SUCCESS':
                    break
                time.sleep(0.1)
            myimage.wic_file = form.cleaned_data['name']
            myimage.save()
            return redirect(reverse('projects:myimage_detail', args=(project_id, myimage_id)))
    else:
        form = LocalFileModelForm()
    # Context uses the current form so invalid POSTs re-render with validation
    # errors (previously the unbound form captured before rebinding was shown).
    context = {
        'myimage_id': myimage_id,
        'project_id': project_id,
        'form': form
    }
    return render(request, 'projects/create_wks_file.html', context)
"""
def uboot_bitbake(request, project_id, myimage_id):
project = Project.objects.get(id=project_id)
myimage = MyImage.objects.get(id=myimage_id)
machine = MyMachine.objects.get(name = myimage.machine)
result = bitbake_progress.delay(project.project_path,
project.project_name,
machine.uboot, 'build')
context={
'task_id': result.task_id,
'project_id': project_id,
'myimage_id': myimage_id,
}
return render(request, 'projects/uboot_bitbake.html', context)
def kernel_bitbake(request, project_id, myimage_id):
project = Project.objects.get(id=project_id)
myimage = MyImage.objects.get(id=myimage_id)
machine = MyMachine.objects.get(name = myimage.machine)
result = bitbake_progress.delay(project.project_path,
project.project_name,
machine.kernel, 'build')
context={
'task_id': result.task_id,
'project_id': project_id,
'myimage_id': myimage_id,
}
return render(request, 'projects/kernel_bitbake.html', context)
def image_bitbake(request, project_id, myimage_id):
project = Project.objects.get(id=project_id)
myimage = MyImage.objects.get(id=myimage_id)
result = bitbake_progress.delay(project.project_path,
project.project_name,
myimage.name, 'build')
context={
'task_id': result.task_id,
'project_id': project_id,
'myimage_id': myimage_id,
'image_name': myimage.name,
}
return render(request, 'projects/image_bitbake.html', context)
"""
| 38.497034
| 140
| 0.605522
| 3,524
| 32,453
| 5.352157
| 0.061862
| 0.08446
| 0.043158
| 0.054398
| 0.812258
| 0.768676
| 0.726155
| 0.693972
| 0.654525
| 0.637082
| 0
| 0.001408
| 0.277725
| 32,453
| 843
| 141
| 38.497034
| 0.803242
| 0.005885
| 0
| 0.612642
| 0
| 0
| 0.129773
| 0.045317
| 0
| 0
| 0
| 0.001186
| 0
| 1
| 0.068071
| false
| 0
| 0.029173
| 0
| 0.200972
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
cd711c9b7a622485871f925acbdfaff312b017e8
| 124
|
py
|
Python
|
fiction/spiders/config.py
|
0xz-0/DownloadFiction
|
c9fd118baefacd86c864ff89dcc1d18c974649e1
|
[
"MIT"
] | null | null | null |
fiction/spiders/config.py
|
0xz-0/DownloadFiction
|
c9fd118baefacd86c864ff89dcc1d18c974649e1
|
[
"MIT"
] | null | null | null |
fiction/spiders/config.py
|
0xz-0/DownloadFiction
|
c9fd118baefacd86c864ff89dcc1d18c974649e1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2021/4/2 22:38
# @Author : 0xz-0
# @File : config.py
# @Software: PyCharm
| 15.5
| 28
| 0.491935
| 17
| 124
| 3.588235
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149425
| 0.298387
| 124
| 7
| 29
| 17.714286
| 0.551724
| 0.83871
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
26b872a06af5828758ba85f0c502f31712023f46
| 205
|
py
|
Python
|
examples/namespaces/us_measuring_service.py
|
mt3o/injectable
|
0ffc5c758b63d9391134cd822158e1846999b404
|
[
"MIT"
] | 71
|
2018-02-05T04:12:27.000Z
|
2022-02-15T23:08:16.000Z
|
examples/namespaces/us_measuring_service.py
|
Euraxluo/injectable
|
74e640f0911480fb06fa97c1a468c3863541c0fd
|
[
"MIT"
] | 104
|
2018-02-06T23:37:36.000Z
|
2021-08-25T04:50:15.000Z
|
examples/namespaces/us_measuring_service.py
|
Euraxluo/injectable
|
74e640f0911480fb06fa97c1a468c3863541c0fd
|
[
"MIT"
] | 13
|
2019-02-10T18:52:50.000Z
|
2022-01-26T17:12:35.000Z
|
from injectable import injectable
@injectable(qualifier="MEASURING_SERVICE", namespace="US")
class UnitedStatesMeasuringService:
def earth_to_sun_distance(self):
return "94.06 million miles"
| 25.625
| 58
| 0.780488
| 23
| 205
| 6.782609
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022599
| 0.136585
| 205
| 7
| 59
| 29.285714
| 0.858757
| 0
| 0
| 0
| 0
| 0
| 0.185366
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
26fd1258d1f8e7a6105f878b784dce24bd831e4d
| 88
|
py
|
Python
|
qym_api/apps.py
|
shidharthadas/question-your-mentor
|
79a88fbf066663bf0dbb9a40ef46274be98f2089
|
[
"MIT"
] | null | null | null |
qym_api/apps.py
|
shidharthadas/question-your-mentor
|
79a88fbf066663bf0dbb9a40ef46274be98f2089
|
[
"MIT"
] | null | null | null |
qym_api/apps.py
|
shidharthadas/question-your-mentor
|
79a88fbf066663bf0dbb9a40ef46274be98f2089
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class QymApiConfig(AppConfig):
name = 'qym_api'
| 14.666667
| 33
| 0.75
| 11
| 88
| 5.909091
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170455
| 88
| 5
| 34
| 17.6
| 0.890411
| 0
| 0
| 0
| 0
| 0
| 0.079545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
f811ee9b48ccf5772687c383d504a1b84e1fba7a
| 30
|
py
|
Python
|
env/lib/python3.6/ntpath.py
|
ayesha133/personal-website
|
e47ee3df00bf19abe41b74390edc105ada7381e9
|
[
"MIT"
] | 7
|
2018-12-20T14:38:17.000Z
|
2020-08-28T17:50:56.000Z
|
env/lib/python3.6/ntpath.py
|
ayesha133/personal-website
|
e47ee3df00bf19abe41b74390edc105ada7381e9
|
[
"MIT"
] | 10
|
2018-12-04T09:06:59.000Z
|
2021-04-30T20:49:55.000Z
|
env/lib/python3.6/ntpath.py
|
ayesha133/personal-website
|
e47ee3df00bf19abe41b74390edc105ada7381e9
|
[
"MIT"
] | 12
|
2018-10-20T20:40:48.000Z
|
2021-03-08T18:28:07.000Z
|
/usr/lib64/python3.6/ntpath.py
| 30
| 30
| 0.8
| 6
| 30
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 0
| 30
| 1
| 30
| 30
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f8421c2c6bcabfbc2aac25292705844e86ecdbfc
| 89
|
py
|
Python
|
disckeep.py
|
Underpath/disckeep
|
662f4f3b7ba5bc67372bc37867fe0a9f83fc3a13
|
[
"MIT"
] | null | null | null |
disckeep.py
|
Underpath/disckeep
|
662f4f3b7ba5bc67372bc37867fe0a9f83fc3a13
|
[
"MIT"
] | null | null | null |
disckeep.py
|
Underpath/disckeep
|
662f4f3b7ba5bc67372bc37867fe0a9f83fc3a13
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from disckeep.web import app
app.run(debug=True, host="0.0.0.0")
| 14.833333
| 35
| 0.696629
| 18
| 89
| 3.444444
| 0.777778
| 0.096774
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050633
| 0.11236
| 89
| 5
| 36
| 17.8
| 0.734177
| 0.224719
| 0
| 0
| 0
| 0
| 0.102941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
f85233a88d71ac2ecbe391c73aaa2dda9f0728f5
| 26
|
py
|
Python
|
src/repobee_plug/__version.py
|
gauravagrwal/repobee
|
487af09bd674eb7ca55281fd49cbd874a8317067
|
[
"MIT"
] | 39
|
2019-04-02T15:53:23.000Z
|
2022-03-07T02:38:41.000Z
|
src/repobee_plug/__version.py
|
gauravagrwal/repobee
|
487af09bd674eb7ca55281fd49cbd874a8317067
|
[
"MIT"
] | 788
|
2019-03-31T13:55:53.000Z
|
2022-03-29T20:41:02.000Z
|
src/repobee_plug/__version.py
|
slarse/repomate
|
487af09bd674eb7ca55281fd49cbd874a8317067
|
[
"MIT"
] | 18
|
2020-06-15T11:49:50.000Z
|
2022-03-06T19:05:53.000Z
|
__version__ = "3.8.0-dev"
| 13
| 25
| 0.653846
| 5
| 26
| 2.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 0.115385
| 26
| 1
| 26
| 26
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0.346154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f8b4f47f05b94757c490db0e0616c03f7d6d04ae
| 62
|
py
|
Python
|
abc/101/A.py
|
tonko2/AtCoder
|
5d617072517881d226d7c8af09cb88684d41af7e
|
[
"Xnet",
"X11",
"CECILL-B"
] | 2
|
2022-01-22T07:56:58.000Z
|
2022-01-24T00:29:37.000Z
|
abc/101/A.py
|
tonko2/AtCoder
|
5d617072517881d226d7c8af09cb88684d41af7e
|
[
"Xnet",
"X11",
"CECILL-B"
] | null | null | null |
abc/101/A.py
|
tonko2/AtCoder
|
5d617072517881d226d7c8af09cb88684d41af7e
|
[
"Xnet",
"X11",
"CECILL-B"
] | null | null | null |
S = list(map(str, input()))
print(S.count("+") - S.count("-"))
| 31
| 34
| 0.532258
| 10
| 62
| 3.3
| 0.7
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 62
| 2
| 34
| 31
| 0.589286
| 0
| 0
| 0
| 0
| 0
| 0.031746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
3e22c939b8f19053c4d24a57798de284890930c2
| 43
|
py
|
Python
|
tuw_nlp/__init__.py
|
Eszti/tuw-nlp
|
35e4184b4d67eb2cd5085e28535a571a099d5825
|
[
"MIT"
] | null | null | null |
tuw_nlp/__init__.py
|
Eszti/tuw-nlp
|
35e4184b4d67eb2cd5085e28535a571a099d5825
|
[
"MIT"
] | null | null | null |
tuw_nlp/__init__.py
|
Eszti/tuw-nlp
|
35e4184b4d67eb2cd5085e28535a571a099d5825
|
[
"MIT"
] | null | null | null |
__version__ = "0.0.1"
from .utils import *
| 14.333333
| 21
| 0.674419
| 7
| 43
| 3.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.162791
| 43
| 3
| 22
| 14.333333
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
3e313b25864ffcd3decae3d7abb36adc525d163a
| 41
|
py
|
Python
|
MillerArrays/millerArrayCC.py
|
MooersLab/jupyterlabcctbxsnips
|
c5f0947b4e8c4e5839b9b6b15c81c62915103155
|
[
"MIT"
] | null | null | null |
MillerArrays/millerArrayCC.py
|
MooersLab/jupyterlabcctbxsnips
|
c5f0947b4e8c4e5839b9b6b15c81c62915103155
|
[
"MIT"
] | null | null | null |
MillerArrays/millerArrayCC.py
|
MooersLab/jupyterlabcctbxsnips
|
c5f0947b4e8c4e5839b9b6b15c81c62915103155
|
[
"MIT"
] | null | null | null |
miller_arrays[0].cc_one_half_sigma_tau()
| 20.5
| 40
| 0.853659
| 8
| 41
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025
| 0.02439
| 41
| 1
| 41
| 41
| 0.725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
3e38df9857fd05c34501b892a3c9c9792798b211
| 134
|
py
|
Python
|
main.py
|
paulbodean88/py-test-automation-course
|
f7e5fc0416f494356817c27e7b4e212f3884ae07
|
[
"MIT"
] | 1
|
2022-03-25T10:19:05.000Z
|
2022-03-25T10:19:05.000Z
|
main.py
|
paulbodean88/py-test-automation-course
|
f7e5fc0416f494356817c27e7b4e212f3884ae07
|
[
"MIT"
] | null | null | null |
main.py
|
paulbodean88/py-test-automation-course
|
f7e5fc0416f494356817c27e7b4e212f3884ae07
|
[
"MIT"
] | null | null | null |
"""
Entry point towards the final project
"""
if __name__ == '__main__':
print(f'{True} story')
else:
print(f'{False} story')
| 16.75
| 37
| 0.634328
| 18
| 134
| 4.277778
| 0.833333
| 0.155844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186567
| 134
| 8
| 38
| 16.75
| 0.706422
| 0.276119
| 0
| 0
| 0
| 0
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
e45261de7f5505e6b460514289b06c1b8381a2e4
| 1,630
|
py
|
Python
|
smarttm_web/migrations/0035_auto_20200703_2043.py
|
mseemab/smarttm
|
4fbc4f2ae57b8555709fafce41528e7d8137ab82
|
[
"MIT"
] | 1
|
2019-11-25T16:03:58.000Z
|
2019-11-25T16:03:58.000Z
|
smarttm_web/migrations/0035_auto_20200703_2043.py
|
mseemab/smarttm
|
4fbc4f2ae57b8555709fafce41528e7d8137ab82
|
[
"MIT"
] | 32
|
2019-12-08T13:53:49.000Z
|
2022-03-12T00:05:23.000Z
|
smarttm_web/migrations/0035_auto_20200703_2043.py
|
mseemab/smarttm
|
4fbc4f2ae57b8555709fafce41528e7d8137ab82
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.13 on 2020-07-03 15:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('smarttm_web', '0034_summary_ranking'),
]
operations = [
migrations.CreateModel(
name='Member_Summary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('member_name', models.CharField(max_length=100)),
('prep_speech_count', models.IntegerField(default=0)),
('tt_speech_count', models.IntegerField(default=0)),
('eval_speech_count', models.IntegerField(default=0)),
('adv_roles_count', models.IntegerField(default=0)),
('present_count', models.IntegerField(default=0)),
('parts_count', models.IntegerField(default=0)),
('meeting_part_count', models.IntegerField(default=0)),
],
),
migrations.AddField(
model_name='summary',
name='ge_count',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='summary',
name='toe_count',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='summary',
name='ttm_count',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='user',
name='is_active',
field=models.BooleanField(default=True),
),
]
| 33.958333
| 114
| 0.56319
| 155
| 1,630
| 5.735484
| 0.432258
| 0.202475
| 0.281215
| 0.292463
| 0.544432
| 0.434196
| 0.309336
| 0.309336
| 0.309336
| 0.309336
| 0
| 0.029307
| 0.309202
| 1,630
| 47
| 115
| 34.680851
| 0.760213
| 0.028221
| 0
| 0.365854
| 1
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02439
| 0
| 0.097561
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e4a6b9445a1113134cedaac51c1f11f6f648724c
| 5,828
|
py
|
Python
|
conbench/tests/app/test_users.py
|
jonkeane/conbench
|
f096cc2f8b7a85d8e9aea32d8310127cf1923212
|
[
"MIT"
] | 48
|
2020-03-02T16:55:46.000Z
|
2022-02-26T00:35:57.000Z
|
conbench/tests/app/test_users.py
|
jonkeane/conbench
|
f096cc2f8b7a85d8e9aea32d8310127cf1923212
|
[
"MIT"
] | 103
|
2020-03-23T00:22:46.000Z
|
2022-03-31T22:34:40.000Z
|
conbench/tests/app/test_users.py
|
jonkeane/conbench
|
f096cc2f8b7a85d8e9aea32d8310127cf1923212
|
[
"MIT"
] | 6
|
2020-03-04T17:52:35.000Z
|
2022-03-30T11:53:40.000Z
|
from ...tests.app import _asserts
class TestUsers(_asserts.AppEndpointTest):
def test_user_list_authenticated(self, client):
self.authenticate(client)
other = self.create_random_user()
response = client.get("/users/")
self.assert_page(response, "Users")
assert "{}</td>".format(other.email).encode() in response.data
def test_user_list_unauthenticated(self, client):
response = client.get("/users/", follow_redirects=True)
self.assert_login_page(response)
class TestUser(_asserts.AppEndpointTest):
def test_user_get_authenticated(self, client):
self.authenticate(client)
other = self.create_random_user()
response = client.get(f"/users/{other.id}/")
self.assert_page(response, "User")
assert 'value="{}"'.format(other.name).encode() in response.data
def test_user_get_unauthenticated(self, client):
other = self.create_random_user()
response = client.get(f"/users/{other.id}/", follow_redirects=True)
self.assert_login_page(response)
def test_user_get_unknown(self, client):
self.authenticate(client)
response = client.get("/users/unknown/", follow_redirects=True)
self.assert_index_page(response)
assert b"Error getting user." in response.data
def test_user_update_authenticated(self, client):
self.authenticate(client)
other = self.create_random_user()
# go to user page
response = client.get(f"/users/{other.id}/")
self.assert_page(response, "User")
# update user
data = {
"name": "New Name",
"email": other.email,
"csrf_token": self.get_csrf_token(response),
}
response = client.post(f"/users/{other.id}/", data=data, follow_redirects=True)
self.assert_page(response, "User")
assert b"User updated." in response.data
assert b'value="New Name"' in response.data
def test_user_update_unauthenticated(self, client):
other = self.create_random_user()
response = client.post(f"/users/{other.id}/", data={}, follow_redirects=True)
self.assert_login_page(response)
def test_user_update_no_csrf_token(self, client):
self.authenticate(client)
other = self.create_random_user()
response = client.post(f"/users/{other.id}/", data={})
self.assert_page(response, "User")
assert b"The CSRF token is missing." in response.data
# TODO: assert name not updated?
def test_user_update_failed(self, client):
self.authenticate(client)
other = self.create_random_user()
response = client.post(f"/users/{other.id}/", data={"email": "Not an email"})
self.assert_page(response, "User")
assert b"Invalid email address." in response.data
def test_user_delete_authenticated(self, client):
self.authenticate(client)
other = self.create_random_user()
# can get user before
response = client.get(f"/users/{other.id}/")
self.assert_page(response, "User")
assert 'value="{}"'.format(other.name).encode() in response.data
# delete user
data = {"delete": ["Delete"], "csrf_token": self.get_csrf_token(response)}
response = client.post(f"/users/{other.id}/", data=data, follow_redirects=True)
self.assert_page(response, "Users")
assert b"User deleted." in response.data
# cannot get user after
response = client.get(f"/users/{other.id}/", follow_redirects=True)
self.assert_index_page(response)
assert b"Error getting user." in response.data
def test_user_delete_unauthenticated(self, client):
other = self.create_random_user()
data = {"delete": ["Delete"]}
response = client.post(f"/users/{other.id}/", data=data, follow_redirects=True)
self.assert_login_page(response)
def test_user_delete_no_csrf_token(self, client):
    """A delete request without a CSRF token is rejected with an error."""
    self.authenticate(client)
    target = self.create_random_user()
    payload = {"delete": ["Delete"]}
    response = client.post(f"/users/{target.id}/", data=payload, follow_redirects=True)
    self.assert_page(response, "User")
    assert b"The CSRF token is missing." in response.data
    # TODO: test user not deleted?
class TestUserCreate(_asserts.AppEndpointTest):
    """Endpoint tests for the user-creation form at /users/create/."""

    def test_user_create_get_authenticated(self, client):
        """The create form is served to a logged-in user."""
        self.authenticate(client)
        response = client.get("/users/create/")
        self.assert_page(response, "User Create")

    def test_user_create_get_unauthenticated(self, client):
        """Anonymous GETs are redirected to the login page."""
        response = client.get("/users/create/", follow_redirects=True)
        self.assert_login_page(response)

    def test_user_create_post_authenticated(self, client):
        """A valid submission creates the user and flashes a confirmation."""
        self.authenticate(client)
        # Fetch the form first so a CSRF token is available.
        response = client.get("/users/create/")
        self.assert_page(response, "User Create")
        payload = {
            "email": "new@example.com",
            "name": "New user",
            "password": "password",
            "csrf_token": self.get_csrf_token(response),
        }
        response = client.post("/users/create/", data=payload, follow_redirects=True)
        self.assert_page(response, "Users")
        assert b"User created." in response.data

    def test_user_create_post_unauthenticated(self, client):
        """Anonymous POSTs are redirected to the login page."""
        response = client.post("/users/create/", data={}, follow_redirects=True)
        self.assert_login_page(response)

    def test_user_create_post_no_csrf_token(self, client):
        """A POST without a CSRF token re-renders the form with an error."""
        self.authenticate(client)
        response = client.post("/users/create/", data={})
        self.assert_page(response, "User Create")
        assert b"The CSRF token is missing." in response.data
| 39.378378
| 87
| 0.652539
| 710
| 5,828
| 5.164789
| 0.104225
| 0.075266
| 0.050995
| 0.077993
| 0.866376
| 0.82438
| 0.782929
| 0.692937
| 0.663485
| 0.640033
| 0
| 0
| 0.222375
| 5,828
| 147
| 88
| 39.646259
| 0.809135
| 0.030199
| 0
| 0.594595
| 0
| 0
| 0.137008
| 0
| 0
| 0
| 0
| 0.006803
| 0.342342
| 1
| 0.153153
| false
| 0.009009
| 0.009009
| 0
| 0.189189
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e4ae49c17206574adb00f9ec5596d9460582b41d
| 204
|
py
|
Python
|
recipe/home/views.py
|
degerahmet/Recipe-Web-App
|
ec40117cdcae2c8cdef42f2014ca8227ae63f758
|
[
"MIT"
] | null | null | null |
recipe/home/views.py
|
degerahmet/Recipe-Web-App
|
ec40117cdcae2c8cdef42f2014ca8227ae63f758
|
[
"MIT"
] | null | null | null |
recipe/home/views.py
|
degerahmet/Recipe-Web-App
|
ec40117cdcae2c8cdef42f2014ca8227ae63f758
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
# Create your views here.
def index(request):
    """Serve the landing page of the home app."""
    template = "home/index.html"
    return render(request, template)
| 29.142857
| 44
| 0.803922
| 28
| 204
| 5.857143
| 0.678571
| 0.182927
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122549
| 204
| 7
| 45
| 29.142857
| 0.916201
| 0.112745
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.6
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 4
|
e4ead59992a7db366501dbca7b00033a401c3f43
| 56
|
py
|
Python
|
MelodyHouse/venv/lib/python3.7/site-packages/decode/plot/__init__.py
|
sust-swe/MelodyHouse
|
35020f8fd7d2da8ef13e8f4fe0163c0185203994
|
[
"Apache-2.0"
] | 1
|
2019-10-05T18:20:27.000Z
|
2019-10-05T18:20:27.000Z
|
MelodyHouse/venv/lib/python3.7/site-packages/decode/plot/__init__.py
|
sust-swe/MelodyHouse
|
35020f8fd7d2da8ef13e8f4fe0163c0185203994
|
[
"Apache-2.0"
] | null | null | null |
MelodyHouse/venv/lib/python3.7/site-packages/decode/plot/__init__.py
|
sust-swe/MelodyHouse
|
35020f8fd7d2da8ef13e8f4fe0163c0185203994
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from .functions import *
# Drop the submodule name itself so only the re-exported functions
# remain visible in the package namespace.
del functions
| 11.2
| 24
| 0.732143
| 8
| 56
| 5.125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 0.178571
| 56
| 4
| 25
| 14
| 0.869565
| 0.232143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
901bc08339dafbeb68a81b68ea221acba88bb329
| 453
|
py
|
Python
|
iridium/test/keystone_tests/keystone_test.py
|
Toure/Rhea
|
fda0e4cd7c568943725245393bfe762bc858e917
|
[
"Apache-2.0"
] | 1
|
2015-08-19T15:55:46.000Z
|
2015-08-19T15:55:46.000Z
|
iridium/test/keystone_tests/keystone_test.py
|
Toure/Rhea
|
fda0e4cd7c568943725245393bfe762bc858e917
|
[
"Apache-2.0"
] | null | null | null |
iridium/test/keystone_tests/keystone_test.py
|
Toure/Rhea
|
fda0e4cd7c568943725245393bfe762bc858e917
|
[
"Apache-2.0"
] | null | null | null |
# Module metadata for the keystone test suite.
__author__ = "Toure Dunnon"
__license__ = "Apache License 2.0"
__version__ = "0.1"
__email__ = "toure@redhat.com"
__status__ = "Alpha"

import pytest

# Placeholder keystone tests: each stub below passes unconditionally.
# TODO: implement real assertions for each scenario.


def test_authentication():
    """Stub: authenticate against keystone."""
    pass


def test_user_creation():
    """Stub: create a user."""
    pass


def test_tenant_creation():
    """Stub: create a tenant."""
    pass


def test_project_creation():
    """Stub: create a project."""
    pass


def test_rule_creation():
    """Stub: create a rule."""
    pass


def test_user_deletion():
    """Stub: delete a user."""
    pass


def test_tenant_deletion():
    """Stub: delete a tenant."""
    pass


def test_rule_deletion():
    """Stub: delete a rule."""
    pass
| 11.615385
| 34
| 0.701987
| 58
| 453
| 4.87931
| 0.448276
| 0.19788
| 0.272085
| 0.268551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01108
| 0.203091
| 453
| 38
| 35
| 11.921053
| 0.772853
| 0
| 0
| 0.363636
| 0
| 0
| 0.119205
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0.363636
| 0.045455
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
5f4a1c7b3332917529e073d77adebbdf5d881874
| 59
|
py
|
Python
|
WEEKS/CD_Sata-Structures/_RESOURCES/course-work/Python-Brain-Teasers/loop.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
WEEKS/CD_Sata-Structures/_RESOURCES/course-work/Python-Brain-Teasers/loop.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
WEEKS/CD_Sata-Structures/_RESOURCES/course-work/Python-Brain-Teasers/loop.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
# Brain teaser: rebinding the loop variable inside the body has no effect
# on the iteration -- `for` reassigns `n` from the range iterator on every
# pass, so this prints "0 1 2 3 4 " followed by a newline.
for n in range(5):
    print(n, end=" ")
    n = 5
print()
| 11.8
| 21
| 0.474576
| 11
| 59
| 2.545455
| 0.636364
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0.322034
| 59
| 4
| 22
| 14.75
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
5f67fce84244aa0ddfdd38ddae2ee9ab125397f8
| 218
|
py
|
Python
|
ctr_data/avazu.py
|
bbruceyuan/ctr-data
|
b729a55c3966e85db2e36fa538c749411477dd01
|
[
"MIT"
] | null | null | null |
ctr_data/avazu.py
|
bbruceyuan/ctr-data
|
b729a55c3966e85db2e36fa538c749411477dd01
|
[
"MIT"
] | null | null | null |
ctr_data/avazu.py
|
bbruceyuan/ctr-data
|
b729a55c3966e85db2e36fa538c749411477dd01
|
[
"MIT"
] | null | null | null |
from torch.utils.data import Dataset
class AvazuDataset(Dataset):
    """Skeleton torch Dataset for Avazu CTR data (methods not yet implemented).

    Fixes over the original stub:
    - the default ``num_sample=NUM_SAMPLE`` referenced an undefined name, so
      executing the class body raised ``NameError`` immediately;
    - ``__getitem__`` was missing the mandatory ``index`` parameter required
      by the Dataset protocol, so any ``dataset[i]`` access raised TypeError.
    """

    def __init__(self, num_sample=None):
        # num_sample: optional cap on the number of rows to expose.
        # TODO: load the Avazu data here.
        pass

    def __len__(self):
        # TODO: return the number of samples.
        pass

    def __getitem__(self, index):
        # TODO: return the sample (features/label) at ``index``.
        pass
| 16.769231
| 46
| 0.614679
| 25
| 218
| 4.8
| 0.64
| 0.15
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.311927
| 218
| 13
| 47
| 16.769231
| 0.8
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0.375
| 0.125
| 0
| 0.625
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
5f750b7e64a28c51965fe05d6ca929edaf0d0f1c
| 68
|
py
|
Python
|
lib/demo/__init__.py
|
dpla/akara
|
432f14782152dd19931bdbd8f9fad19b5932426d
|
[
"Apache-2.0"
] | 5
|
2015-01-30T03:50:37.000Z
|
2015-09-23T00:46:11.000Z
|
lib/demo/__init__.py
|
dpla/akara
|
432f14782152dd19931bdbd8f9fad19b5932426d
|
[
"Apache-2.0"
] | null | null | null |
lib/demo/__init__.py
|
dpla/akara
|
432f14782152dd19931bdbd8f9fad19b5932426d
|
[
"Apache-2.0"
] | 3
|
2015-03-09T19:16:56.000Z
|
2019-09-19T02:41:29.000Z
|
# This is a Python module.
# It contains demo extensions for Akara.
| 22.666667
| 40
| 0.75
| 11
| 68
| 4.636364
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191176
| 68
| 2
| 41
| 34
| 0.927273
| 0.926471
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5f7531d53b20e9545029c8fb46fb85b91f6408ae
| 166
|
py
|
Python
|
personalPages/compile_assets.py
|
Toliveira97/PersonalBlog
|
97b971e06821598be53166609ab685647d33f940
|
[
"MIT"
] | null | null | null |
personalPages/compile_assets.py
|
Toliveira97/PersonalBlog
|
97b971e06821598be53166609ab685647d33f940
|
[
"MIT"
] | 4
|
2018-06-28T17:11:33.000Z
|
2019-09-24T11:04:29.000Z
|
personalPages/compile_assets.py
|
Toliveira97/PersonalBlog
|
97b971e06821598be53166609ab685647d33f940
|
[
"MIT"
] | 2
|
2018-10-04T16:12:06.000Z
|
2018-10-04T17:10:16.000Z
|
import sass
import os
# Source directory for stylesheets and destination for the compiled CSS.
SASS_DIR='sass'
CSS_OUTPUT_DIR='static/personalPages/css/'
# Compile every stylesheet under SASS_DIR into minified CSS.
# NOTE(review): `os` is imported but unused in this script -- confirm leftover.
sass.compile(dirname=(SASS_DIR, CSS_OUTPUT_DIR), output_style='compressed')
| 20.75
| 75
| 0.795181
| 25
| 166
| 5
| 0.48
| 0.112
| 0.192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078313
| 166
| 8
| 75
| 20.75
| 0.816993
| 0.024096
| 0
| 0
| 0
| 0
| 0.242236
| 0.15528
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
5f9947dd448a67a4d70b5f5e13227af470552716
| 1,386
|
py
|
Python
|
generate-badchars.py
|
BleepSec/random-exploits
|
26ec45c465d149f55973aef337024c0ea34b7e9e
|
[
"MIT"
] | null | null | null |
generate-badchars.py
|
BleepSec/random-exploits
|
26ec45c465d149f55973aef337024c0ea34b7e9e
|
[
"MIT"
] | null | null | null |
generate-badchars.py
|
BleepSec/random-exploits
|
26ec45c465d149f55973aef337024c0ea34b7e9e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
import sys

# Bad characters passed on the command line; base 0 lets callers write
# 0x41, 0o101, or plain 65 interchangeably.
badChars = [int(arg, 0) for arg in sys.argv[1:]]

print("[+] Generating Test Chars: ")

# Build a double-quoted string of "\xNN" escapes for every byte value
# 0x00-0xFF that was not listed as bad.
testBed = '"%s"' % "".join(
    "\\x%02x" % value for value in range(0x100) if value not in badChars
)

print("[+] Done generating! \n\n")
print(testBed + "\n\n")
# With no bad chars the output covers "\x00" through "\xff".
| 44.709677
| 1,028
| 0.7114
| 305
| 1,386
| 3.232787
| 0.937705
| 0.024341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.251333
| 0.05267
| 1,386
| 31
| 1,028
| 44.709677
| 0.499619
| 0.756133
| 0
| 0
| 0
| 0
| 0.192878
| 0
| 0
| 1
| 0.023739
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.214286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5fcc3c76a068df68a597bcd0e0ded33269e7e230
| 54
|
py
|
Python
|
src/abaqus/__init__.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | 7
|
2022-01-21T09:15:45.000Z
|
2022-02-15T09:31:58.000Z
|
src/abaqus/__init__.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
src/abaqus/__init__.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
from .Canvas.Highlight import *
from .abaqus import *
| 18
| 31
| 0.759259
| 7
| 54
| 5.857143
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 54
| 2
| 32
| 27
| 0.891304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
39561cc873c9278a94a2878951771e90f438decc
| 92
|
py
|
Python
|
sandbox/testapp/models.py
|
zetahernandez/django-cid
|
1a41d1739ba768cecc5fbc2eede80db9a9cc2898
|
[
"BSD-3-Clause"
] | 14
|
2019-04-24T15:15:08.000Z
|
2022-03-23T17:27:14.000Z
|
sandbox/testapp/models.py
|
zetahernandez/django-cid
|
1a41d1739ba768cecc5fbc2eede80db9a9cc2898
|
[
"BSD-3-Clause"
] | 16
|
2015-04-12T23:59:32.000Z
|
2018-06-06T19:33:10.000Z
|
sandbox/testapp/models.py
|
Polyconseil/cid
|
595f64a51a71bd4a1d47eefdb56002d72629d603
|
[
"BSD-3-Clause"
] | 8
|
2015-07-03T20:37:12.000Z
|
2018-06-06T19:19:04.000Z
|
from django.db import models
class Item(models.Model):
    """Minimal model with a single integer column."""
    number = models.IntegerField()
| 15.333333
| 34
| 0.73913
| 12
| 92
| 5.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163043
| 92
| 5
| 35
| 18.4
| 0.883117
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
396b0baa35e69bd27839a3eefbbcb06bcd6e9973
| 197
|
py
|
Python
|
Data Structures/Arrays/Arrays Left Rotation.py
|
vipmunot/HackerRank
|
39d1beb97545592da5cec6e4b9ae0fce32f5ec39
|
[
"MIT"
] | null | null | null |
Data Structures/Arrays/Arrays Left Rotation.py
|
vipmunot/HackerRank
|
39d1beb97545592da5cec6e4b9ae0fce32f5ec39
|
[
"MIT"
] | null | null | null |
Data Structures/Arrays/Arrays Left Rotation.py
|
vipmunot/HackerRank
|
39d1beb97545592da5cec6e4b9ae0fce32f5ec39
|
[
"MIT"
] | null | null | null |
# Read "n d" from the first input line: n is the array length,
# d the number of left rotations.
n,d = input().split(' ')
n = int(n)
d = int(d)
# Read the space-separated array values from the second line.
a = [int(v) for v in input().split(' ')]
def left_rotation(a, n, d):
    """Return list *a* rotated left by *d* positions (*n* is accepted but unused)."""
    head, tail = a[:d], a[d:]
    return tail + head
# Rotate and print the result space-separated on one line.
result = left_rotation(a,n,d)
print(*result,sep=' ')
| 21.888889
| 40
| 0.548223
| 38
| 197
| 2.789474
| 0.421053
| 0.075472
| 0.245283
| 0.264151
| 0.283019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192893
| 197
| 9
| 41
| 21.888889
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0.125
| 0.25
| 0.125
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
399ba0b88ad5a2311ae6883b3c957d05eb0540af
| 381
|
py
|
Python
|
swagger_client/models/__init__.py
|
michaelaye/opus_openapi
|
4906107e13c0c23222a469da3904795c77062042
|
[
"MIT"
] | 1
|
2019-07-07T18:20:56.000Z
|
2019-07-07T18:20:56.000Z
|
swagger_client/models/__init__.py
|
michaelaye/opus_openapi
|
4906107e13c0c23222a469da3904795c77062042
|
[
"MIT"
] | null | null | null |
swagger_client/models/__init__.py
|
michaelaye/opus_openapi
|
4906107e13c0c23222a469da3904795c77062042
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# flake8: noqa
"""
Ring-Moon Systems OpenAPI
This is a simple API wrapping the OPUS API of the PDS Ring-Moon Systems node. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: kmichael.aye@gmail.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
| 22.411765
| 95
| 0.716535
| 58
| 381
| 4.62069
| 0.775862
| 0.059701
| 0.11194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026059
| 0.194226
| 381
| 16
| 96
| 23.8125
| 0.846906
| 0.800525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
39dc0fa636544463eb0530352708797a7985007e
| 52
|
py
|
Python
|
main.py
|
Nepumi-Jr/Line-Schedule-Notification
|
1eac63dae919923eb4c8eab76dec7937254215f8
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
Nepumi-Jr/Line-Schedule-Notification
|
1eac63dae919923eb4c8eab76dec7937254215f8
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
Nepumi-Jr/Line-Schedule-Notification
|
1eac63dae919923eb4c8eab76dec7937254215f8
|
[
"Apache-2.0"
] | null | null | null |
from src import discordStuff
# Entry point: start the bot defined in src/discordStuff.
# NOTE(review): runs at import time; consider an `if __name__ == "__main__":` guard.
discordStuff.runBot()
| 13
| 28
| 0.826923
| 6
| 52
| 7.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 52
| 3
| 29
| 17.333333
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
f2e6a5d7deb9b5042a8bd3410501e267b5ef9bbc
| 1,714
|
py
|
Python
|
tasks/tests/integration/test_mission.py
|
heolin123/funcrowd
|
20167783de208394c09ed0429a5f02ec6dd79c42
|
[
"MIT"
] | null | null | null |
tasks/tests/integration/test_mission.py
|
heolin123/funcrowd
|
20167783de208394c09ed0429a5f02ec6dd79c42
|
[
"MIT"
] | 11
|
2019-11-12T23:26:45.000Z
|
2021-06-10T17:37:23.000Z
|
tasks/tests/integration/test_mission.py
|
heolin123/funcrowd
|
20167783de208394c09ed0429a5f02ec6dd79c42
|
[
"MIT"
] | null | null | null |
import pytest
from django.test import Client


@pytest.mark.django_db
def test_mission_list(task, user1):
    """Mission list and detail endpoints: happy path plus 404 for a missing id."""
    client = Client()
    client.force_login(user1)
    # Mission list
    response = client.get('/api/v1/missions/')
    assert response.status_code == 200
    assert response.data == [
        {'id': 1, 'name': 'Test mission', 'description': '', 'instruction': '',
         'tasks_count': 1, 'achievements_count': 0, 'metadata': {}, 'total_exp': 10},
        {'id': 2, 'name': 'Test mission other', 'description': '', 'instruction': '',
         'tasks_count': None, 'achievements_count': 0, 'metadata': {}},
    ]
    # Mission detail, mission found
    mission_id = 1
    response = client.get('/api/v1/missions/{0}/'.format(mission_id))
    assert response.status_code == 200
    assert response.data == {'id': 1, 'name': 'Test mission', 'description': '', 'instruction': '',
                             'tasks_count': 1, 'achievements_count': 0, 'metadata': {}, 'total_exp': None}
    # Mission detail, mission not found
    mission_id = 3
    response = client.get('/api/v1/missions/{0}/'.format(mission_id))
    assert response.status_code == 404
    assert response.data["detail"].code == "not_found"
    # NOTE(review): keep this function name unique within the module --
    # a later redefinition of `test_mission_list` would shadow this test
    # and pytest would silently skip it.
@pytest.mark.django_db
def test_mission_detail_with_items(task_with_items, user1):
    """Mission detail endpoint when the mission's task has items.

    Renamed from ``test_mission_list``: the original duplicated the name of
    the test defined earlier in this module, so Python's later binding
    shadowed the first test and pytest never collected it (flake8 F811).
    """
    client = Client()
    client.force_login(user1)
    # Mission detail, mission found
    mission_id = 1
    response = client.get('/api/v1/missions/{0}/'.format(mission_id))
    assert response.status_code == 200
    assert response.data == {'id': 1, 'name': 'Test mission', 'description': '', 'instruction': '',
                             'tasks_count': 1, 'achievements_count': 0, 'metadata': {}, 'total_exp': 10}
| 38.088889
| 106
| 0.623104
| 203
| 1,714
| 5.103448
| 0.241379
| 0.108108
| 0.065637
| 0.07722
| 0.790541
| 0.790541
| 0.761583
| 0.761583
| 0.681467
| 0.604247
| 0
| 0.030191
| 0.207701
| 1,714
| 44
| 107
| 38.954545
| 0.732695
| 0.061844
| 0
| 0.5
| 0
| 0
| 0.27199
| 0.039301
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
842371bf9970b6b300283cbf98217ed99880f3ec
| 1,856
|
py
|
Python
|
pyaz/bot/email/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/bot/email/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/bot/email/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
'''
Manage the email Channel on a bot.
'''
from ... pyaz_utils import _call_az
def create(email_address, name, password, resource_group, add_disabled=None):
    '''
    Create the Email Channel on a bot.
    Required Parameters:
    - email_address -- The email address for the bot.
    - name -- The resource name of the bot. Bot name must be between 4 and 42 characters in length. Bot name can only have the following characters -, a - z, A - Z, 0 - 9, and _.
    - password -- The email password for the bot.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    Optional Parameters:
    - add_disabled -- Add the channel in a disabled state
    '''
    # NOTE: _call_az receives locals(), so defining any extra local variable
    # before this call would change what gets forwarded -- keep the body a
    # single return statement.
    return _call_az("az bot email create", locals())
def show(name, resource_group, with_secrets=None):
    '''
    Get details of the email Channel on a bot
    Required Parameters:
    - name -- The resource name of the bot. Bot name must be between 4 and 42 characters in length. Bot name can only have the following characters -, a - z, A - Z, 0 - 9, and _.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    Optional Parameters:
    - with_secrets -- Show secrets in response for the channel.
    '''
    # NOTE: _call_az receives locals(); do not introduce local variables
    # before this call or they would be forwarded as arguments.
    return _call_az("az bot email show", locals())
def delete(name, resource_group):
    '''
    Delete the email Channel on a bot
    Required Parameters:
    - name -- The resource name of the bot. Bot name must be between 4 and 42 characters in length. Bot name can only have the following characters -, a - z, A - Z, 0 - 9, and _.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # NOTE: _call_az receives locals(); do not introduce local variables
    # before this call or they would be forwarded as arguments.
    return _call_az("az bot email delete", locals())
| 40.347826
| 178
| 0.688578
| 280
| 1,856
| 4.478571
| 0.210714
| 0.093301
| 0.047847
| 0.054226
| 0.705742
| 0.705742
| 0.636364
| 0.636364
| 0.605263
| 0.605263
| 0
| 0.010409
| 0.223599
| 1,856
| 45
| 179
| 41.244444
| 0.85982
| 0.735453
| 0
| 0
| 0
| 0
| 0.145889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.142857
| 0.142857
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
843e33d875073b03164ee6da878cfb2c336d8965
| 99
|
py
|
Python
|
stackexchange/rest/__init__.py
|
sourcepirate/stackexchange
|
0c8f3491d5ea527e4439c9926c9b5542261dae53
|
[
"MIT"
] | 2
|
2015-08-22T02:52:24.000Z
|
2015-11-04T17:57:01.000Z
|
stackexchange/rest/__init__.py
|
plasmashadow/stackexchange
|
0c8f3491d5ea527e4439c9926c9b5542261dae53
|
[
"MIT"
] | 2
|
2018-09-28T03:13:00.000Z
|
2020-10-18T02:24:46.000Z
|
stackexchange/rest/__init__.py
|
plasmashadow/stackexchange
|
0c8f3491d5ea527e4439c9926c9b5542261dae53
|
[
"MIT"
] | 1
|
2018-09-26T08:05:08.000Z
|
2018-09-26T08:05:08.000Z
|
__author__ = 'plasmashadow'
from .answer import *
from .questions import *
from .search import *
| 14.142857
| 27
| 0.737374
| 11
| 99
| 6.272727
| 0.636364
| 0.289855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171717
| 99
| 6
| 28
| 16.5
| 0.841463
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
8441bfc790fba04dbf9c58239a5e6a9ded7e50b3
| 60
|
py
|
Python
|
tests/__init__.py
|
Bram-Hub/Forseti
|
6431f710f9b4b9d9c79bf14f3e7d66e2ed57bd34
|
[
"MIT"
] | 4
|
2018-08-14T23:08:14.000Z
|
2020-09-08T19:04:03.000Z
|
tests/__init__.py
|
Bram-Hub/Forseti
|
6431f710f9b4b9d9c79bf14f3e7d66e2ed57bd34
|
[
"MIT"
] | 7
|
2015-04-23T21:28:20.000Z
|
2016-02-26T16:10:48.000Z
|
tests/__init__.py
|
Bram-Hub/Forseti
|
6431f710f9b4b9d9c79bf14f3e7d66e2ed57bd34
|
[
"MIT"
] | 3
|
2019-01-16T21:04:36.000Z
|
2021-02-16T09:43:48.000Z
|
"""
Test directory for ensuring working Forseti library
"""
| 15
| 51
| 0.75
| 7
| 60
| 6.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 60
| 3
| 52
| 20
| 0.882353
| 0.85
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
845f5c2e957974d57d1ea92e95dbf45d7b8d38e0
| 154
|
py
|
Python
|
exe30.py
|
ClevertonCodev/Python
|
5fc9c372ba9053cdbeefcae110b3f32474ce8f74
|
[
"MIT"
] | null | null | null |
exe30.py
|
ClevertonCodev/Python
|
5fc9c372ba9053cdbeefcae110b3f32474ce8f74
|
[
"MIT"
] | null | null | null |
exe30.py
|
ClevertonCodev/Python
|
5fc9c372ba9053cdbeefcae110b3f32474ce8f74
|
[
"MIT"
] | null | null | null |
# Ask the user for an integer and report whether it is even (PAR) or odd (impar).
numero = int(input('Me diga um número qualquer! '))
if numero % 2 == 0:
    print('Esse número é PAR')
else:
    print('Esse número é impar')
| 25.666667
| 51
| 0.649351
| 25
| 154
| 4
| 0.72
| 0.18
| 0.3
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.207792
| 154
| 6
| 52
| 25.666667
| 0.803279
| 0
| 0
| 0
| 0
| 0
| 0.412903
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ffbf53311507416bb8b3031bd4c1853c8d73c0bd
| 33
|
py
|
Python
|
python/testData/codeInsight/smartEnter/multilineListLiteralItemFollowedByComment.py
|
alexey-anufriev/intellij-community
|
ffcd46f14e630acdefcc76e2bfc7c43d2449013a
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/codeInsight/smartEnter/multilineListLiteralItemFollowedByComment.py
|
alexey-anufriev/intellij-community
|
ffcd46f14e630acdefcc76e2bfc7c43d2449013a
|
[
"Apache-2.0"
] | 1
|
2020-07-30T19:04:47.000Z
|
2020-07-30T19:04:47.000Z
|
python/testData/codeInsight/smartEnter/multilineListLiteralItemFollowedByComment.py
|
bradleesand/intellij-community
|
750ff9c10333c9c1278c00dbe8d88c877b1b9749
|
[
"Apache-2.0"
] | 1
|
2020-10-15T05:56:42.000Z
|
2020-10-15T05:56:42.000Z
|
xs = [
42<caret> # comment
]
| 11
| 24
| 0.484848
| 4
| 33
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.333333
| 33
| 3
| 25
| 11
| 0.636364
| 0.212121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ffc85816b0d971cef21b4cf57dc18ba8a0fca8b8
| 994
|
py
|
Python
|
RegressionLogistic/explore.py
|
ThomasCaud/python
|
369405f37201a460057ec01c4f7ba4aa32d7e040
|
[
"MIT"
] | null | null | null |
RegressionLogistic/explore.py
|
ThomasCaud/python
|
369405f37201a460057ec01c4f7ba4aa32d7e040
|
[
"MIT"
] | null | null | null |
RegressionLogistic/explore.py
|
ThomasCaud/python
|
369405f37201a460057ec01c4f7ba4aa32d7e040
|
[
"MIT"
] | null | null | null |
import numpy as np
from numpy import exp, log
# import scipy as scipy
# from scipy import optimize
# Load the observations; columns are split below into: success indicator,
# "is not GI" flag, and semester number (presumably one row per student --
# confirm against data.txt).
data = np.loadtxt('data.txt')
hasSucceeded = data[:,0]
isNotGI = data[:,1]
semesterNumber = data[:,2]
# n -- number of observations.
n, = hasSucceeded.shape
# Question 3 - Calcul de la vraisemblance
def probX(alpha):
    """Logistic (sigmoid) function: exp(alpha) / (1 + exp(alpha))."""
    numerator = exp(alpha)
    return numerator / (1 + numerator)
def probXi(alpha, Yi):
    """Bernoulli likelihood of observation Yi given success probability probX(alpha)."""
    p = probX(alpha)
    return (p ** Yi) * ((1 - p) ** (1 - Yi))
def vraisemblance(theta):
    """Likelihood of the data under logistic-regression parameters theta = (a, b, c)."""
    a,b,c = theta
    # TODO: check the use of semesterNumber, which is not a boolean variable!
    alpha = a + b*isNotGI + c*semesterNumber
    # Product of per-observation Bernoulli likelihoods.
    return np.prod(probXi(alpha, hasSucceeded))
# Draft for question 4 - log-likelihood
def logvraisemblance(theta):
    """Log-likelihood of the data under parameters theta = (a, b, c)."""
    a,b,c = theta
    # TODO: check the use of semesterNumber, which is not a boolean variable!
    alpha = a + b*isNotGI + c*semesterNumber
    return np.sum(hasSucceeded*(alpha) + log(1-(1/(1+exp(-(alpha))))))
# scipy.optimize.minize(-1 * logvraisemblance, (0,0,0))
| 29.235294
| 104
| 0.714286
| 149
| 994
| 4.765101
| 0.355705
| 0.011268
| 0.025352
| 0.022535
| 0.371831
| 0.371831
| 0.371831
| 0.371831
| 0.371831
| 0.371831
| 0
| 0.017544
| 0.139839
| 994
| 33
| 105
| 30.121212
| 0.812866
| 0.386318
| 0
| 0.210526
| 0
| 0
| 0.013311
| 0
| 0
| 0
| 0
| 0.030303
| 0
| 1
| 0.210526
| false
| 0
| 0.105263
| 0.105263
| 0.526316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
f24508f497ca5715add07dd4d5fdba25693f5ab4
| 93
|
py
|
Python
|
gramclone/apps.py
|
kevin3708/gramclone
|
e04fcec1c8930a51fc618904f39baca26618b13f
|
[
"MIT"
] | null | null | null |
gramclone/apps.py
|
kevin3708/gramclone
|
e04fcec1c8930a51fc618904f39baca26618b13f
|
[
"MIT"
] | null | null | null |
gramclone/apps.py
|
kevin3708/gramclone
|
e04fcec1c8930a51fc618904f39baca26618b13f
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig


class GramcloneConfig(AppConfig):
    """Django application configuration for the ``gramclone`` app."""

    name = 'gramclone'
| 15.5
| 33
| 0.763441
| 10
| 93
| 7.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 93
| 5
| 34
| 18.6
| 0.910256
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
f25895d82a3fbcbde336ef0323e59682a9cbbc3f
| 3,180
|
py
|
Python
|
d2.py
|
sdamashek/adventofcode
|
68b50d16246657313ce491b1b1b047e743f687fa
|
[
"Unlicense"
] | null | null | null |
d2.py
|
sdamashek/adventofcode
|
68b50d16246657313ce491b1b1b047e743f687fa
|
[
"Unlicense"
] | null | null | null |
d2.py
|
sdamashek/adventofcode
|
68b50d16246657313ce491b1b1b047e743f687fa
|
[
"Unlicense"
] | null | null | null |
# Puzzle input: one line of U/D/L/R moves per keypad button to press
# (Advent of Code 2016, day 2 style).
ins = """RUDULRLLUULRURDDRRUDURULLLDRLRLUDDLUDUDDUDRRDUDULDUUULLRULLRLDDLDLDDRLRRRRUDLLDDUULDRLLUDDRRUURLULRRRDLLURRUUDURUDDURLUDDDLUDDUUDUURUDLRDRDRLRDRLDRUDRUUDLRDDRRURDDLRDDRRURDUDDLULLUDRURURRRLRRUDUULULULRRLDLUDUURRLLRUDLLDRDDLRRRULRUDLULDDLLLULDLRUDLLLLRDDLRDRLDRLLRDRRDLRDULULRLLLDRUDRRRUULRUULDRURLUDRURRDLLDLRDLDDDDRRLUDLRRLUUUURDRDDLRRURURRDUULLRLURLURUDDDRDURDUUDRLRLRRLDDLDLDLDDDUDDULURLDDLLRLRRDULUDDLULRLUDDLDLRULUUUDRLDRUDURLUDDRLLRUULDLRRRRDLLLLURULLRDRRUDLUULRRDLLRLRLUDLDDULLDLLRDLDLL
LLUUUUUUDUDRLRDRDLDURRRLLRRLRURLLUURRLLUDUDLULUURUUURDLUDLDDLULLRDLRUULDLRDUDURLLDDUDUDULLUDDUULLLUULRRRLULRURRDLRUDUDDURRRDRUURDURLLULLRULLDRUULLURLDRDUUDDDDDDRRLDRLRRRLULDDUURRLLLLDRURLURDRDRDURUDUURRDUDUDRLLUUDDRLUDDDRDLDLRLDRURRDLLRULDRLLURURRLUULLRLRRURDDRDRUUURUURUUUDLLRRLUDRLDLRLURLDLUDDUDDDLDUDRRLDLRURULRLLRDUULURRRULDLLLRLDDDUURRRRDULLRURRLULULDLRRUDUDDLRUURDLDUDDUDRRDLRRRDUDUUUDLLDDDDLURLURRRUUULLLULRRLLLLLLULDUUDLRUDRRDLRDUUDUDLLRLDLLRUURDUUURUUUDDLLUUDLULDURLULULUUUDRUDULLURRULRULLRDLDDU
RLUUURULLDLRLDUDRDURRDUURLLUDDDUULRRRLRLURDDRUULUDULDUUDDDDUDDDDRUDDLDUUDRUDLRRRLLRDDLLLRLLRUULRUULDDRURRLURRLRLULDDRRRDDURDDRDRDULRUDRUUDULRLLULDLRLLDRULRDDRRDDUDLRLLUDRDRRRLUDULRDLRDDURRUUDDRRUDURRUUUDDRRDUDURLUUDUDUURDDDLURLULLUULULURUDUUDRUDULLUUULURDLDUULLDDLLDULRLRLRDUUURUUDLRLDURUDRLDULLUDLDLLRDUURRDUDURLUUUDLLRRULRLULRLDLLURDURRULRLLRRDUDLLRDRRRRDLUUDRUUUDDLRLUDDDDDDRURRRUUURRDLLRURLDDLLDLRRLLLDRRULRRUDLDRDDRRLULURLLUURURURRRRUUUUURUDURLRLLLULULDLLDLRDRRULUDUDRDRRDRDRRDUDLLLRUDRUDDDULRULRRRDRLRUUUURUDURDUUULLULRUDDULDUUDLDURRD
ULRULDDLDLULLLRRRLRUDDDDDLLDDUDLRRDULUUDRDLRRURDRRLUULRURUDRRULDLLLUDRUUDULULUDDRUDDDRDURRRDRDUUURLRDULUDRDRLDRUDDLLLDRRULUDLUDLDLLRRUDUULULDLDLLUURDLDDLLUUDURLURLLLDRDLDRRLRULUURRDRULRUUURULRRUDDDDLLDLDDLLRRLRRRRDUUDUDLDRDRRURDLRURULDLRDLLLLRUDRLLRDLRLRDURDRUDURRRLRDRDLLRLUDDDDRLRLLDUURRURLUURUULUDLUURDRRUDDLUDUDDDURRDRUDRLRULDULUUUUUUDDUDRUDUUURUDRRDLUDLUUDUULUDURDLDDDLLURRURUUDUDDRRDRLLULULDRLRURRDDDRDUUURDDDRULUDRDDLDURRLDDDLRRRLDDRDURULDLUDLLLURLURRLRRULDLLDDUDRRULDRRRRLURRUULRRRUDLURDLLDLLDULUUDRRLDLLLDRLRUDLUULDLDRUDUDURDRUDRDDDLRLULLUR
LRLUUURRLRRRRRUURRLLULRLULLDLUDLUDRDDRLDLRLULLURDURLURDLLRLDUUDDURRRRLDLLRULLRLDLLUUDRLDDLLDRULDRLLRURDLRURRUDLULLRURDLURRURUDULLDRLLUUULUDRURRUUDUDULUUULRLDDULDRDLUDDUDDDLRURULLDLLLRLLUURDLRUDLLLLDLLRLRUUUDDRUUUUDLDLRDDURLDURUULLLUUDLLLLDULRRRLLDLDRRDRLUDRUDURLLUDLRLLUDUDRDDDRDLRDLRULUULDRLUDLRLDUURLRRLUDDDUUDDDUDRLDLDUDLURUULLDDDURUUULRLUDLDURUUDRDRURUDDUURDUUUDLLDLDLDURUURLLLLRURUURURULRULLRUDLRRUUUUUDRRLLRDDUURDRDRDDDUDRLURDRRRUDLLLDURDLUUDLLUDDULUUDLDUUULLDRDLRURUURRDURRDLURRRRLLUUULRDULDDLDUURRDLDLLULRRLLUDLDUDLUUL"""
things = ins.split('\n')
# Diamond keypad; 0 marks positions that are not buttons.
c = [[0, 0, 1, 0, 0],
     [0, 2, 3, 4, 0],
     [5, 6, 7, 8, 9],
     [0, 'A', 'B', 'C', 0],
     [0, 0, 'D', 0, 0]]
x = 1
y = 1
deltas = {'R': (1, 0), 'L': (-1, 0), 'U': (0, -1), 'D': (0, 1)}
for line in things:
    for step in line:
        prev_x, prev_y = x, y
        dx, dy = deltas[step]
        # Clamp to the 5x5 grid, then revert any move that lands off-keypad.
        x = min(4, max(0, x + dx))
        y = min(4, max(0, y + dy))
        if c[y][x] == 0:
            x, y = prev_x, prev_y
    print(c[y][x])
| 90.857143
| 549
| 0.86761
| 105
| 3,180
| 26.27619
| 0.352381
| 0.004349
| 0.006524
| 0.005074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01247
| 0.092138
| 3,180
| 34
| 550
| 93.529412
| 0.943194
| 0
| 0
| 0
| 0
| 0
| 0.825786
| 0.821384
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.032258
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f281f0e5b53560d827e2377525b945ea930c4409
| 332
|
py
|
Python
|
src/config_handler.py
|
thisistrivial/intellijournal
|
2f25d72c29159ddfc9bf2ebe7de27d887d001f70
|
[
"MIT"
] | null | null | null |
src/config_handler.py
|
thisistrivial/intellijournal
|
2f25d72c29159ddfc9bf2ebe7de27d887d001f70
|
[
"MIT"
] | null | null | null |
src/config_handler.py
|
thisistrivial/intellijournal
|
2f25d72c29159ddfc9bf2ebe7de27d887d001f70
|
[
"MIT"
] | null | null | null |
import dir_handler
def get_config():
    """Return the parsed configuration, delegating to ``dir_handler``."""
    config = dir_handler.get_config()
    return config
def get_config_default():
    """Return the ``DEFAULT`` section of the configuration."""
    config = get_config()
    return config['DEFAULT']
def get_default_attr(attr):
    """Return ``attr`` from the DEFAULT config section, or None if absent.

    Uses ``.get`` instead of the original membership-test-then-index
    (one lookup instead of two; same result, including the None default).
    """
    return get_config_default().get(attr)
def get_editor():
    """Return the configured editor command, or None when not set."""
    editor = get_default_attr('editor')
    return editor
| 19.529412
| 39
| 0.762048
| 48
| 332
| 4.916667
| 0.270833
| 0.330508
| 0.20339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144578
| 332
| 16
| 40
| 20.75
| 0.830986
| 0
| 0
| 0
| 0
| 0
| 0.039157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.083333
| 0.25
| 0.833333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
f290e9f8fbc16115f1f570f5e3e22efccd96e0a9
| 22
|
py
|
Python
|
examples/api_getattr.py
|
kurazu/pycon2015
|
fd3878bfb961ccddb0bfd67e704c50355280d271
|
[
"CC0-1.0"
] | null | null | null |
examples/api_getattr.py
|
kurazu/pycon2015
|
fd3878bfb961ccddb0bfd67e704c50355280d271
|
[
"CC0-1.0"
] | null | null | null |
examples/api_getattr.py
|
kurazu/pycon2015
|
fd3878bfb961ccddb0bfd67e704c50355280d271
|
[
"CC0-1.0"
] | null | null | null |
getattr(o, attr_name)
| 11
| 21
| 0.772727
| 4
| 22
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 22
| 1
| 22
| 22
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f2b238efa07631e713d953077f6108b5e6fcef60
| 803
|
py
|
Python
|
rfb_mc/component/multi_processing_integrator_z3.py
|
Meterius/rfb-mc
|
94b9717dac8a23f1c956c15805e4c2ca6d617bb4
|
[
"MIT"
] | null | null | null |
rfb_mc/component/multi_processing_integrator_z3.py
|
Meterius/rfb-mc
|
94b9717dac8a23f1c956c15805e4c2ca6d617bb4
|
[
"MIT"
] | null | null | null |
rfb_mc/component/multi_processing_integrator_z3.py
|
Meterius/rfb-mc
|
94b9717dac8a23f1c956c15805e4c2ca6d617bb4
|
[
"MIT"
] | null | null | null |
from rfb_mc.component.multi_processing_integrator import MultiProcessingIntegrator
from rfb_mc.component.runner_z3 import RunnerZ3, FormulaParamsZ3, \
serialize_formula_params_z3, deserialize_formula_params_z3, SerializedFormulaParamsZ3
class MultiProcessingIntegratorZ3(MultiProcessingIntegrator[FormulaParamsZ3, SerializedFormulaParamsZ3]):
    """Multi-processing integrator specialized for Z3 formula parameters.

    Binds the generic ``MultiProcessingIntegrator`` to the Z3 runner and to
    the Z3 (de)serialization helpers so formula parameters can be passed
    across process boundaries.
    """

    @classmethod
    def get_runner_class(cls):
        """Return the runner class used to execute tasks (``RunnerZ3``)."""
        return RunnerZ3

    @classmethod
    def deserialize_formula_params(cls, serialized_formula_params: SerializedFormulaParamsZ3) -> FormulaParamsZ3:
        """Reconstruct Z3 formula params from their serialized form."""
        return deserialize_formula_params_z3(serialized_formula_params)

    @classmethod
    def serialize_formula_params(cls, formula_params: FormulaParamsZ3) -> SerializedFormulaParamsZ3:
        """Convert Z3 formula params into their serialized form."""
        return serialize_formula_params_z3(formula_params)
| 44.611111
| 113
| 0.833126
| 76
| 803
| 8.421053
| 0.342105
| 0.203125
| 0.09375
| 0.05625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022599
| 0.118306
| 803
| 17
| 114
| 47.235294
| 0.881356
| 0
| 0
| 0.230769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0.153846
| 0.230769
| 0.692308
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
f2ba43c1cbe804acb1b981d0c151d65b0f480d8f
| 2,694
|
py
|
Python
|
tests/test_compare_functions.py
|
Korijn/pygui
|
11be153bbdc389c5749ed82490289d6e2c2f704c
|
[
"MIT"
] | 2
|
2022-02-22T08:10:03.000Z
|
2022-02-22T08:21:48.000Z
|
tests/test_compare_functions.py
|
Korijn/pygui
|
11be153bbdc389c5749ed82490289d6e2c2f704c
|
[
"MIT"
] | 7
|
2022-02-24T16:38:45.000Z
|
2022-03-10T08:31:13.000Z
|
tests/test_compare_functions.py
|
fork-tongue/collagraph
|
7370b4ad8bc58a04c644be5be241e4ccb40f8893
|
[
"MIT"
] | null | null | null |
from functools import partial
from collagraph.compare import equivalent_functions
def test_compare_functions():
    """Functions with identical bytecode compare equivalent; differing
    closures do not until both capture the same free variable."""
    def a():
        return 2

    def b():
        return 2

    c = lambda: 2  # noqa: E731
    assert equivalent_functions(a, b)
    assert equivalent_functions(a, c)
    assert equivalent_functions(b, c)

    val = 3

    def a():
        return val

    # ``a`` now closes over ``val`` while ``b`` still returns a constant.
    assert not equivalent_functions(a, b)

    def b():
        return val

    assert equivalent_functions(a, b)
    assert equivalent_functions(a, lambda: val)
    assert not equivalent_functions(a, lambda: val + 1)
def test_lambda_functions_with_closures():
    """Lambdas capturing different closure values are not equivalent,
    even though their bytecode is identical."""
    collection = {}

    def create(number):
        collection[number] = lambda: number * 2

    for x in range(2):
        create(x)

    assert collection[0]() != collection[1]()
    assert not equivalent_functions(collection[0], collection[1])
def test_partial_functions():
    """Partials compare by wrapped function behavior and bound arguments."""
    callbacks = []

    def double(a):
        return a * 2

    for x in range(3):
        callbacks.append(partial(double, x))

    # Same function, different bound argument -> not equivalent.
    assert not equivalent_functions(callbacks[0], callbacks[1])

    other = partial(double, 2)
    assert equivalent_functions(other, callbacks[2])

    def another_double(b):
        return b * 2

    # Different wrapped function with identical bytecode and same argument.
    another = partial(another_double, 2)
    assert equivalent_functions(other, another)
def test_mix_partial_lambda():
    """A plain function and a partial wrapping it are not equivalent."""
    def a(x):
        return x * 2

    b = partial(a)
    assert not equivalent_functions(a, b)
def test_similar_lambda_functions():
    """Equivalence follows the compiled bytecode, not the source text."""
    # Different multiplier
    a = lambda x: x * 2  # noqa: E731
    b = lambda x: x * 3  # noqa: E731
    assert not equivalent_functions(a, b)
    # Same multiplier but different var name
    # should still result in the same bytecode
    b = lambda y: y * 2  # noqa: E731
    assert equivalent_functions(a, b)
    # Switching the order around will produce
    # different bytecode, although the other
    b = lambda y: 2 * y  # noqa: E731
    assert not equivalent_functions(a, b)
def test_closures():
    """Inner functions with different captured values are not equivalent."""
    def outer(value):
        val = value

        def inner():
            return val

        return inner

    a = outer("a")
    b = outer("b")
    assert a() == "a"
    assert b() == "b"
    assert not equivalent_functions(a, b)
def test_similar_lambda_function_which_captures_other_function():
    """Lambdas that call captured functions compare through the behavior of
    those captured functions."""
    def double(a):
        return a * 2

    def other_double(b):
        return b * 2

    def add(c):
        return c + 2

    x = lambda a: double(a)  # noqa: E731
    y = lambda b: other_double(b)  # noqa: E731
    z = lambda c: add(c)  # noqa: E731
    # double and other_double share bytecode; add does not.
    assert equivalent_functions(x, y)
    assert not equivalent_functions(x, z)
    assert not equivalent_functions(y, z)
| 20.723077
| 65
| 0.631774
| 363
| 2,694
| 4.561983
| 0.181818
| 0.229469
| 0.13285
| 0.169082
| 0.370169
| 0.309179
| 0.221618
| 0.201691
| 0.124396
| 0.060386
| 0
| 0.02606
| 0.273571
| 2,694
| 129
| 66
| 20.883721
| 0.820133
| 0.099109
| 0
| 0.294872
| 0
| 0
| 0.001658
| 0
| 0
| 0
| 0
| 0
| 0.282051
| 1
| 0.25641
| false
| 0
| 0.025641
| 0.141026
| 0.435897
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
f2c61c5e48c6003a5b0b379bc9c9555389c7285c
| 2,520
|
py
|
Python
|
projecteuler/problem_8_largest_submul.py
|
karolciba/playground
|
bfba14eaacfb6e7f820b85f95d9a1a72e251489e
|
[
"Unlicense"
] | null | null | null |
projecteuler/problem_8_largest_submul.py
|
karolciba/playground
|
bfba14eaacfb6e7f820b85f95d9a1a72e251489e
|
[
"Unlicense"
] | null | null | null |
projecteuler/problem_8_largest_submul.py
|
karolciba/playground
|
bfba14eaacfb6e7f820b85f95d9a1a72e251489e
|
[
"Unlicense"
] | null | null | null |
# 1000-digit number from Project Euler problem 8, stored as a digit string.
s = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
l = [ int(c) for c in s ]
# NOTE(review): ``imap``/``xrange``/builtin ``reduce`` below indicate this
# script targets Python 2.
from operator import mul
from itertools import count
from itertools import chain
from functools import partial
from itertools import imap
import random
import string
# ss = ''.join(random.choice(string.digits) for _ in range(10000))
# Benchmark input: 10000 random digits drawn from 1-9 (zero-free on purpose).
ll = [ random.choice([1,2,3,4,5,6,7,8,9]) for _ in range(10000) ]
def brute(ints, length):
    """Return the largest product of any ``length`` consecutive entries of ``ints``.

    Fixes an off-by-one: the original iterated ``xrange(len(ints) - length)``,
    skipping the final window.  Uses ``range`` and ``functools.reduce`` so the
    code runs on both Python 2.6+ and Python 3.
    """
    from functools import reduce  # builtin on py2, moved to functools on py3
    muls = []
    for i in range(len(ints) - length + 1):
        window = ints[i:i + length]
        muls.append(reduce(mul, window))
    return max(muls)
def func(ints, length):
    """Largest product over zero-free ``length``-long windows of ``ints``.

    Returns 0 when every window contains a zero.  Fixes the same off-by-one
    as ``brute`` (the last window was skipped) and replaces the Python-2-only
    ``imap``/``xrange`` with ``map``/``range`` so it runs on py2 and py3.
    """
    from functools import reduce
    windows = (ints[x:x + length] for x in range(len(ints) - length + 1))
    nonzero = [w for w in windows if 0 not in w]
    product = partial(reduce, mul)
    muls = map(product, nonzero)
    # chain in a 0 so max() is defined even when no zero-free window exists
    return max(chain(muls, [0]))
# Memoization table shared by prod(); keyed on the tuple of factors.
prod_cache = {}


def prod(ints):
    """Return the product of ``ints``, memoized on the tuple of values.

    Returns 0 immediately when any entry is zero.  Unlike the original,
    the caller's list is left untouched — the old implementation consumed
    it with ``ints.pop()`` while recursing, mutating the argument.
    """
    if 0 in ints:
        return 0
    t = tuple(ints)
    if t in prod_cache:
        return prod_cache[t]
    m = 1
    for v in t:
        m *= v
    prod_cache[t] = m
    return m
def dyn(ints, length):
    """Largest product of any ``length`` consecutive entries, via memoized prod().

    Fixes the same off-by-one as ``brute`` (last window skipped) and drops a
    local ``prod_cache = {}`` assignment that merely shadowed — without
    affecting — the module-level cache used by ``prod``.
    """
    parts = (ints[x:x + length] for x in range(len(ints) - length + 1))
    return max(prod(part) for part in parts)
# print func(l,13)
| 37.61194
| 1,006
| 0.754365
| 243
| 2,520
| 7.786008
| 0.345679
| 0.031712
| 0.030127
| 0.023784
| 0.074524
| 0.044397
| 0.044397
| 0.044397
| 0.044397
| 0.044397
| 0
| 0.489321
| 0.163889
| 2,520
| 66
| 1,007
| 38.181818
| 0.408638
| 0.109921
| 0
| 0.139535
| 0
| 0
| 0.44843
| 0.44843
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0.162791
| 0
| 0.418605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4b408095d6a61e636539382c523f90bffaf97cc9
| 68
|
py
|
Python
|
src/cmatools/combine/__init__.py
|
jonathan-winn-geo/cmatools
|
ae044de4bd8f1f86814b07498e46b5a03837e679
|
[
"BSD-3-Clause"
] | null | null | null |
src/cmatools/combine/__init__.py
|
jonathan-winn-geo/cmatools
|
ae044de4bd8f1f86814b07498e46b5a03837e679
|
[
"BSD-3-Clause"
] | 3
|
2020-05-13T10:30:38.000Z
|
2020-05-13T10:32:30.000Z
|
src/cmatools/combine/__init__.py
|
jonathan-winn-geo/cmatools
|
ae044de4bd8f1f86814b07498e46b5a03837e679
|
[
"BSD-3-Clause"
] | 1
|
2020-07-02T16:58:06.000Z
|
2020-07-02T16:58:06.000Z
|
"""An example subpackage used to illustrate integration testing."""
| 34
| 67
| 0.779412
| 8
| 68
| 6.625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 68
| 1
| 68
| 68
| 0.883333
| 0.897059
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4b551c3b3b3b10420ee57b8ddacaf875652fab11
| 102
|
py
|
Python
|
NFCow/shopping_carts/apps.py
|
jojoriveraa/titulacion-NFCOW
|
643f7f2cbe9c68d9343f38d12629720b12e9ce1e
|
[
"Apache-2.0"
] | null | null | null |
NFCow/shopping_carts/apps.py
|
jojoriveraa/titulacion-NFCOW
|
643f7f2cbe9c68d9343f38d12629720b12e9ce1e
|
[
"Apache-2.0"
] | 11
|
2016-01-09T06:27:02.000Z
|
2016-01-10T05:21:05.000Z
|
NFCow/shopping_carts/apps.py
|
jojoriveraa/titulacion-NFCOW
|
643f7f2cbe9c68d9343f38d12629720b12e9ce1e
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig


class ShoppingCartsConfig(AppConfig):
    """Django application configuration for the ``shopping_carts`` app."""

    name = 'shopping_carts'
| 17
| 37
| 0.784314
| 11
| 102
| 7.181818
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 102
| 5
| 38
| 20.4
| 0.908046
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
4bac49e2d23a6b03ec2250f8ba8eb634c1bd9bc4
| 6,309
|
py
|
Python
|
python/asteria/matter.py
|
IceCubeOpenSource/USSR
|
d96158cb835245c40e5fc57239c6038c87b3ac01
|
[
"BSD-3-Clause"
] | 2
|
2019-05-03T21:09:16.000Z
|
2019-11-20T18:40:52.000Z
|
python/asteria/matter.py
|
IceCubeOpenSource/USSR
|
d96158cb835245c40e5fc57239c6038c87b3ac01
|
[
"BSD-3-Clause"
] | 27
|
2019-03-19T16:02:46.000Z
|
2021-07-07T19:36:57.000Z
|
python/asteria/matter.py
|
icecube/ASTERIA
|
d96158cb835245c40e5fc57239c6038c87b3ac01
|
[
"BSD-3-Clause"
] | 2
|
2019-03-12T22:36:46.000Z
|
2019-05-14T14:14:06.000Z
|
# -*- coding: utf-8 -*-
"""Module for Earth and stellar density models.
"""
import numpy as np
from astropy import units as u
from abc import ABC, abstractmethod
class Body(ABC):
"""Base class defining the interface for matter interactions."""
def __init__(self):
pass
@abstractmethod
def density(self, r):
"""Return density as a function of distance from the core."""
pass
@abstractmethod
def y_e(self, r):
"""Return electron fraction as a function of distance from the core."""
pass
class PREM(Body):
"""Preliminary Reference Earth Model: A. M. Dziewonski and D. L. Anderson,
PEPI 25:297-356, 1981.
"""
def __init__(self):
# Radial bin edges, in units of km.
self._rbins = np.asarray(
[ 0.0, 1221.5, 3480.0, 3630.0, 5701.0, 5771.0,
5971.0, 6151.0, 6346.6, 6356.0, 6368.0, 6371.0])
# Density coefficients in units of kg/m3, kg/m4, kg/m5.
self._dcoef = np.asarray(
[[1.3088e4, 1.9110e-8, -2.1773e-10],
[1.2346e4, 1.3976e-4, -2.4123e-10],
[7.3067e3, -5.0007e-4, 0.0000],
[6.7823e3, -2.4441e-4, -3.0922e-11],
[5.3197e3, -2.3286e-4, 0.0000],
[1.1249e4, -1.2603e-3, 0.0000],
[7.1083e3, -5.9706e-4, 0.0000],
[2.6910e3, 1.0869e-4, 0.0000],
[2.9000e3, 0.0000, 0.0000],
[2.6000e3, 0.0000, 0.0000],
[1.0200e3, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000]])
# Electron fraction values: inner/outer core and mantle.
self._ye = np.asarray([ 0.4656, 0.4957 ])
def density(self, r):
"""Return density as a function of distance from the core.
Parameters
----------
r : float or ndarray
Radial distance from core, in length units (astropy).
Returns
-------
rho : float or ndarray
Density at radial position(s) r.
"""
index = np.digitize(r.to('km').value, self._rbins) - 1
if type(r.value) in [list, np.ndarray]:
rho = []
for i, x in enumerate(r.to('m').value):
j = index[i]
c0, c1, c2 = self._dcoef[j]
rho.append(1e-3 * (c0 + c1*x + c2*x**2))
return np.asarray(rho) * u.g / u.cm**3
else:
x = r.to('m').value
c0, c1, c2 = self._dcoef[index]
return 1e-3 * (c0 + c1*x + c2*x**2) * u.g / u.cm**3
def y_e(self, r):
"""Return electron fraction as a function of distance from the core.
Parameters
----------
r : float or ndarray
Radial distance from core, in length units (astropy).
Returns
-------
Y_e : float or ndarray
Electron fraction at radial position(s) r.
"""
idx = np.digitize(r.to('km').value, [0., 3480., 6371.]) - 1
return self._ye[idx]
class SimpleEarth(Body):
"""A constant-density 13-layer approximation of the Preliminary Reference
Earth Model: A. M. Dziewonski and D. L. Anderson, PEPI 25:297-356, 1981.
"""
def __init__(self):
# Radial bin edges, in units of km.
self._rbins = np.asarray(
[0.0, 1221.5, 1786.125, 2350.75 , 2915.375, 3480.0,
4220.3, 4960.7, 5701.0, 5771.0, 5971.0, 6151.0,
6346.0, 6356.0, 6368.0, 6371.0])
# Constant density values, in units of g/cm3.
self._rho = np.asarray(
[12.9792, 12.0042, 11.5966, 11.0351, 10.3155,
5.3828, 5.0073, 4.5988, 3.9840, 3.8496,
3.4894, 3.3701, 2.9000, 2.6000, 1.0200, 0.0000])
# Electron fraction values: inner/outer core and mantle.
self._ye = np.asarray([ 0.4656, 0.4957 ])
def density(self, r):
"""Return density as a function of distance from the core.
Parameters
----------
r : float or ndarray
Radial distance from core, in length units (astropy).
Returns
-------
rho : float or ndarray
Density at radial position(s) r.
"""
idx = np.digitize(r.to('km').value, self._rbins) - 1
return self._rho[idx] * u.g / u.cm**3
def y_e(self, r):
"""Return electron fraction as a function of distance from the core.
Parameters
----------
r : float or ndarray
Radial distance from core, in length units (astropy).
Returns
-------
Y_e : float or ndarray
Electron fraction at radial position(s) r.
"""
idx = np.digitize(r.to('km').value, [0., 3480., 6371.]) - 1
return self._ye[idx]
class TwoLayerModel(Body):
"""A simple two-layered model for the densities of the core and the mantle
"""
def __init__(self):
# Radial bin edges, in units of km.
self._rbins = np.asarray(
[0.0, 3480.0, 6371.0])
# Constant density values, in units of g/cm3.
self._rho = np.asarray(
[11.5000, 4.5000, 0.0000])
# Electron fraction values: inner/outer core and mantle.
self._ye = np.asarray([ 0.4656, 0.4957 ])
def density(self, r):
"""Return density as a function of distance from the core.
Parameters
----------
r : float or ndarray
Radial distance from core, in length units (astropy).
Returns
-------
rho : float or ndarray
Density at radial position(s) r.
"""
idx = np.digitize(r.to('km').value, self._rbins) - 1
return self._rho[idx] * u.g / u.cm**3
def y_e(self, r):
"""Return electron fraction as a function of distance from the core.
Parameters
----------
r : float or ndarray
Radial distance from core, in length units (astropy).
Returns
-------
Y_e : float or ndarray
Electron fraction at radial position(s) r.
"""
idx = np.digitize(r.to('km').value, [0., 3480., 6371.]) - 1
return self._ye[idx]
| 31.545
| 79
| 0.517198
| 854
| 6,309
| 3.771663
| 0.21897
| 0.024837
| 0.052158
| 0.032288
| 0.724309
| 0.713133
| 0.705681
| 0.705681
| 0.692021
| 0.665321
| 0
| 0.13932
| 0.346965
| 6,309
| 199
| 80
| 31.703518
| 0.642476
| 0.387225
| 0
| 0.454545
| 0
| 0
| 0.004199
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155844
| false
| 0.038961
| 0.038961
| 0
| 0.337662
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4bb84e4e2680a34fcc0c3492b03b900323dcf16d
| 46
|
py
|
Python
|
scribdl/__init__.py
|
beazcode/scribd-downloader
|
842483a661d082a2e93c78dfcb0dccf2484ca3c1
|
[
"MIT"
] | 11
|
2019-11-05T03:27:54.000Z
|
2021-09-13T22:32:22.000Z
|
scribdl/__init__.py
|
shivasiddharth/scribd-downloader
|
842483a661d082a2e93c78dfcb0dccf2484ca3c1
|
[
"MIT"
] | null | null | null |
scribdl/__init__.py
|
shivasiddharth/scribd-downloader
|
842483a661d082a2e93c78dfcb0dccf2484ca3c1
|
[
"MIT"
] | 9
|
2018-07-23T07:59:28.000Z
|
2021-08-05T03:30:08.000Z
|
# Package version string; bump on release.
__version__ = '0.1.3'
from .scribdl import *
| 11.5
| 22
| 0.673913
| 7
| 46
| 3.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 0.173913
| 46
| 3
| 23
| 15.333333
| 0.631579
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
29bef796b4a63fb9d174bc8abee3e661f97f44bb
| 31
|
py
|
Python
|
src/pydantic_cloud_configuration/__init__.py
|
nilsdebruin/pydantic-cloud-settings
|
3e4aedc8741044a030a690af1be735d82d6ad2be
|
[
"MIT"
] | null | null | null |
src/pydantic_cloud_configuration/__init__.py
|
nilsdebruin/pydantic-cloud-settings
|
3e4aedc8741044a030a690af1be735d82d6ad2be
|
[
"MIT"
] | null | null | null |
src/pydantic_cloud_configuration/__init__.py
|
nilsdebruin/pydantic-cloud-settings
|
3e4aedc8741044a030a690af1be735d82d6ad2be
|
[
"MIT"
] | null | null | null |
"""Pydantic Cloud Settings."""
| 15.5
| 30
| 0.677419
| 3
| 31
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.75
| 0.774194
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4b042972e2193b307152988845e475754f93eed2
| 32
|
py
|
Python
|
codemach/tests/source/globals.py
|
chuck1/myexecutor
|
6b70d2e7bc9ace0efde8e38f75be2b928393bfdf
|
[
"MIT"
] | null | null | null |
codemach/tests/source/globals.py
|
chuck1/myexecutor
|
6b70d2e7bc9ace0efde8e38f75be2b928393bfdf
|
[
"MIT"
] | 37
|
2017-06-29T22:42:08.000Z
|
2019-01-22T18:22:57.000Z
|
codemach/tests/source/globals.py
|
chuck1/myexecutor
|
6b70d2e7bc9ace0efde8e38f75be2b928393bfdf
|
[
"MIT"
] | null | null | null |
# Module-level bindings appear in globals(); sanity-check that lookup.
a = 1
assert globals()['a'] == a
| 10.666667
| 27
| 0.53125
| 6
| 32
| 2.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.125
| 32
| 2
| 28
| 16
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4b0fe4d27db7e02575eae60772a1ba0736a14dac
| 28
|
py
|
Python
|
modules/2.79/bpy/types/UIPopupMenu.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/UIPopupMenu.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/UIPopupMenu.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
UIPopupMenu.layout = None
| 7
| 25
| 0.75
| 3
| 28
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 28
| 3
| 26
| 9.333333
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4b14d14d7ec1085f850bf7549fd242a9d347a64f
| 8,638
|
py
|
Python
|
tests/approx_lhd_tests.py
|
vishalbelsare/abcpy
|
72d0d31ae3fa531b69ea3fef39c96af6628ee76f
|
[
"BSD-3-Clause-Clear"
] | 89
|
2017-02-23T23:34:52.000Z
|
2022-03-25T20:35:17.000Z
|
tests/approx_lhd_tests.py
|
vishalbelsare/abcpy
|
72d0d31ae3fa531b69ea3fef39c96af6628ee76f
|
[
"BSD-3-Clause-Clear"
] | 35
|
2017-03-31T13:24:52.000Z
|
2022-01-09T11:31:38.000Z
|
tests/approx_lhd_tests.py
|
vishalbelsare/abcpy
|
72d0d31ae3fa531b69ea3fef39c96af6628ee76f
|
[
"BSD-3-Clause-Clear"
] | 32
|
2017-03-22T06:27:43.000Z
|
2021-09-17T15:50:42.000Z
|
import unittest
import numpy as np
from abcpy.approx_lhd import PenLogReg, SynLikelihood, SemiParametricSynLikelihood
from abcpy.continuousmodels import Normal
from abcpy.continuousmodels import Uniform
from abcpy.statistics import Identity
class PenLogRegTests(unittest.TestCase):
    """Tests for the PenLogReg approximate likelihood.

    Expected values are pinned to ``np.random.RandomState(1)`` seeds; do not
    change the simulation order in ``setUp`` without re-deriving them.
    """

    def setUp(self):
        # Univariate Normal model with Uniform priors, plus a bivariate
        # Uniform model; PenLogReg instances with n_simulate=100 (and one
        # deliberately mismatched n_simulate=10 for the error-path test).
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.model_bivariate = Uniform([[0, 0], [1, 1]], name="model")
        self.stat_calc = Identity(degree=2, cross=1)
        self.likfun = PenLogReg(self.stat_calc, [self.model], n_simulate=100, n_folds=10, max_iter=100000, seed=1)
        self.likfun_wrong_n_sim = PenLogReg(self.stat_calc, [self.model], n_simulate=10, n_folds=10, max_iter=100000,
                                            seed=1)
        self.likfun_bivariate = PenLogReg(self.stat_calc, [self.model_bivariate], n_simulate=100, n_folds=10,
                                          max_iter=100000, seed=1)
        # Observed data: single and double observations for both models.
        self.y_obs = self.model.forward_simulate(self.model.get_input_values(), 1, rng=np.random.RandomState(1))
        self.y_obs_bivariate = self.model_bivariate.forward_simulate(self.model_bivariate.get_input_values(), 1,
                                                                     rng=np.random.RandomState(1))
        self.y_obs_double = self.model.forward_simulate(self.model.get_input_values(), 2, rng=np.random.RandomState(1))
        self.y_obs_bivariate_double = self.model_bivariate.forward_simulate(self.model_bivariate.get_input_values(), 2,
                                                                            rng=np.random.RandomState(1))
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100, rng=np.random.RandomState(1))
        self.y_sim_bivariate = self.model_bivariate.forward_simulate(self.model_bivariate.get_input_values(), 100,
                                                                     rng=np.random.RandomState(1))

    def test_likelihood(self):
        """Type checks, pinned log-likelihood values, and n_samples mismatch error."""
        # Checks whether wrong input type produces error message
        self.assertRaises(TypeError, self.likfun.loglikelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.loglikelihood, [2, 4], 3.4)
        # create observed data
        comp_likelihood = self.likfun.loglikelihood(self.y_obs, self.y_sim)
        expected_likelihood = 9.77317308598673e-08
        # This checks whether it computes a correct value and dimension is right. Not correct as it does not check the
        # absolute value:
        # self.assertLess(comp_likelihood - expected_likelihood, 10e-2)
        self.assertAlmostEqual(comp_likelihood, np.log(expected_likelihood))
        # check if it returns the correct error when n_samples does not match:
        self.assertRaises(RuntimeError, self.likfun_wrong_n_sim.loglikelihood, self.y_obs, self.y_sim)
        # try now with the bivariate uniform model:
        comp_likelihood_biv = self.likfun_bivariate.loglikelihood(self.y_obs_bivariate, self.y_sim_bivariate)
        expected_likelihood_biv = 0.999999999999999
        self.assertAlmostEqual(comp_likelihood_biv, np.log(expected_likelihood_biv))

    def test_likelihood_multiple_observations(self):
        """Likelihood over two observations matches the pinned values."""
        comp_likelihood = self.likfun.likelihood(self.y_obs_double, self.y_sim)
        expected_likelihood = 7.337876253225462e-10
        self.assertAlmostEqual(comp_likelihood, expected_likelihood)
        expected_likelihood_biv = 0.9999999999999979
        comp_likelihood_biv = self.likfun_bivariate.likelihood(self.y_obs_bivariate_double, self.y_sim_bivariate)
        self.assertAlmostEqual(comp_likelihood_biv, expected_likelihood_biv)

    def test_loglikelihood_additive(self):
        """Log-likelihood of two observations equals the sum of the individual ones."""
        comp_loglikelihood_a = self.likfun.loglikelihood([self.y_obs_double[0]], self.y_sim)
        comp_loglikelihood_b = self.likfun.loglikelihood([self.y_obs_double[1]], self.y_sim)
        comp_loglikelihood_two = self.likfun.loglikelihood(self.y_obs_double, self.y_sim)
        self.assertAlmostEqual(comp_loglikelihood_two, comp_loglikelihood_a + comp_loglikelihood_b)
class SynLikelihoodTests(unittest.TestCase):
    """Tests for the SynLikelihood approximate likelihood.

    Expected values are pinned to ``np.random.RandomState(1)`` simulations.
    """

    def setUp(self):
        # Univariate Normal model with Uniform priors and seeded fake data.
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc = Identity(degree=2, cross=False)
        self.likfun = SynLikelihood(self.stat_calc)
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100, rng=np.random.RandomState(1))

    def test_likelihood(self):
        """Type checks and pinned log-likelihood for a single observation."""
        # Checks whether wrong input type produces error message
        self.assertRaises(TypeError, self.likfun.loglikelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.loglikelihood, [2, 4], 3.4)
        # create observed data
        y_obs = [1.8]
        # calculate the statistics of the observed data
        comp_loglikelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_loglikelihood = -0.6434435652263701
        # This checks whether it computes a correct value and dimension is right
        self.assertAlmostEqual(comp_loglikelihood, expected_loglikelihood)

    def test_likelihood_multiple_observations(self):
        """Pinned log-likelihood for two observations."""
        y_obs = [1.8, 0.9]
        comp_loglikelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_loglikelihood = -1.2726154993040115
        # This checks whether it computes a correct value and dimension is right
        self.assertAlmostEqual(comp_loglikelihood, expected_loglikelihood)

    def test_loglikelihood_additive(self):
        """Log-likelihood of two observations equals the sum of the individual ones."""
        y_obs = [1.8, 0.9]
        comp_loglikelihood_a = self.likfun.loglikelihood([y_obs[0]], self.y_sim)
        comp_loglikelihood_b = self.likfun.loglikelihood([y_obs[1]], self.y_sim)
        comp_loglikelihood_two = self.likfun.loglikelihood(y_obs, self.y_sim)
        self.assertAlmostEqual(comp_loglikelihood_two, comp_loglikelihood_a + comp_loglikelihood_b)
class SemiParametricSynLikelihoodTests(unittest.TestCase):
    """Tests for the semi-parametric synthetic likelihood approximation."""

    def setUp(self):
        """Build a Normal model, two likelihood functions and 100 fake simulations.

        ``likfun_1`` uses degree-1 statistics; the semi-parametric method needs
        more than one statistic, so it is used to test the error path.
        """
        self.mu = Uniform([[-5.0], [5.0]], name='mu')
        self.sigma = Uniform([[5.0], [10.0]], name='sigma')
        self.model = Normal([self.mu, self.sigma])
        self.stat_calc_1 = Identity(degree=1, cross=False)
        self.likfun_1 = SemiParametricSynLikelihood(self.stat_calc_1)
        self.stat_calc = Identity(degree=2, cross=False)
        self.likfun = SemiParametricSynLikelihood(self.stat_calc)
        # create fake simulated data
        self.mu._fixed_values = [1.1]
        self.sigma._fixed_values = [1.0]
        # Fixed RNG seed keeps the expected values below deterministic.
        self.y_sim = self.model.forward_simulate(self.model.get_input_values(), 100, rng=np.random.RandomState(1))

    def test_likelihood(self):
        """Single observation: type errors, size error, and the reference value."""
        # Checks whether wrong input type produces error message
        self.assertRaises(TypeError, self.likfun.loglikelihood, 3.4, [2, 1])
        self.assertRaises(TypeError, self.likfun.loglikelihood, [2, 4], 3.4)
        # create observed data
        y_obs = [1.8]
        # check whether it raises correct error with input of wrong size
        self.assertRaises(RuntimeError, self.likfun_1.loglikelihood, y_obs, self.y_sim)
        # calculate the statistics of the observed data
        comp_loglikelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_loglikelihood = -2.3069321875272815
        # This checks whether it computes a correct value and dimension is right
        self.assertAlmostEqual(comp_loglikelihood, expected_loglikelihood)

    def test_likelihood_multiple_observations(self):
        """Two observations: log-likelihood matches the pre-computed reference."""
        y_obs = [1.8, 0.9]
        comp_loglikelihood = self.likfun.loglikelihood(y_obs, self.y_sim)
        expected_loglikelihood = -3.7537571275591683
        # This checks whether it computes a correct value and dimension is right
        self.assertAlmostEqual(comp_loglikelihood, expected_loglikelihood)

    def test_loglikelihood_additive(self):
        """loglik([a, b]) must equal loglik([a]) + loglik([b])."""
        y_obs = [1.8, 0.9]
        comp_loglikelihood_a = self.likfun.loglikelihood([y_obs[0]], self.y_sim)
        comp_loglikelihood_b = self.likfun.loglikelihood([y_obs[1]], self.y_sim)
        comp_loglikelihood_two = self.likfun.loglikelihood(y_obs, self.y_sim)
        self.assertAlmostEqual(comp_loglikelihood_two, comp_loglikelihood_a + comp_loglikelihood_b)
# Run the full test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 52.351515
| 119
| 0.69229
| 1,108
| 8,638
| 5.173285
| 0.120036
| 0.03402
| 0.0321
| 0.04187
| 0.80792
| 0.752966
| 0.711619
| 0.679867
| 0.66783
| 0.642533
| 0
| 0.043885
| 0.208613
| 8,638
| 164
| 120
| 52.670732
| 0.794617
| 0.120977
| 0
| 0.571429
| 0
| 0
| 0.004491
| 0
| 0
| 0
| 0
| 0
| 0.169643
| 1
| 0.107143
| false
| 0
| 0.053571
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4b20af2e4275a515b9c9d9a7808f33d6eac325cc
| 964
|
py
|
Python
|
src/db/__init__.py
|
quickgiant/computone
|
9a49cec64199516f7691503b1b8f9b7596c1b949
|
[
"MIT"
] | 1
|
2022-02-12T00:21:00.000Z
|
2022-02-12T00:21:00.000Z
|
src/db/__init__.py
|
quickgiant/computone
|
9a49cec64199516f7691503b1b8f9b7596c1b949
|
[
"MIT"
] | null | null | null |
src/db/__init__.py
|
quickgiant/computone
|
9a49cec64199516f7691503b1b8f9b7596c1b949
|
[
"MIT"
] | null | null | null |
"""Public API of the ``db`` package.

Re-exports the database helpers from the submodules (connection handling,
file/image/job/tag persistence and schema setup) so callers can simply do
``from db import fetch_files`` etc. ``__all__`` mirrors the imports below.
"""
from .connection import connect_to_db, close_db_connection, get_con
from .files import (
    fetch_files,
    insert_file,
    is_file_in_library_by_path,
)
from .images import (
    fetch_image_by_hash,
    fetch_image_by_id,
    insert_file_image,
    insert_image,
)
from .jobs import (
    delete_job,
    fetch_job,
    fetch_jobs,
    fetch_job_history,
    insert_job,
    insert_job_history_item,
    update_job_data,
    update_job_status,
)
from .setup import setup_db
from .tags import insert_tag

# Explicit public surface; keep in sync with the imports above.
__all__ = [
    "connect_to_db",
    "close_db_connection",
    "get_con",
    "fetch_files",
    "insert_file",
    "is_file_in_library_by_path",
    "fetch_image_by_hash",
    "fetch_image_by_id",
    "insert_file_image",
    "insert_image",
    "delete_job",
    "fetch_job",
    "fetch_jobs",
    "fetch_job_history",
    "insert_job",
    "insert_job_history_item",
    "update_job_data",
    "update_job_status",
    "setup_db",
    "insert_tag",
]
| 20.083333
| 67
| 0.687759
| 131
| 964
| 4.480916
| 0.259542
| 0.068143
| 0.081772
| 0.054514
| 0.780239
| 0.780239
| 0.780239
| 0.780239
| 0.664395
| 0.664395
| 0
| 0
| 0.216805
| 964
| 47
| 68
| 20.510638
| 0.777483
| 0
| 0
| 0
| 0
| 0
| 0.291494
| 0.05083
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.130435
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d9c652230762630c582ba68e401a7b219c0bf0b4
| 132
|
py
|
Python
|
fspdf/__main__.py
|
kaiec/fspdf
|
c69393d028cb218ea2cb012b79b3140ed428be5a
|
[
"MIT"
] | 3
|
2021-07-19T06:55:24.000Z
|
2021-11-05T19:38:39.000Z
|
fspdf/__main__.py
|
kaiec/fspdf
|
c69393d028cb218ea2cb012b79b3140ed428be5a
|
[
"MIT"
] | 4
|
2018-04-24T08:53:57.000Z
|
2022-01-13T00:45:14.000Z
|
fspdf/__main__.py
|
kaiec/fspdf
|
c69393d028cb218ea2cb012b79b3140ed428be5a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""fspdf.__main__: executed when fspdf directory is called as script."""
from .fspdf import main

# Guard the entry point: ``python -m fspdf`` still runs main() (the module is
# executed with __name__ == "__main__"), but importing fspdf.__main__ from
# other code or tooling no longer triggers the application as a side effect.
if __name__ == "__main__":
    main()
| 14.666667
| 72
| 0.666667
| 18
| 132
| 4.666667
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009174
| 0.174242
| 132
| 8
| 73
| 16.5
| 0.761468
| 0.674242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d9cd589c97eddf2f2fe5c4c4d75d3679e0238af3
| 1,004
|
py
|
Python
|
odoo-14.0/addons/l10n_si/__manifest__.py
|
Yomy1996/P1
|
59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29
|
[
"CC-BY-3.0"
] | null | null | null |
odoo-14.0/addons/l10n_si/__manifest__.py
|
Yomy1996/P1
|
59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29
|
[
"CC-BY-3.0"
] | null | null | null |
odoo-14.0/addons/l10n_si/__manifest__.py
|
Yomy1996/P1
|
59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright: (C) 2012 - Mentis d.o.o., Dravograd
# Odoo module manifest for the Slovenian accounting localization
# (chart of accounts, taxes, fiscal positions and demo company).
{
    "name": "Slovenian - Accounting",
    "version": "1.1",
    "author": "Mentis d.o.o.",
    "website": "http://www.mentis.si",
    "category": "Accounting/Localizations/Account Charts",
    "description": "Kontni načrt za gospodarske družbe",
    # Requires the core accounting module and IBAN bank-account support.
    "depends": [
        "account",
        "base_iban",
    ],
    # Data files are loaded in order: chart first, then accounts, taxes
    # and fiscal positions that reference it.
    "data": [
        "data/l10n_si_chart_data.xml",
        "data/account.account.template.csv",
        "data/account.chart.template.csv",
        "data/account.tax.group.csv",
        "data/account_tax_report_data.xml",
        "data/account_tax_data.xml",
        "data/account.fiscal.position.template.csv",
        "data/account.fiscal.position.account.template.csv",
        "data/account.fiscal.position.tax.template.csv",
        "data/account_chart_template_data.xml",
    ],
    "demo": [
        "demo/demo_company.xml",
    ],
}
| 30.424242
| 74
| 0.610558
| 118
| 1,004
| 5.084746
| 0.483051
| 0.165
| 0.14
| 0.183333
| 0.26
| 0.236667
| 0
| 0
| 0
| 0
| 0
| 0.011538
| 0.223108
| 1,004
| 32
| 75
| 31.375
| 0.757692
| 0.140438
| 0
| 0.111111
| 0
| 0
| 0.664726
| 0.463329
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d9db00a54292b6ce05cdd372065edb7e39ca3be5
| 74
|
py
|
Python
|
Winston/lesson-14/api_example.py
|
gfoo003/programming-together
|
225e0a2255dd8da1f1ef32d2a88deea27c050f10
|
[
"MIT"
] | 2
|
2021-03-20T02:07:19.000Z
|
2021-03-20T02:07:26.000Z
|
Winston/lesson-14/api_example.py
|
gfoo003/programming-together
|
225e0a2255dd8da1f1ef32d2a88deea27c050f10
|
[
"MIT"
] | null | null | null |
Winston/lesson-14/api_example.py
|
gfoo003/programming-together
|
225e0a2255dd8da1f1ef32d2a88deea27c050f10
|
[
"MIT"
] | 8
|
2021-02-20T03:10:50.000Z
|
2021-03-20T02:42:45.000Z
|
def getNumber(inputNumber, factor=2):
    """Return ``inputNumber`` multiplied by ``factor``.

    Args:
        inputNumber: The number to scale.
        factor: Multiplier to apply; defaults to 2 so existing callers
            (which relied on the hard-coded doubling) are unaffected.

    Returns:
        The product ``inputNumber * factor``.
    """
    return inputNumber * factor

print(getNumber(3))
| 14.8
| 27
| 0.756757
| 9
| 74
| 6.222222
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.135135
| 74
| 5
| 28
| 14.8
| 0.84375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
d9f7584f97310631b924b72fe8577b5316c6457f
| 66,266
|
py
|
Python
|
tests/integration/test_target_snowflake.py
|
deanmorin/pipelinewise-target-snowflake
|
bc79613a0d1ad0bd589856a28444ac1d56125554
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_target_snowflake.py
|
deanmorin/pipelinewise-target-snowflake
|
bc79613a0d1ad0bd589856a28444ac1d56125554
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_target_snowflake.py
|
deanmorin/pipelinewise-target-snowflake
|
bc79613a0d1ad0bd589856a28444ac1d56125554
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import gzip
import json
import tempfile
import unittest
import mock
import os
import botocore
import boto3
import itertools
import target_snowflake
from target_snowflake import RecordValidationException
from target_snowflake.db_sync import DbSync
from target_snowflake.upload_clients.s3_upload_client import S3UploadClient
from pyarrow.lib import ArrowTypeError
from snowflake.connector.errors import ProgrammingError
from snowflake.connector.errors import DatabaseError
try:
import tests.integration.utils as test_utils
except ImportError:
import utils as test_utils
# Columns that target-snowflake appends to every record when the
# 'add_metadata_columns' option is on; the helper assertions below
# strip or check for exactly these names.
METADATA_COLUMNS = [
    '_SDC_EXTRACTED_AT',
    '_SDC_BATCHED_AT',
    '_SDC_DELETED_AT'
]
class TestIntegration(unittest.TestCase):
    """
    Integration Tests
    """
    # Show full diffs on assertion failures; the expected-row lists in these
    # tests are long and unittest would otherwise truncate them.
    maxDiff = None
def setUp(self):
    """Reset the target schema and build an S3 client before each test.

    Dropping the target schema gives every test a clean slate; credentials
    and endpoints come from the test config returned by ``test_utils``.
    """
    self.config = test_utils.get_test_config()
    snowflake = DbSync(self.config)
    # Drop target schema
    if self.config['default_target_schema']:
        snowflake.query("DROP SCHEMA IF EXISTS {}".format(self.config['default_target_schema']))
    # Set up S3 client
    aws_access_key_id = self.config.get('aws_access_key_id')
    aws_secret_access_key = self.config.get('aws_secret_access_key')
    aws_session_token = self.config.get('aws_session_token')
    aws_session = boto3.session.Session(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        aws_session_token=aws_session_token
    )
    # region/endpoint may be None, in which case boto3 falls back to defaults
    self.s3_client = aws_session.client('s3',
                                        region_name=self.config.get('s3_region_name'),
                                        endpoint_url=self.config.get('s3_endpoint_url'))
def persist_lines(self, lines):
    """Loads singer messages into snowflake without table caching option"""
    target_snowflake.persist_lines(self.config, lines)
def persist_lines_with_cache(self, lines):
    """Enables table caching option and loads singer messages into snowflake.

    Table caching mechanism is creating and maintaining an extra table in snowflake about
    the table structures. It's very similar to the INFORMATION_SCHEMA.COLUMNS system views
    but querying INFORMATION_SCHEMA is slow especially when lot of taps running
    in parallel.

    Selecting from a real table instead of INFORMATION_SCHEMA and keeping it
    in memory while the target-snowflake is running results better load performance.
    """
    # get_snowflake_statics also resolves the file format (CSV/Parquet) once
    # up front so persist_lines does not re-query it per stream.
    table_cache, file_format_type = target_snowflake.get_snowflake_statics(self.config)
    target_snowflake.persist_lines(self.config, lines, table_cache, file_format_type)
def remove_metadata_columns_from_rows(self, rows):
    """Return copies of *rows* with the _SDC metadata columns stripped.

    The input dicts are never mutated; each output row is a fresh dict
    containing every key except those listed in METADATA_COLUMNS.
    """
    return [
        {column: value for column, value in row.items() if column not in METADATA_COLUMNS}
        for row in rows
    ]
def assert_metadata_columns_exist(self, rows):
    """This is a helper assertion that checks if every row in a list has metadata columns"""
    for r in rows:
        for md_c in METADATA_COLUMNS:
            # assertIn is the idiomatic form of assertTrue(x in y) and
            # produces a failure message naming the missing key and the row.
            self.assertIn(md_c, r)
def assert_metadata_columns_not_exist(self, rows):
    """This is a helper assertion that checks metadata columns don't exist in any row"""
    for r in rows:
        for md_c in METADATA_COLUMNS:
            # assertNotIn is the idiomatic form of assertFalse(x in y) and
            # produces a failure message naming the offending key and row.
            self.assertNotIn(md_c, r)
def assert_three_streams_are_into_snowflake(self, should_metadata_columns_exist=False,
                                            should_hard_deleted_rows=False):
    """
    This is a helper assertion that checks if every data from the message-with-three-streams.json
    file is available in Snowflake tables correctly.
    Useful to check different loading methods (unencrypted, Client-Side encryption, gzip, etc.)
    without duplicating assertions

    :param should_metadata_columns_exist: when True, expect the _SDC metadata
        columns to be present on every loaded row.
    :param should_hard_deleted_rows: when True, expect rows flagged as deleted
        in the tap messages to have been physically removed.
    """
    snowflake = DbSync(self.config)
    default_target_schema = self.config.get('default_target_schema', '')
    schema_mapping = self.config.get('schema_mapping', {})
    # Identify target schema name: an explicit default wins, otherwise a
    # schema_mapping implies the fixed tap schema name used by the fixtures.
    target_schema = None
    if default_target_schema is not None and default_target_schema.strip():
        target_schema = default_target_schema
    elif schema_mapping:
        target_schema = "tap_mysql_test"
    # Get loaded rows from tables
    table_one = snowflake.query("SELECT * FROM {}.test_table_one ORDER BY c_pk".format(target_schema))
    table_two = snowflake.query("SELECT * FROM {}.test_table_two ORDER BY c_pk".format(target_schema))
    table_three = snowflake.query("SELECT * FROM {}.test_table_three ORDER BY c_pk".format(target_schema))
    # ----------------------------------------------------------------------
    # Check rows in table_one
    # ----------------------------------------------------------------------
    expected_table_one = [
        {'C_INT': 1, 'C_PK': 1, 'C_VARCHAR': '1'}
    ]
    self.assertEqual(
        self.remove_metadata_columns_from_rows(table_one), expected_table_one)
    # ----------------------------------------------------------------------
    # Check rows in table_two (row with c_pk=1 is deleted in the fixture)
    # ----------------------------------------------------------------------
    expected_table_two = []
    if not should_hard_deleted_rows:
        expected_table_two = [
            {'C_INT': 1, 'C_PK': 1, 'C_VARCHAR': '1', 'C_DATE': datetime.datetime(2019, 2, 1, 15, 12, 45)},
            {'C_INT': 2, 'C_PK': 2, 'C_VARCHAR': '2', 'C_DATE': datetime.datetime(2019, 2, 10, 2, 0, 0)}
        ]
    else:
        expected_table_two = [
            {'C_INT': 2, 'C_PK': 2, 'C_VARCHAR': '2', 'C_DATE': datetime.datetime(2019, 2, 10, 2, 0, 0)}
        ]
    self.assertEqual(
        self.remove_metadata_columns_from_rows(table_two), expected_table_two)
    # ----------------------------------------------------------------------
    # Check rows in table_three (row with c_pk=3 is deleted in the fixture)
    # ----------------------------------------------------------------------
    expected_table_three = []
    if not should_hard_deleted_rows:
        expected_table_three = [
            {'C_INT': 1, 'C_PK': 1, 'C_VARCHAR': '1', 'C_TIME': datetime.time(4, 0, 0)},
            {'C_INT': 2, 'C_PK': 2, 'C_VARCHAR': '2', 'C_TIME': datetime.time(7, 15, 0)},
            {'C_INT': 3, 'C_PK': 3, 'C_VARCHAR': '3', 'C_TIME': datetime.time(23, 0, 3)}
        ]
    else:
        expected_table_three = [
            {'C_INT': 1, 'C_PK': 1, 'C_VARCHAR': '1', 'C_TIME': datetime.time(4, 0, 0)},
            {'C_INT': 2, 'C_PK': 2, 'C_VARCHAR': '2', 'C_TIME': datetime.time(7, 15, 0)}
        ]
    self.assertEqual(
        self.remove_metadata_columns_from_rows(table_three), expected_table_three)
    # ----------------------------------------------------------------------
    # Check if metadata columns exist or not
    # ----------------------------------------------------------------------
    if should_metadata_columns_exist:
        self.assert_metadata_columns_exist(table_one)
        self.assert_metadata_columns_exist(table_two)
        self.assert_metadata_columns_exist(table_three)
    else:
        self.assert_metadata_columns_not_exist(table_one)
        self.assert_metadata_columns_not_exist(table_two)
        self.assert_metadata_columns_not_exist(table_three)
def assert_logical_streams_are_in_snowflake(self, should_metadata_columns_exist=False):
    """Assert the logical-replication fixture tables hold the expected rows.

    :param should_metadata_columns_exist: when True, strip the _SDC metadata
        columns before comparing (table_four carries no metadata either way,
        since only selected columns are queried from it).
    """
    # Get loaded rows from tables
    snowflake = DbSync(self.config)
    target_schema = self.config.get('default_target_schema', '')
    table_one = snowflake.query("SELECT * FROM {}.logical1_table1 ORDER BY CID".format(target_schema))
    table_two = snowflake.query("SELECT * FROM {}.logical1_table2 ORDER BY CID".format(target_schema))
    table_three = snowflake.query("SELECT * FROM {}.logical2_table1 ORDER BY CID".format(target_schema))
    table_four = snowflake.query("SELECT CID, CTIMENTZ, CTIMETZ FROM {}.logical1_edgydata WHERE CID IN(1,2,3,4,5,6,8,9) ORDER BY CID".format(target_schema))
    # ----------------------------------------------------------------------
    # Check rows in table_one
    # ----------------------------------------------------------------------
    expected_table_one = [
        {'CID': 1, 'CVARCHAR': "inserted row", 'CVARCHAR2': None},
        {'CID': 2, 'CVARCHAR': 'inserted row', "CVARCHAR2": "inserted row"},
        {'CID': 3, 'CVARCHAR': "inserted row", 'CVARCHAR2': "inserted row"},
        {'CID': 4, 'CVARCHAR': "inserted row", 'CVARCHAR2': "inserted row"}
    ]
    # ----------------------------------------------------------------------
    # Check rows in table_two
    # ----------------------------------------------------------------------
    expected_table_two = [
        {'CID': 1, 'CVARCHAR': "updated row"},
        {'CID': 2, 'CVARCHAR': 'updated row'},
        {'CID': 3, 'CVARCHAR': "updated row"},
        {'CID': 5, 'CVARCHAR': "updated row"},
        {'CID': 7, 'CVARCHAR': "updated row"},
        {'CID': 8, 'CVARCHAR': 'updated row'},
        {'CID': 9, 'CVARCHAR': "updated row"},
        {'CID': 10, 'CVARCHAR': 'updated row'}
    ]
    # ----------------------------------------------------------------------
    # Check rows in table_three
    # ----------------------------------------------------------------------
    expected_table_three = [
        {'CID': 1, 'CVARCHAR': "updated row"},
        {'CID': 2, 'CVARCHAR': 'updated row'},
        {'CID': 3, 'CVARCHAR': "updated row"},
    ]
    # ----------------------------------------------------------------------
    # Check rows in table_four (time with/without timezone edge cases)
    # ----------------------------------------------------------------------
    expected_table_four = [
        {'CID': 1, 'CTIMENTZ': None, 'CTIMETZ': None},
        {'CID': 2, 'CTIMENTZ': datetime.time(23, 0, 15), 'CTIMETZ': datetime.time(23, 0, 15)},
        {'CID': 3, 'CTIMENTZ': datetime.time(12, 0, 15), 'CTIMETZ': datetime.time(12, 0, 15)},
        {'CID': 4, 'CTIMENTZ': datetime.time(12, 0, 15), 'CTIMETZ': datetime.time(9, 0, 15)},
        {'CID': 5, 'CTIMENTZ': datetime.time(12, 0, 15), 'CTIMETZ': datetime.time(15, 0, 15)},
        {'CID': 6, 'CTIMENTZ': datetime.time(0, 0), 'CTIMETZ': datetime.time(0, 0)},
        {'CID': 8, 'CTIMENTZ': datetime.time(0, 0), 'CTIMETZ': datetime.time(1, 0)},
        {'CID': 9, 'CTIMENTZ': datetime.time(0, 0), 'CTIMETZ': datetime.time(0, 0)}
    ]
    if should_metadata_columns_exist:
        self.assertEqual(self.remove_metadata_columns_from_rows(table_one), expected_table_one)
        self.assertEqual(self.remove_metadata_columns_from_rows(table_two), expected_table_two)
        self.assertEqual(self.remove_metadata_columns_from_rows(table_three), expected_table_three)
        self.assertEqual(table_four, expected_table_four)
    else:
        self.assertEqual(table_one, expected_table_one)
        self.assertEqual(table_two, expected_table_two)
        self.assertEqual(table_three, expected_table_three)
        self.assertEqual(table_four, expected_table_four)
def assert_logical_streams_are_in_snowflake_and_are_empty(self):
    """Assert the four logical-replication tables exist but contain no rows."""
    snowflake = DbSync(self.config)
    target_schema = self.config.get('default_target_schema', '')
    # Querying a missing table would raise, so each query doubles as an
    # existence check; each result must be an empty list.
    queries = [
        "SELECT * FROM {}.logical1_table1 ORDER BY CID".format(target_schema),
        "SELECT * FROM {}.logical1_table2 ORDER BY CID".format(target_schema),
        "SELECT * FROM {}.logical2_table1 ORDER BY CID".format(target_schema),
        "SELECT CID, CTIMENTZ, CTIMETZ FROM {}.logical1_edgydata WHERE CID IN(1,2,3,4,5,6,8,9) ORDER BY CID".format(target_schema),
    ]
    for sql in queries:
        self.assertEqual(snowflake.query(sql), [])
def assert_binary_data_are_in_snowflake(self, table_name, should_metadata_columns_exist=False):
    """Assert the binary-columns fixture rows were loaded into *table_name*.

    :param table_name: target table; may be a quoted identifier (e.g. '"ORDER"').
    :param should_metadata_columns_exist: when True, strip _SDC columns before comparing.
    """
    # Get loaded rows from tables
    snowflake = DbSync(self.config)
    target_schema = self.config.get('default_target_schema', '')
    table_one = snowflake.query("SELECT * FROM {}.{} ORDER BY ID".format(target_schema, table_name))
    # ----------------------------------------------------------------------
    # Check rows in table_one (keys and data arrive as raw bytes)
    # ----------------------------------------------------------------------
    expected_table_one = [
        {'ID': b'pk2', 'DATA': b'data2', 'CREATED_AT': datetime.datetime(2019, 12, 17, 16, 2, 55)},
        {'ID': b'pk4', 'DATA': b'data4', "CREATED_AT": datetime.datetime(2019, 12, 17, 16, 32, 22)},
    ]
    if should_metadata_columns_exist:
        self.assertEqual(self.remove_metadata_columns_from_rows(table_one), expected_table_one)
    else:
        self.assertEqual(table_one, expected_table_one)
#################################
# TESTS #
#################################
def test_invalid_json(self):
    """Receiving invalid JSONs should raise an exception"""
    tap_lines = test_utils.get_test_tap_lines('invalid-json.json')
    # The malformed line must surface as a JSONDecodeError, not be skipped.
    with self.assertRaises(json.decoder.JSONDecodeError):
        self.persist_lines_with_cache(tap_lines)
def test_message_order(self):
    """RECORD message without a previously received SCHEMA message should raise an exception"""
    tap_lines = test_utils.get_test_tap_lines('invalid-message-order.json')
    # NOTE(review): asserting the broad Exception class would also pass for
    # unrelated failures; a more specific exception type would be stricter.
    with self.assertRaises(Exception):
        self.persist_lines_with_cache(tap_lines)
def test_run_query(self):
    """Running SQLs

    DbSync.query accepts either a single SQL string or a list of SQLs; for a
    list, only the result of the LAST statement is returned.
    """
    snowflake = DbSync(self.config)
    # Running single SQL should return as array
    self.assertEqual(snowflake.query("SELECT 1 col1, 2 col2"),
                     [{'COL1': 1, 'COL2': 2}])
    # Running multiple SQLs should return the result of the last query
    self.assertEqual(snowflake.query(["SELECT 1 col1, 2 col2",
                                      "SELECT 3 col1, 4 col2",
                                      "SELECT 5 col1, 6 col2"]),
                     [{'COL1': 5, 'COL2': 6}])
    # Running multiple SQLs should return empty list if the last query returns zero record
    self.assertEqual(snowflake.query(["SELECT 1 col1, 2 col2",
                                      "SELECT 3 col1, 4 col2",
                                      "SELECT 5 col1, 6 col2 WHERE 1 = 2"]),
                     [])
    # Running multiple SQLs should return the result of the last query even if a previous query returns zero record
    self.assertEqual(snowflake.query(["SELECT 1 col1, 2 col2 WHERE 1 =2 ",
                                      "SELECT 3 col1, 4 col2",
                                      "SELECT 5 col1, 6 col2"]),
                     [{'COL1': 5, 'COL2': 6}])
    # Running multiple SQLs should return empty list if every query returns zero record
    self.assertEqual(snowflake.query(["SELECT 1 col1, 2 col2 WHERE 1 = 2 ",
                                      "SELECT 3 col1, 4 col2 WHERE 1 = 2",
                                      "SELECT 5 col1, 6 col2 WHERE 1 = 2"]),
                     [])
def test_loading_tables_with_no_encryption(self):
    """Load the three-stream fixture with client-side encryption disabled."""
    # Disable client-side encryption before loading.
    self.config['client_side_encryption_master_key'] = ''
    messages = test_utils.get_test_tap_lines('messages-with-three-streams.json')
    self.persist_lines_with_cache(messages)
    self.assert_three_streams_are_into_snowflake()
def test_loading_tables_with_client_side_encryption(self):
    """Load the three-stream fixture with client-side encryption enabled."""
    # The (valid) master key is supplied by the CI environment.
    self.config['client_side_encryption_master_key'] = os.environ.get('CLIENT_SIDE_ENCRYPTION_MASTER_KEY')
    messages = test_utils.get_test_tap_lines('messages-with-three-streams.json')
    self.persist_lines_with_cache(messages)
    self.assert_three_streams_are_into_snowflake()
def test_loading_tables_with_client_side_encryption_and_wrong_master_key(self):
    """A well-formed but incorrect master key must make the load fail."""
    # Key is valid base64 of the right length but does not match the stage key.
    self.config['client_side_encryption_master_key'] = "Wr0n6m45t3rKeY0123456789a0123456789a0123456="
    messages = test_utils.get_test_tap_lines('messages-with-three-streams.json')
    with self.assertRaises(ProgrammingError):
        self.persist_lines_with_cache(messages)
def test_loading_tables_with_metadata_columns(self):
    """Loading with add_metadata_columns=True must create the _SDC columns."""
    self.config['add_metadata_columns'] = True
    messages = test_utils.get_test_tap_lines('messages-with-three-streams.json')
    self.persist_lines_with_cache(messages)
    # Data must be correct AND every row must carry the metadata columns.
    self.assert_three_streams_are_into_snowflake(should_metadata_columns_exist=True)
def test_loading_tables_with_defined_parallelism(self):
    """Loading still works when parallelism is pinned to a single thread."""
    self.config['parallelism'] = 1
    messages = test_utils.get_test_tap_lines('messages-with-three-streams.json')
    self.persist_lines_with_cache(messages)
    self.assert_three_streams_are_into_snowflake()
def test_loading_tables_with_hard_delete(self):
    """hard_delete=True must physically remove deleted rows (and adds _SDC columns)."""
    self.config['hard_delete'] = True
    messages = test_utils.get_test_tap_lines('messages-with-three-streams.json')
    self.persist_lines_with_cache(messages)
    # Hard-delete mode implicitly enables metadata columns, so expect both.
    self.assert_three_streams_are_into_snowflake(
        should_metadata_columns_exist=True,
        should_hard_deleted_rows=True
    )
def test_loading_with_multiple_schema(self):
    """A stream that emits several SCHEMA messages still loads correctly."""
    messages = test_utils.get_test_tap_lines('messages-with-multi-schemas.json')
    # Default settings: no metadata columns, no hard delete.
    self.persist_lines_with_cache(messages)
    self.assert_three_streams_are_into_snowflake(
        should_metadata_columns_exist=False,
        should_hard_deleted_rows=False
    )
def test_loading_tables_with_binary_columns_and_hard_delete(self):
    """Binary columns load correctly with hard-delete mode on."""
    self.config['hard_delete'] = True
    messages = test_utils.get_test_tap_lines('messages-with-binary-columns.json')
    self.persist_lines_with_cache(messages)
    self.assert_binary_data_are_in_snowflake(
        table_name='test_binary',
        should_metadata_columns_exist=True
    )
def test_loading_table_with_reserved_word_as_name_and_hard_delete(self):
    """A table named after a reserved word ("ORDER") loads with hard delete on."""
    self.config['hard_delete'] = True
    messages = test_utils.get_test_tap_lines('messages-with-reserved-name-as-table-name.json')
    self.persist_lines_with_cache(messages)
    # The table name must be quoted since ORDER is a SQL reserved word.
    self.assert_binary_data_are_in_snowflake(
        table_name='"ORDER"',
        should_metadata_columns_exist=True
    )
def test_loading_table_with_space(self):
    """A table name containing spaces (and uppercase) loads correctly."""
    self.config['hard_delete'] = True
    messages = test_utils.get_test_tap_lines('messages-with-space-in-table-name.json')
    self.persist_lines_with_cache(messages)
    # Names with spaces require quoting in Snowflake identifiers.
    self.assert_binary_data_are_in_snowflake(
        table_name='"TABLE WITH SPACE AND UPPERCASE"',
        should_metadata_columns_exist=True
    )
def test_loading_unicode_characters(self):
    """Loading unicode encoded characters

    Covers Greek, Japanese, Chinese, Russian, Thai, Arabic and special ASCII
    characters to verify round-tripping through the load path and Snowflake.
    """
    tap_lines = test_utils.get_test_tap_lines('messages-with-unicode-characters.json')
    # Load with default settings
    self.persist_lines_with_cache(tap_lines)
    # Get loaded rows from tables
    snowflake = DbSync(self.config)
    target_schema = self.config.get('default_target_schema', '')
    table_unicode = snowflake.query("SELECT * FROM {}.test_table_unicode ORDER BY C_INT".format(target_schema))
    self.assertEqual(
        table_unicode,
        [
            {'C_INT': 1, 'C_PK': 1, 'C_VARCHAR': 'Hello world, Καλημέρα κόσμε, コンニチハ'},
            {'C_INT': 2, 'C_PK': 2, 'C_VARCHAR': 'Chinese: 和毛泽东 <<重上井冈山>>. 严永欣, 一九八八年.'},
            {'C_INT': 3, 'C_PK': 3,
             'C_VARCHAR': 'Russian: Зарегистрируйтесь сейчас на Десятую Международную Конференцию по'},
            {'C_INT': 4, 'C_PK': 4, 'C_VARCHAR': 'Thai: แผ่นดินฮั่นเสื่อมโทรมแสนสังเวช'},
            {'C_INT': 5, 'C_PK': 5, 'C_VARCHAR': 'Arabic: لقد لعبت أنت وأصدقاؤك لمدة وحصلتم علي من إجمالي النقاط'},
            {'C_INT': 6, 'C_PK': 6, 'C_VARCHAR': 'Special Characters: [",\'!@£$%^&*()]'}
        ])
def test_non_db_friendly_columns(self):
    """Columns with camelCase names and minus signs load and stay queryable."""
    messages = test_utils.get_test_tap_lines('messages-with-non-db-friendly-columns.json')
    # Load with default settings.
    self.persist_lines_with_cache(messages)
    # Read back the loaded rows.
    snowflake = DbSync(self.config)
    target_schema = self.config.get('default_target_schema', '')
    loaded_rows = snowflake.query(
        "SELECT * FROM {}.test_table_non_db_friendly_columns ORDER BY c_pk".format(target_schema))
    # Five identical-shaped rows, one per primary key 1..5.
    expected_rows = [
        {'C_PK': pk,
         'CAMELCASECOLUMN': 'Dummy row {}'.format(pk),
         'MINUS-COLUMN': 'Dummy row {}'.format(pk)}
        for pk in range(1, 6)
    ]
    self.assertEqual(loaded_rows, expected_rows)
def test_nested_schema_unflattening(self):
    """Loading nested JSON objects into VARIANT columns without flattening.

    Each VARIANT column is converted to a string at query time and compared
    against the expected nested JSON text.
    """
    tap_lines = test_utils.get_test_tap_lines('messages-with-nested-schema.json')
    # Load with default settings - Flattening disabled
    self.persist_lines_with_cache(tap_lines)
    # Get loaded rows from tables - Transform JSON to string at query time.
    # FIX: c_object_with_props must be read from its own column; previously
    # the query selected TO_CHAR(c_object) twice and only passed because both
    # columns happen to contain the same JSON in this fixture.
    snowflake = DbSync(self.config)
    target_schema = self.config.get('default_target_schema', '')
    unflattened_table = snowflake.query("""
    SELECT c_pk
          ,TO_CHAR(c_array) c_array
          ,TO_CHAR(c_object) c_object
          ,TO_CHAR(c_object_with_props) c_object_with_props
          ,TO_CHAR(c_nested_object) c_nested_object
      FROM {}.test_table_nested_schema
     ORDER BY c_pk""".format(target_schema))
    # Should be valid nested JSON strings
    self.assertEqual(
        unflattened_table,
        [{
            'C_PK': 1,
            'C_ARRAY': '[1,2,3]',
            'C_OBJECT': '{"key_1":"value_1"}',
            'C_OBJECT_WITH_PROPS': '{"key_1":"value_1"}',
            'C_NESTED_OBJECT': '{"nested_prop_1":"nested_value_1","nested_prop_2":"nested_value_2","nested_prop_3":{"multi_nested_prop_1":"multi_value_1","multi_nested_prop_2":"multi_value_2"}}'
        }])
def test_nested_schema_flattening(self):
    """Loading nested JSON objects with flattening and not not flattening

    With data_flattening_max_level set, nested object properties become
    individual columns named with '__'-joined key paths.
    """
    tap_lines = test_utils.get_test_tap_lines('messages-with-nested-schema.json')
    # Turning on data flattening
    self.config['data_flattening_max_level'] = 10
    # Load with flattening enabled
    self.persist_lines_with_cache(tap_lines)
    # Get loaded rows from tables
    snowflake = DbSync(self.config)
    target_schema = self.config.get('default_target_schema', '')
    flattened_table = snowflake.query(
        "SELECT * FROM {}.test_table_nested_schema ORDER BY c_pk".format(target_schema))
    # Should be flattened columns
    self.assertEqual(
        flattened_table,
        [{
            'C_PK': 1,
            'C_ARRAY': '[\n  1,\n  2,\n  3\n]',
            'C_OBJECT': None,
            # Cannot map RECORD to SCHEMA. SCHEMA doesn't have properties that requires for flattening
            'C_OBJECT_WITH_PROPS__KEY_1': 'value_1',
            'C_NESTED_OBJECT__NESTED_PROP_1': 'nested_value_1',
            'C_NESTED_OBJECT__NESTED_PROP_2': 'nested_value_2',
            'C_NESTED_OBJECT__NESTED_PROP_3__MULTI_NESTED_PROP_1': 'multi_value_1',
            'C_NESTED_OBJECT__NESTED_PROP_3__MULTI_NESTED_PROP_2': 'multi_value_2',
        }])
def test_column_name_change(self):
    """Tests correct renaming of snowflake columns after source change"""
    tap_lines_before_column_name_change = test_utils.get_test_tap_lines('messages-with-three-streams.json')
    tap_lines_after_column_name_change = test_utils.get_test_tap_lines(
        'messages-with-three-streams-modified-column.json')

    # Load with default settings: first the original schema, then the modified one
    self.persist_lines_with_cache(tap_lines_before_column_name_change)
    self.persist_lines_with_cache(tap_lines_after_column_name_change)

    # Get loaded rows from tables
    snowflake = DbSync(self.config)
    target_schema = self.config.get('default_target_schema', '')
    table_one = snowflake.query("SELECT * FROM {}.test_table_one ORDER BY c_pk".format(target_schema))
    table_two = snowflake.query("SELECT * FROM {}.test_table_two ORDER BY c_pk".format(target_schema))
    table_three = snowflake.query("SELECT * FROM {}.test_table_three ORDER BY c_pk".format(target_schema))

    # Get the previous column name from information schema in test_table_two.
    # The versioned column name is generated at load time, so it must be
    # looked up from INFORMATION_SCHEMA rather than hard-coded here.
    previous_column_name = snowflake.query("""
        SELECT column_name
          FROM information_schema.columns
         WHERE table_catalog = '{}'
           AND table_schema = '{}'
           AND table_name = 'TEST_TABLE_TWO'
           AND ordinal_position = 1
        """.format(
        self.config.get('dbname', '').upper(),
        target_schema.upper()))[0]["COLUMN_NAME"]

    # Table one should have no changes
    self.assertEqual(
        table_one,
        [{'C_INT': 1, 'C_PK': 1, 'C_VARCHAR': '1'}])

    # Table two should have a versioned column and a new column
    self.assertEqual(
        table_two,
        [
            {previous_column_name: datetime.datetime(2019, 2, 1, 15, 12, 45), 'C_INT': 1, 'C_PK': 1,
             'C_VARCHAR': '1', 'C_DATE': None, 'C_NEW_COLUMN': None},
            {previous_column_name: datetime.datetime(2019, 2, 10, 2), 'C_INT': 2, 'C_PK': 2, 'C_VARCHAR': '2',
             'C_DATE': '2019-02-12 02:00:00', 'C_NEW_COLUMN': 'data 1'},
            {previous_column_name: None, 'C_INT': 3, 'C_PK': 3, 'C_VARCHAR': '2', 'C_DATE': '2019-02-15 02:00:00',
             'C_NEW_COLUMN': 'data 2'}
        ]
    )

    # Table three should have a renamed column and a new column
    self.assertEqual(
        table_three,
        [
            {'C_INT': 1, 'C_PK': 1, 'C_TIME': datetime.time(4, 0), 'C_VARCHAR': '1', 'C_TIME_RENAMED': None,
             'C_NEW_COLUMN': None},
            {'C_INT': 2, 'C_PK': 2, 'C_TIME': datetime.time(7, 15), 'C_VARCHAR': '2', 'C_TIME_RENAMED': None,
             'C_NEW_COLUMN': None},
            {'C_INT': 3, 'C_PK': 3, 'C_TIME': datetime.time(23, 0, 3), 'C_VARCHAR': '3',
             'C_TIME_RENAMED': datetime.time(8, 15), 'C_NEW_COLUMN': 'data 1'},
            {'C_INT': 4, 'C_PK': 4, 'C_TIME': None, 'C_VARCHAR': '4', 'C_TIME_RENAMED': datetime.time(23, 0, 3),
             'C_NEW_COLUMN': 'data 2'}
        ])
def test_column_name_change_without_table_cache(self):
    """Tests correct renaming of snowflake columns after source change with not using table caching"""
    tap_lines_before_column_name_change = test_utils.get_test_tap_lines('messages-with-three-streams.json')
    tap_lines_after_column_name_change = test_utils.get_test_tap_lines(
        'messages-with-three-streams-modified-column.json')

    # Load with default settings, using the non-cached persist path
    self.persist_lines(tap_lines_before_column_name_change)
    self.persist_lines(tap_lines_after_column_name_change)

    # Get loaded rows from tables
    snowflake = DbSync(self.config)
    target_schema = self.config.get('default_target_schema', '')
    table_one = snowflake.query("SELECT * FROM {}.test_table_one ORDER BY c_pk".format(target_schema))
    table_two = snowflake.query("SELECT * FROM {}.test_table_two ORDER BY c_pk".format(target_schema))
    table_three = snowflake.query("SELECT * FROM {}.test_table_three ORDER BY c_pk".format(target_schema))

    # Get the previous column name from information schema in test_table_two.
    # The versioned column name is generated at load time, so it must be
    # looked up from INFORMATION_SCHEMA rather than hard-coded here.
    previous_column_name = snowflake.query("""
        SELECT column_name
          FROM information_schema.columns
         WHERE table_catalog = '{}'
           AND table_schema = '{}'
           AND table_name = 'TEST_TABLE_TWO'
           AND ordinal_position = 1
        """.format(
        self.config.get('dbname', '').upper(),
        target_schema.upper()))[0]["COLUMN_NAME"]

    # Table one should have no changes
    self.assertEqual(
        table_one,
        [{'C_INT': 1, 'C_PK': 1, 'C_VARCHAR': '1'}])

    # Table two should have a versioned column and a new column
    self.assertEqual(
        table_two,
        [
            {previous_column_name: datetime.datetime(2019, 2, 1, 15, 12, 45), 'C_INT': 1, 'C_PK': 1,
             'C_VARCHAR': '1', 'C_DATE': None, 'C_NEW_COLUMN': None},
            {previous_column_name: datetime.datetime(2019, 2, 10, 2), 'C_INT': 2, 'C_PK': 2, 'C_VARCHAR': '2',
             'C_DATE': '2019-02-12 02:00:00', 'C_NEW_COLUMN': 'data 1'},
            {previous_column_name: None, 'C_INT': 3, 'C_PK': 3, 'C_VARCHAR': '2', 'C_DATE': '2019-02-15 02:00:00',
             'C_NEW_COLUMN': 'data 2'}
        ]
    )

    # Table three should have a renamed column and a new column
    self.assertEqual(
        table_three,
        [
            {'C_INT': 1, 'C_PK': 1, 'C_TIME': datetime.time(4, 0), 'C_VARCHAR': '1', 'C_TIME_RENAMED': None,
             'C_NEW_COLUMN': None},
            {'C_INT': 2, 'C_PK': 2, 'C_TIME': datetime.time(7, 15), 'C_VARCHAR': '2', 'C_TIME_RENAMED': None,
             'C_NEW_COLUMN': None},
            {'C_INT': 3, 'C_PK': 3, 'C_TIME': datetime.time(23, 0, 3), 'C_VARCHAR': '3',
             'C_TIME_RENAMED': datetime.time(8, 15), 'C_NEW_COLUMN': 'data 1'},
            {'C_INT': 4, 'C_PK': 4, 'C_TIME': None, 'C_VARCHAR': '4', 'C_TIME_RENAMED': datetime.time(23, 0, 3),
             'C_NEW_COLUMN': 'data 2'}
        ])
def test_logical_streams_from_pg_with_hard_delete_and_default_batch_size_should_pass(self):
    """Tests logical streams from pg with inserts, updates and deletes"""
    # Hard delete mode on; batch size left at its default
    self.config['hard_delete'] = True
    messages = test_utils.get_test_tap_lines('messages-pg-logical-streams.json')
    self.persist_lines_with_cache(messages)
    self.assert_logical_streams_are_in_snowflake(True)
def test_logical_streams_from_pg_with_hard_delete_and_batch_size_of_5_should_pass(self):
    """Tests logical streams from pg with inserts, updates and deletes"""
    messages = test_utils.get_test_tap_lines('messages-pg-logical-streams.json')
    # Enable hard deletes and force tiny batches of five rows
    self.config.update({'hard_delete': True, 'batch_size_rows': 5})
    self.persist_lines_with_cache(messages)
    self.assert_logical_streams_are_in_snowflake(True)
def test_logical_streams_from_pg_with_hard_delete_and_batch_size_of_5_and_no_records_should_pass(self):
    """Tests logical streams from pg with inserts, updates and deletes"""
    messages = test_utils.get_test_tap_lines('messages-pg-logical-streams-no-records.json')
    # Enable hard deletes and force tiny batches of five rows; the input
    # contains no RECORD messages, so tables must exist but stay empty
    self.config.update({'hard_delete': True, 'batch_size_rows': 5})
    self.persist_lines_with_cache(messages)
    self.assert_logical_streams_are_in_snowflake_and_are_empty()
@mock.patch('target_snowflake.emit_state')
def test_flush_streams_with_no_intermediate_flushes(self, mock_emit_state):
    """Test emitting states when no intermediate flush required"""
    mock_emit_state.get.return_value = None
    tap_lines = test_utils.get_test_tap_lines('messages-pg-logical-streams.json')

    # Set batch size big enough to never have to flush in the middle
    self.config['hard_delete'] = True
    self.config['batch_size_rows'] = 1000
    self.persist_lines_with_cache(tap_lines)

    # State should be emitted only once with the latest received STATE message
    self.assertEqual(
        mock_emit_state.mock_calls,
        [
            mock.call({"currently_syncing": None, "bookmarks": {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108240872, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108240872, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108240872, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108240872, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}})
        ])

    # Every table should be loaded correctly
    self.assert_logical_streams_are_in_snowflake(True)
@mock.patch('target_snowflake.emit_state')
def test_flush_streams_with_intermediate_flushes(self, mock_emit_state):
    """Test emitting states when intermediate flushes required"""
    mock_emit_state.get.return_value = None
    tap_lines = test_utils.get_test_tap_lines('messages-pg-logical-streams.json')

    # Set batch size small enough to trigger multiple stream flushes
    self.config['hard_delete'] = True
    self.config['batch_size_rows'] = 10
    self.persist_lines_with_cache(tap_lines)

    # State should be emitted multiple times, updating the positions only in the stream which got flushed
    self.assertEqual(
        mock_emit_state.call_args_list,
        [
            # Flush #1 - Flushed edgydata until lsn: 108197216
            mock.call({"currently_syncing": None, "bookmarks": {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108197216, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108196176, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108196176, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108196176, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}}),
            # Flush #2 - Flushed logical1-logical1_table2 until lsn: 108201336
            mock.call({"currently_syncing": None, "bookmarks": {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108197216, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108196176, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108201336, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108196176, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}}),
            # Flush #3 - Flushed logical1-logical1_table2 until lsn: 108237600
            mock.call({"currently_syncing": None, "bookmarks": {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108197216, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108196176, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108237600, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108196176, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}}),
            # Flush #4 - Flushed logical1-logical1_table2 until lsn: 108238768
            mock.call({"currently_syncing": None, "bookmarks": {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108197216, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108196176, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108238768, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108196176, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}}),
            # Flush #5 - Flushed logical1-logical1_table2 until lsn: 108239896
            mock.call({"currently_syncing": None, "bookmarks": {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108197216, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108196176, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108239896, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108196176, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}}),
            # Flush #6 - Last flush, update every stream lsn: 108240872
            mock.call({"currently_syncing": None, "bookmarks": {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108240872, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108240872, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108240872, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108240872, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}}),
        ])

    # Every table should be loaded correctly
    self.assert_logical_streams_are_in_snowflake(True)
@mock.patch('target_snowflake.emit_state')
def test_flush_streams_with_intermediate_flushes_on_all_streams(self, mock_emit_state):
    """Test emitting states when intermediate flushes required and flush_all_streams is enabled"""
    mock_emit_state.get.return_value = None
    tap_lines = test_utils.get_test_tap_lines('messages-pg-logical-streams.json')

    # Set batch size small enough to trigger multiple stream flushes
    self.config['hard_delete'] = True
    self.config['batch_size_rows'] = 10
    self.config['flush_all_streams'] = True
    self.persist_lines_with_cache(tap_lines)

    # State should be emitted 6 times, flushing every stream and updating every stream position
    self.assertEqual(
        mock_emit_state.call_args_list,
        [
            # Flush #1 - Flush every stream until lsn: 108197216
            mock.call({"currently_syncing": None, "bookmarks": {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108197216, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108197216, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108197216, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108197216, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}}),
            # Flush #2 - Flush every stream until lsn 108201336
            mock.call({'currently_syncing': None, 'bookmarks': {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108201336, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108201336, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108201336, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108201336, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}}),
            # Flush #3 - Flush every stream until lsn: 108237600
            mock.call({'currently_syncing': None, 'bookmarks': {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108237600, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108237600, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108237600, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108237600, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}}),
            # Flush #4 - Flush every stream until lsn: 108238768
            mock.call({'currently_syncing': None, 'bookmarks': {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108238768, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108238768, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108238768, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108238768, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}}),
            # Flush #5 - Flush every stream until lsn: 108239896
            mock.call({'currently_syncing': None, 'bookmarks': {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108239896, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108239896, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108239896, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108239896, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}}),
            # Flush #6 - Last flush, update every stream until lsn: 108240872
            mock.call({'currently_syncing': None, 'bookmarks': {
                "logical1-logical1_edgydata": {"last_replication_method": "LOG_BASED", "lsn": 108240872, "version": 1570922723596, "xmin": None},
                "logical1-logical1_table1": {"last_replication_method": "LOG_BASED", "lsn": 108240872, "version": 1570922723618, "xmin": None},
                "logical1-logical1_table2": {"last_replication_method": "LOG_BASED", "lsn": 108240872, "version": 1570922723635, "xmin": None},
                "logical2-logical2_table1": {"last_replication_method": "LOG_BASED", "lsn": 108240872, "version": 1570922723651, "xmin": None},
                "public-city": {"last_replication_method": "INCREMENTAL", "replication_key": "id", "version": 1570922723667, "replication_key_value": 4079},
                "public-country": {"last_replication_method": "FULL_TABLE", "version": 1570922730456, "xmin": None},
                "public2-wearehere": {}}}),
        ])

    # Every table should be loaded correctly
    self.assert_logical_streams_are_in_snowflake(True)
@mock.patch('target_snowflake.emit_state')
def test_flush_streams_based_on_batch_wait_limit(self, mock_emit_state):
    """Tests logical streams from pg with inserts, updates and deletes"""
    mock_emit_state.get.return_value = None
    # A large row batch combined with a tiny wait limit forces time-based flushes
    self.config.update({
        'hard_delete': True,
        'batch_size_rows': 1000,
        'batch_wait_limit_seconds': 0.1,
    })
    messages = test_utils.get_test_tap_lines('messages-pg-logical-streams.json')
    self.persist_lines_with_cache(messages)
    self.assert_logical_streams_are_in_snowflake(True)
    self.assertGreater(mock_emit_state.call_count, 1, 'Expecting multiple flushes')
def test_record_validation(self):
    """Test validating records"""
    tap_input = test_utils.get_test_tap_lines('messages-with-invalid-records.json')

    # With validation enabled, invalid records must be rejected at validation time
    self.config['validate_records'] = True
    with self.assertRaises(RecordValidationException):
        self.persist_lines_with_cache(tap_input)

    # With validation disabled the failure surfaces at load time instead;
    # the exception type depends on the configured file format
    self.config['validate_records'] = False
    file_format = self.config['file_format']
    if file_format == os.environ.get('TARGET_SNOWFLAKE_FILE_FORMAT_CSV'):
        with self.assertRaises(ProgrammingError):
            self.persist_lines_with_cache(tap_input)
    if file_format == os.environ.get('TARGET_SNOWFLAKE_FILE_FORMAT_PARQUET'):
        with self.assertRaises(ArrowTypeError):
            self.persist_lines_with_cache(tap_input)
def test_pg_records_validation(self):
    """Test validating records from postgres tap"""
    invalid_lines = test_utils.get_test_tap_lines('messages-pg-with-invalid-records.json')

    # Invalid records with validation on: expect a validation error
    self.config['validate_records'] = True
    with self.assertRaises(RecordValidationException):
        self.persist_lines_with_cache(invalid_lines)

    # Invalid records with validation off: load completes without error
    self.config['validate_records'] = False
    self.persist_lines_with_cache(invalid_lines)

    # Valid records load fine with validation switched back on
    valid_lines = test_utils.get_test_tap_lines('messages-pg-with-valid-records.json')
    self.config['validate_records'] = True
    self.persist_lines_with_cache(valid_lines)
def test_loading_tables_with_custom_temp_dir(self):
    """Loading multiple tables from the same input tap using custom temp directory"""
    # Point the target at a custom local temp directory, then load
    self.config['temp_dir'] = '~/.pipelinewise/tmp'
    messages = test_utils.get_test_tap_lines('messages-with-three-streams.json')
    self.persist_lines_with_cache(messages)
    self.assert_three_streams_are_into_snowflake()
def test_aws_env_vars(self):
    """Test loading data with credentials defined in AWS environment variables
    than explicitly provided access keys"""
    tap_lines = test_utils.get_test_tap_lines("messages-with-three-streams.json")

    # Save original config to restore later. Copied before the try block so
    # the finally clause can never see it unbound.
    orig_config = self.config.copy()
    try:
        # Move aws access key and secret from config into environment variables.
        # Direct indexing raises a clear KeyError when the source variables are
        # missing, instead of os.environ rejecting a None value with TypeError.
        os.environ['AWS_ACCESS_KEY_ID'] = os.environ['TARGET_SNOWFLAKE_AWS_ACCESS_KEY']
        os.environ['AWS_SECRET_ACCESS_KEY'] = os.environ['TARGET_SNOWFLAKE_AWS_SECRET_ACCESS_KEY']
        del self.config['aws_access_key_id']
        del self.config['aws_secret_access_key']

        # Create a new S3 client using env vars
        s3_client = S3UploadClient(self.config)
        s3_client._create_s3_client()
    finally:
        # Restore the original state to not confuse other tests. pop() with a
        # default cannot raise KeyError when the try block failed before the
        # variables were set, which would otherwise mask the original error.
        os.environ.pop('AWS_ACCESS_KEY_ID', None)
        os.environ.pop('AWS_SECRET_ACCESS_KEY', None)
        self.config = orig_config.copy()
def test_profile_based_auth(self):
    """Test AWS profile based authentication rather than access keys"""
    try:
        # Keep a copy of the config so it can be restored afterwards
        orig_config = self.config.copy()

        # Swap the access keys for a non-existing profile name
        del self.config['aws_access_key_id']
        del self.config['aws_secret_access_key']
        self.config['aws_profile'] = 'fake-profile'

        # Building the S3 client with an unknown profile must fail
        with self.assertRaises(botocore.exceptions.ProfileNotFound):
            s3_upload_client = S3UploadClient(self.config)
            s3_upload_client._create_s3_client()
    finally:
        # Put the original config back so other tests are unaffected
        self.config = orig_config.copy()
def test_profile_based_auth_aws_env_var(self):
    """Test AWS profile based authentication using AWS environment variables"""
    # Save original config to restore later. Copied before the try block so
    # the finally clause can never see it unbound.
    orig_config = self.config.copy()
    try:
        # Remove access keys from config and add profile name environment variable
        del self.config['aws_access_key_id']
        del self.config['aws_secret_access_key']
        os.environ['AWS_PROFILE'] = 'fake_profile'

        # Create a new S3 client using profile based authentication
        with self.assertRaises(botocore.exceptions.ProfileNotFound):
            s3_upload_client = S3UploadClient(self.config)
            s3_upload_client._create_s3_client()
    finally:
        # Restore the original state to not confuse other tests. pop() with a
        # default cannot raise KeyError when AWS_PROFILE was never set (e.g.
        # when one of the del statements above failed first), which would
        # otherwise mask the original exception.
        os.environ.pop('AWS_PROFILE', None)
        self.config = orig_config.copy()
def test_s3_custom_endpoint_url(self):
    """Test S3 connection with custom region and endpoint URL"""
    try:
        # Keep a copy of the config so it can be restored afterwards
        orig_config = self.config.copy()

        # Point the client at a bogus S3 endpoint
        self.config['s3_endpoint_url'] = 'fake-endpoint-url'

        # Botocore raises ValueError for an invalid S3 endpoint url
        with self.assertRaises(ValueError):
            s3_upload_client = S3UploadClient(self.config)
            s3_upload_client._create_s3_client()
    finally:
        # Put the original config back so other tests are unaffected
        self.config = orig_config.copy()
def test_too_many_records_exception(self):
    """Test if query function raise exception if max_records exceeded"""
    snowflake = DbSync(self.config)
    generator_sql = "SELECT seq4() FROM TABLE(GENERATOR(ROWCOUNT => 50000))"

    # Without a limit, all 50k generated rows come back
    self.assertEqual(len(snowflake.query(generator_sql)), 50000)

    # With max_records below the row count the query must raise
    with self.assertRaises(target_snowflake.db_sync.TooManyRecordsException):
        snowflake.query(generator_sql, max_records=10000)
def test_loading_tables_with_no_compression(self):
    """Loading multiple tables with compression turned off"""
    # Disable client-side compression of the load files, then load
    self.config['no_compression'] = True
    messages = test_utils.get_test_tap_lines('messages-with-three-streams.json')
    self.persist_lines_with_cache(messages)
    self.assert_three_streams_are_into_snowflake()
def test_quoted_identifiers_ignore_case_session_parameter(self):
    """Test if QUOTED_IDENTIFIERS_IGNORE_CASE session parameter set to FALSE"""
    snowflake = DbSync(self.config)

    # Set QUOTED_IDENTIFIERS_IGNORE_CASE to True on user level
    snowflake.query(f"ALTER USER {self.config['user']} SET QUOTED_IDENTIFIERS_IGNORE_CASE = TRUE")
    try:
        # Quoted column names should be case sensitive even if the
        # QUOTED_IDENTIFIERS_IGNORE_CASE parameter set to TRUE on user or account level
        result = snowflake.query('SELECT 1 AS "Foo", 1 AS "foo", 1 AS "FOO", 1 AS foo, 1 AS FOO')
        self.assertEqual(result, [{
            'Foo': 1,
            'foo': 1,
            'FOO': 1
        }])
    finally:
        # Always reset the user-level parameter, even if the assertion fails,
        # so a leftover TRUE setting cannot poison other tests on this user
        snowflake.query(f"ALTER USER {self.config['user']} UNSET QUOTED_IDENTIFIERS_IGNORE_CASE")
def test_query_tagging(self):
    """Loading multiple tables with query tagging"""
    snowflake = DbSync(self.config)
    tap_lines = test_utils.get_test_tap_lines('messages-with-three-streams.json')
    # NOTE(review): lowercase '%s' is a non-standard, platform-dependent
    # strftime directive (epoch seconds on glibc), not '%S'; it is used here
    # only as a uniqueness token for the tag -- confirm it works on the CI OS
    current_time = datetime.datetime.now().strftime('%H:%M:%s')

    # Tag queries with dynamic schema and table tokens; the {{...}} tokens
    # are substituted by the target at query time
    self.config['query_tag'] = f'PPW test tap run at {current_time}. ' \
                               f'Loading into {{{{database}}}}.{{{{schema}}}}.{{{{table}}}}'
    self.persist_lines_with_cache(tap_lines)

    # Get query tags from QUERY_HISTORY
    result = snowflake.query(f"""SELECT query_tag, count(*) queries
    FROM table(information_schema.query_history_by_user('{self.config['user']}'))
    WHERE query_tag like '%%PPW test tap run at {current_time}%%'
    GROUP BY query_tag
    ORDER BY 1""")

    target_db = self.config['dbname']
    target_schema = self.config['default_target_schema']
    self.assertEqual(result, [{
        'QUERY_TAG': f'PPW test tap run at {current_time}. Loading into {target_db}..',
        'QUERIES': 4
    },
        {
            'QUERY_TAG': f'PPW test tap run at {current_time}. Loading into {target_db}.{target_schema}.TEST_TABLE_ONE',
            'QUERIES': 7
        },
        {
            'QUERY_TAG': f'PPW test tap run at {current_time}. Loading into {target_db}.{target_schema}.TEST_TABLE_THREE',
            'QUERIES': 6
        },
        {
            'QUERY_TAG': f'PPW test tap run at {current_time}. Loading into {target_db}.{target_schema}.TEST_TABLE_TWO',
            'QUERIES': 6
        }
    ])

    # Detecting file format type should run only once
    result = snowflake.query(f"""SELECT count(*) show_file_format_queries
    FROM table(information_schema.query_history_by_user('{self.config['user']}'))
    WHERE query_tag like '%%PPW test tap run at {current_time}%%'
    AND query_text like 'SHOW FILE FORMATS%%'""")
    self.assertEqual(result, [{
        'SHOW_FILE_FORMAT_QUERIES': 1
    }
    ])
def test_table_stage(self):
    """Test if data can be loaded via table stages"""
    messages = test_utils.get_test_tap_lines('messages-with-three-streams.json')

    # Clearing the S3 bucket and named stage makes the target fall back to table stages
    self.config['s3_bucket'] = None
    self.config['stage'] = None

    # CSV file format works with table stages
    self.config['file_format'] = os.environ.get('TARGET_SNOWFLAKE_FILE_FORMAT_CSV')
    self.persist_lines_with_cache(messages)
    self.assert_three_streams_are_into_snowflake()

    # Parquet file format is not supported with table stages and must exit
    self.config['file_format'] = os.environ.get('TARGET_SNOWFLAKE_FILE_FORMAT_PARQUET')
    with self.assertRaises(SystemExit):
        self.persist_lines_with_cache(messages)
def test_custom_role(self):
    """Test if custom role can be used"""
    messages = test_utils.get_test_tap_lines('messages-with-three-streams.json')

    # A role that does not exist (or is not authorized) makes the driver raise:
    # 250001 (08001): Role 'INVALID-ROLE' specified in the connect string does not exist or not authorized.
    self.config['role'] = 'invalid-not-existing-role'
    with self.assertRaises(DatabaseError):
        self.persist_lines_with_cache(messages)
def test_parsing_date_failure(self):
    """Test that loading records with unexpected value types raises UnexpectedValueTypeException"""
    tap_lines = test_utils.get_test_tap_lines('messages-with-unexpected-types.json')
    with self.assertRaises(target_snowflake.UnexpectedValueTypeException):
        self.persist_lines_with_cache(tap_lines)
def test_parquet(self):
    """Test if parquet file can be loaded"""
    # Switch the load file format to Parquet
    self.config['file_format'] = os.environ.get('TARGET_SNOWFLAKE_FILE_FORMAT_PARQUET')
    messages = test_utils.get_test_tap_lines('messages-with-three-streams.json')
    self.persist_lines_with_cache(messages)
    # Data and the metadata columns must both be present
    self.assert_three_streams_are_into_snowflake()
def test_archive_load_files(self):
    """Test if load file is copied to archive folder.

    Verifies both the S3 object metadata written by the archiver and the
    gzip-compressed contents of the archived load file.
    """
    self.config['archive_load_files'] = True
    self.config['archive_load_files_s3_prefix'] = 'archive_folder'
    self.config['tap_id'] = 'test_tap_id'
    # Disable client-side encryption so the archived file can be read back as-is
    self.config['client_side_encryption_master_key'] = ''
    s3_bucket = self.config['s3_bucket']
    # Delete any dangling files from archive
    files_in_s3_archive = self.s3_client.list_objects(
        Bucket=s3_bucket, Prefix="archive_folder/test_tap_id/").get('Contents', [])
    for file_in_archive in files_in_s3_archive:
        key = file_in_archive["Key"]
        self.s3_client.delete_object(Bucket=s3_bucket, Key=key)
    tap_lines = test_utils.get_test_tap_lines('messages-simple-table.json')
    self.persist_lines_with_cache(tap_lines)
    # Verify expected file metadata in S3 -- exactly one object must be archived
    files_in_s3_archive = self.s3_client.list_objects(Bucket=s3_bucket, Prefix="archive_folder/test_tap_id/").get(
        'Contents')
    self.assertIsNotNone(files_in_s3_archive)
    self.assertEqual(1, len(files_in_s3_archive))
    archived_file_key = files_in_s3_archive[0]['Key']
    archive_metadata = self.s3_client.head_object(Bucket=s3_bucket, Key=archived_file_key)['Metadata']
    self.assertEqual({
        'tap': 'test_tap_id',
        'schema': 'tap_mysql_test',
        'table': 'test_simple_table',
        'archived-by': 'pipelinewise_target_snowflake',
        'incremental-key': 'id',
        'incremental-key-min': '1',
        'incremental-key-max': '5'
    }, archive_metadata)
    # Verify expected file contents
    tmpfile = tempfile.NamedTemporaryFile()
    with open(tmpfile.name, 'wb') as f:
        self.s3_client.download_fileobj(s3_bucket, archived_file_key, f)
    lines = []
    # gzip.open accepts the NamedTemporaryFile object directly (file-object mode)
    with gzip.open(tmpfile, "rt") as gzipfile:
        for line in gzipfile.readlines():
            lines.append(line)
    self.assertEqual(''.join(lines), '''1,"xyz1","not-formatted-time-1"
2,"xyz2","not-formatted-time-2"
3,"xyz3","not-formatted-time-3"
4,"xyz4","not-formatted-time-4"
5,"xyz5","not-formatted-time-5"
''')
| 53.743715
| 198
| 0.62397
| 7,783
| 66,266
| 5.03392
| 0.08146
| 0.024503
| 0.041808
| 0.031854
| 0.78708
| 0.753848
| 0.724266
| 0.701907
| 0.675132
| 0.648077
| 0
| 0.050802
| 0.246084
| 66,266
| 1,232
| 199
| 53.787338
| 0.733261
| 0.168669
| 0
| 0.534969
| 0
| 0.09816
| 0.301943
| 0.128759
| 0
| 0
| 0
| 0
| 0.112883
| 1
| 0.063804
| false
| 0.003681
| 0.02454
| 0
| 0.092025
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8a00cc8bf1d209504a925aa2aef502dd4989faa1
| 90
|
py
|
Python
|
customers/schema/__init__.py
|
dionyself/leaky
|
d31d8dcd4e9f8fdadd3c504140097670b7d6be4d
|
[
"Apache-2.0"
] | 1
|
2020-01-22T02:56:47.000Z
|
2020-01-22T02:56:47.000Z
|
customers/schema/__init__.py
|
dionyself/leaky
|
d31d8dcd4e9f8fdadd3c504140097670b7d6be4d
|
[
"Apache-2.0"
] | null | null | null |
customers/schema/__init__.py
|
dionyself/leaky
|
d31d8dcd4e9f8fdadd3c504140097670b7d6be4d
|
[
"Apache-2.0"
] | null | null | null |
# Package facade: re-export the GraphQL root types so callers can import
# them directly from the package.
from .mutation import Mutation
from .query import Query
__all__ = ['Mutation', 'Query']
| 15
| 31
| 0.733333
| 11
| 90
| 5.636364
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 90
| 5
| 32
| 18
| 0.815789
| 0
| 0
| 0
| 0
| 0
| 0.144444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
8a0827a00f4c7becd63006f70459cdf0511c53b3
| 304
|
py
|
Python
|
live_recorder/you_live/__init__.py
|
lyzhang0113/LiveRecorder
|
a4e59e248fc963a5dcad8f24be916462bf43f657
|
[
"MIT"
] | null | null | null |
live_recorder/you_live/__init__.py
|
lyzhang0113/LiveRecorder
|
a4e59e248fc963a5dcad8f24be916462bf43f657
|
[
"MIT"
] | 7
|
2020-12-07T16:39:52.000Z
|
2020-12-07T17:00:55.000Z
|
live_recorder/you_live/__init__.py
|
h-nakashima/LiveRecorder
|
6cbb463978d2ca7d4f3577b92cb734a4389ea923
|
[
"MIT"
] | null | null | null |
from .douyu_recorder import DouyuRecorder
from .bili_recorder import BiliRecorder
from .kuaishou_recorder import KuaishouRecorder
from .flv_checker import Flv
from .live_thread.download import DownloadThread
from .live_thread.monitoring import MonitoringThread
from . import _recorder as Recorder
| 38
| 53
| 0.848684
| 37
| 304
| 6.783784
| 0.486486
| 0.167331
| 0.111554
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121711
| 304
| 7
| 54
| 43.428571
| 0.940075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
8a0a0c0c218d907fa41fda16b9633c2bd2099592
| 369
|
py
|
Python
|
agent/states/state.py
|
jamesrosstwo/battlesnake
|
c37d38101d0df49c703e2d9df53a4e8678b9ecd5
|
[
"MIT"
] | null | null | null |
agent/states/state.py
|
jamesrosstwo/battlesnake
|
c37d38101d0df49c703e2d9df53a4e8678b9ecd5
|
[
"MIT"
] | null | null | null |
agent/states/state.py
|
jamesrosstwo/battlesnake
|
c37d38101d0df49c703e2d9df53a4e8678b9ecd5
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from agent.actions.action import BattleSnakeAction
class BattleSnakeState(ABC):
    """Abstract base class for one state of an entity's state machine.

    Concrete subclasses must implement enter/execute/exit; ``execute`` is
    the only hook with a declared result (a BattleSnakeAction).
    """
    def __init__(self):
        pass
    @abstractmethod
    def enter(self, entity):
        # NOTE(review): presumably invoked when *entity* transitions into
        # this state -- confirm against the state-machine driver.
        pass
    @abstractmethod
    def execute(self, entity) -> BattleSnakeAction:
        # Must return the BattleSnakeAction for *entity* while in this state.
        pass
    @abstractmethod
    def exit(self, entity):
        # NOTE(review): presumably invoked when *entity* leaves this state.
        pass
| 16.043478
| 51
| 0.661247
| 37
| 369
| 6.486486
| 0.486486
| 0.225
| 0.2625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.268293
| 369
| 22
| 52
| 16.772727
| 0.888889
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.285714
| 0.142857
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
8a1201c631c44847ffac9c61c14ce7ced66018bb
| 126
|
py
|
Python
|
python/testData/formatter/spaceAfterTrailingCommaIfNoSpaceAfterCommaButWithinBracesOrBrackets.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/formatter/spaceAfterTrailingCommaIfNoSpaceAfterCommaButWithinBracesOrBrackets.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/formatter/spaceAfterTrailingCommaIfNoSpaceAfterCommaButWithinBracesOrBrackets.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
# Formatter test fixture: trailing commas with and without a following
# space inside braces/brackets. NOTE(review): the exact whitespace below
# IS the test input -- never auto-format this data.
s1 = {1, 2, 3,}
s2 = {1, }
d1 = {'foo': 1, 'bar': 2, 'baz': 3,}
d2 = {'foo': 1,}
d3 = {}
l1 = [1, 2, 3,]
l2 = [1, ]
l3 = []
| 11.454545
| 36
| 0.31746
| 24
| 126
| 1.666667
| 0.583333
| 0.1
| 0.15
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 0.301587
| 126
| 10
| 37
| 12.6
| 0.227273
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8a31710a4e7028c50a4e31444adbee6f892f2936
| 2,506
|
py
|
Python
|
data/gdelt/service/simsearch-multiple-sources.py
|
smartdatalake/simsearch
|
9e0ffc0b9da0c77f798cca4d21c7fb834e96ca6d
|
[
"Apache-2.0"
] | 3
|
2020-07-28T14:59:41.000Z
|
2022-03-07T03:58:50.000Z
|
data/gdelt/service/simsearch-multiple-sources.py
|
smartdatalake/simsearch
|
9e0ffc0b9da0c77f798cca4d21c7fb834e96ca6d
|
[
"Apache-2.0"
] | 1
|
2021-04-28T15:16:50.000Z
|
2021-04-28T15:16:50.000Z
|
data/gdelt/service/simsearch-multiple-sources.py
|
smartdatalake/simsearch
|
9e0ffc0b9da0c77f798cca4d21c7fb834e96ca6d
|
[
"Apache-2.0"
] | 1
|
2021-04-24T00:54:10.000Z
|
2021-04-24T00:54:10.000Z
|
import requests
# Request to the web service to create a new instance of SimSearch
url = 'http://localhost:8090/simsearch/api/index'
# Blueprint JSON specification of the multiple data sources with queryable attributes regarding the same entities having common identifiers.
# Three different types of data sources (each providing one or multiple queryable attributes) are specified in this example:
# (1) JDBC connection to a PostgreSQL/PostGIS database;
# (2) a local CSV file; and
# (3) An ElasticSearch REST API service hosted at a remote server.
index = {'sources':[{'name':'localPostGISdatabase','type': 'jdbc','driver': 'org.postgresql.Driver','url': 'jdbc:postgresql://localhost:5432/myDatabase','username':'postgresUserName','password':'postgresPassword','encoding':'UTF-8'}, {'name':'localPath1','type':'csv','directory':'./test/'}, {'name':'remoteElasticSearchAPI','type':'restapi','url':'http://remoteHostIPaddress:9200/myIndexName/_search?pretty','username':'elasticUserName','password':'elasticPassword'}], 'search':[ {'operation':'spatial_knn','source':'localPath1','dataset':'dataset1.csv','header':'true','search_column':'WKT'}, {'operation':'numerical_topk','source':'localPath1','header':'true','dataset':'dataset1.csv','search_column':'positive_sentiment'}, {'operation':'numerical_topk','source':'localPath1','dataset':'dataset1.csv','header':'true','search_column':'negative_sentiment'}, {'operation':'numerical_topk','source':'remoteElasticSearchAPI','dataset':'companies','key_column':'id','search_column':'employees'}, {'operation':'categorical_topk','source':'localPath1','dataset':'dataset2.csv','search_column':'persons'}, {'operation':'numerical_topk','source':'remoteElasticSearchAPI','dataset':'companies','key_column':'id','search_column':'revenue'}, {'operation':'spatial_knn','source':'localPostGISdatabase','dataset':'companies','search_column':'location'}, {'operation':'categorical_topk','source':'localPostGISdatabase','dataset':'companies','search_column':'keywords'}]}
# No API key is required when submitting this request to create a new instance
# A new API key will be generated once this request completes successfully
headers = {'Content-Type' : 'application/json'}
# Post a request with these parameters
response = requests.post(url, json=index, headers=headers)
# Provide the resulting message
# IMPORTANT! Save the API key returned with this message and use it in all subsequent requests against this instance of SimSearch
print(response.json())
| 113.909091
| 1,457
| 0.754988
| 298
| 2,506
| 6.278523
| 0.489933
| 0.051309
| 0.047034
| 0.059861
| 0.275788
| 0.21272
| 0.154997
| 0.154997
| 0.154997
| 0.095136
| 0
| 0.010776
| 0.074222
| 2,506
| 22
| 1,458
| 113.909091
| 0.79569
| 0.32921
| 0
| 0
| 0
| 0
| 0.694012
| 0.077844
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0.166667
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
8a3455b4f88b445a7c8b0d5947bbe4a1ab98eb54
| 75
|
py
|
Python
|
linsheng.py
|
lin8979/myss
|
9d344400fb98df6a32b95480a2e1f9e8936cc8b4
|
[
"Apache-2.0"
] | null | null | null |
linsheng.py
|
lin8979/myss
|
9d344400fb98df6a32b95480a2e1f9e8936cc8b4
|
[
"Apache-2.0"
] | null | null | null |
linsheng.py
|
lin8979/myss
|
9d344400fb98df6a32b95480a2e1f9e8936cc8b4
|
[
"Apache-2.0"
] | null | null | null |
# Tiny demo script: emit three fixed greeting lines to stdout.
for greeting in ("hello world ", "nihoa nihao", "haofan haoifan"):
    print(greeting)
| 18.75
| 24
| 0.64
| 9
| 75
| 5.333333
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 75
| 3
| 25
| 25
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.493333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
8a3e32fbca935d3b779ad37acdcbe058668c9a2b
| 35,097
|
py
|
Python
|
tests/test_op.py
|
vvvrrooomm/alembic
|
5d89cf4e77e5568887f2c8926a589c55427dab12
|
[
"MIT"
] | null | null | null |
tests/test_op.py
|
vvvrrooomm/alembic
|
5d89cf4e77e5568887f2c8926a589c55427dab12
|
[
"MIT"
] | null | null | null |
tests/test_op.py
|
vvvrrooomm/alembic
|
5d89cf4e77e5568887f2c8926a589c55427dab12
|
[
"MIT"
] | null | null | null |
"""Test against the builders in the op.* module."""
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.sql import column
from sqlalchemy.sql import func
from sqlalchemy.sql import text
from alembic import op
from alembic.operations import ops
from alembic.operations import schemaobj
from alembic.testing import assert_raises_message
from alembic.testing import config
from alembic.testing import eq_
from alembic.testing import is_
from alembic.testing import mock
from alembic.testing.fixtures import op_fixture
from alembic.testing.fixtures import TestBase
@event.listens_for(Table, "after_parent_attach")
def _add_cols(table, metadata):
    """SQLAlchemy event hook: append an extra 'bat' Integer column to the
    one fixture table that exercises auto-appended columns."""
    if table.name != "tbl_with_auto_appended_column":
        return
    table.append_column(Column("bat", Integer))
class OpTest(TestBase):
def test_rename_table(self):
context = op_fixture()
op.rename_table("t1", "t2")
context.assert_("ALTER TABLE t1 RENAME TO t2")
def test_rename_table_schema(self):
context = op_fixture()
op.rename_table("t1", "t2", schema="foo")
context.assert_("ALTER TABLE foo.t1 RENAME TO foo.t2")
def test_create_index_no_expr_allowed(self):
op_fixture()
assert_raises_message(
ValueError,
r"String or text\(\) construct expected",
op.create_index,
"name",
"tname",
[func.foo(column("x"))],
)
def test_add_column_schema_hard_quoting(self):
from sqlalchemy.sql.schema import quoted_name
context = op_fixture("postgresql")
op.add_column(
"somename",
Column("colname", String),
schema=quoted_name("some.schema", quote=True),
)
context.assert_(
'ALTER TABLE "some.schema".somename ADD COLUMN colname VARCHAR'
)
def test_rename_table_schema_hard_quoting(self):
from sqlalchemy.sql.schema import quoted_name
context = op_fixture("postgresql")
op.rename_table(
"t1", "t2", schema=quoted_name("some.schema", quote=True)
)
context.assert_('ALTER TABLE "some.schema".t1 RENAME TO t2')
def test_add_constraint_schema_hard_quoting(self):
from sqlalchemy.sql.schema import quoted_name
context = op_fixture("postgresql")
op.create_check_constraint(
"ck_user_name_len",
"user_table",
func.len(column("name")) > 5,
schema=quoted_name("some.schema", quote=True),
)
context.assert_(
'ALTER TABLE "some.schema".user_table ADD '
"CONSTRAINT ck_user_name_len CHECK (len(name) > 5)"
)
def test_create_index_quoting(self):
context = op_fixture("postgresql")
op.create_index("geocoded", "locations", ["IShouldBeQuoted"])
context.assert_(
'CREATE INDEX geocoded ON locations ("IShouldBeQuoted")'
)
def test_create_index_expressions(self):
context = op_fixture()
op.create_index("geocoded", "locations", [text("lower(coordinates)")])
context.assert_(
"CREATE INDEX geocoded ON locations (lower(coordinates))"
)
def test_add_column(self):
context = op_fixture()
op.add_column("t1", Column("c1", Integer, nullable=False))
context.assert_("ALTER TABLE t1 ADD COLUMN c1 INTEGER NOT NULL")
def test_add_column_schema(self):
context = op_fixture()
op.add_column(
"t1", Column("c1", Integer, nullable=False), schema="foo"
)
context.assert_("ALTER TABLE foo.t1 ADD COLUMN c1 INTEGER NOT NULL")
def test_add_column_with_default(self):
context = op_fixture()
op.add_column(
"t1", Column("c1", Integer, nullable=False, server_default="12")
)
context.assert_(
"ALTER TABLE t1 ADD COLUMN c1 INTEGER DEFAULT '12' NOT NULL"
)
def test_add_column_with_index(self):
context = op_fixture()
op.add_column("t1", Column("c1", Integer, nullable=False, index=True))
context.assert_(
"ALTER TABLE t1 ADD COLUMN c1 INTEGER NOT NULL",
"CREATE INDEX ix_t1_c1 ON t1 (c1)",
)
def test_add_column_schema_with_default(self):
context = op_fixture()
op.add_column(
"t1",
Column("c1", Integer, nullable=False, server_default="12"),
schema="foo",
)
context.assert_(
"ALTER TABLE foo.t1 ADD COLUMN c1 INTEGER DEFAULT '12' NOT NULL"
)
def test_add_column_fk(self):
context = op_fixture()
op.add_column(
"t1", Column("c1", Integer, ForeignKey("c2.id"), nullable=False)
)
context.assert_(
"ALTER TABLE t1 ADD COLUMN c1 INTEGER NOT NULL",
"ALTER TABLE t1 ADD FOREIGN KEY(c1) REFERENCES c2 (id)",
)
def test_add_column_schema_fk(self):
context = op_fixture()
op.add_column(
"t1",
Column("c1", Integer, ForeignKey("c2.id"), nullable=False),
schema="foo",
)
context.assert_(
"ALTER TABLE foo.t1 ADD COLUMN c1 INTEGER NOT NULL",
"ALTER TABLE foo.t1 ADD FOREIGN KEY(c1) REFERENCES c2 (id)",
)
def test_add_column_schema_type(self):
"""Test that a schema type generates its constraints...."""
context = op_fixture()
op.add_column("t1", Column("c1", Boolean, nullable=False))
context.assert_(
"ALTER TABLE t1 ADD COLUMN c1 BOOLEAN NOT NULL",
"ALTER TABLE t1 ADD CHECK (c1 IN (0, 1))",
)
def test_add_column_schema_schema_type(self):
"""Test that a schema type generates its constraints...."""
context = op_fixture()
op.add_column(
"t1", Column("c1", Boolean, nullable=False), schema="foo"
)
context.assert_(
"ALTER TABLE foo.t1 ADD COLUMN c1 BOOLEAN NOT NULL",
"ALTER TABLE foo.t1 ADD CHECK (c1 IN (0, 1))",
)
def test_add_column_schema_type_checks_rule(self):
"""Test that a schema type doesn't generate a
constraint based on check rule."""
context = op_fixture("postgresql")
op.add_column("t1", Column("c1", Boolean, nullable=False))
context.assert_("ALTER TABLE t1 ADD COLUMN c1 BOOLEAN NOT NULL")
def test_add_column_fk_self_referential(self):
context = op_fixture()
op.add_column(
"t1", Column("c1", Integer, ForeignKey("t1.c2"), nullable=False)
)
context.assert_(
"ALTER TABLE t1 ADD COLUMN c1 INTEGER NOT NULL",
"ALTER TABLE t1 ADD FOREIGN KEY(c1) REFERENCES t1 (c2)",
)
def test_add_column_schema_fk_self_referential(self):
context = op_fixture()
op.add_column(
"t1",
Column("c1", Integer, ForeignKey("foo.t1.c2"), nullable=False),
schema="foo",
)
context.assert_(
"ALTER TABLE foo.t1 ADD COLUMN c1 INTEGER NOT NULL",
"ALTER TABLE foo.t1 ADD FOREIGN KEY(c1) REFERENCES foo.t1 (c2)",
)
def test_add_column_fk_schema(self):
context = op_fixture()
op.add_column(
"t1",
Column("c1", Integer, ForeignKey("remote.t2.c2"), nullable=False),
)
context.assert_(
"ALTER TABLE t1 ADD COLUMN c1 INTEGER NOT NULL",
"ALTER TABLE t1 ADD FOREIGN KEY(c1) REFERENCES remote.t2 (c2)",
)
def test_add_column_schema_fk_schema(self):
context = op_fixture()
op.add_column(
"t1",
Column("c1", Integer, ForeignKey("remote.t2.c2"), nullable=False),
schema="foo",
)
context.assert_(
"ALTER TABLE foo.t1 ADD COLUMN c1 INTEGER NOT NULL",
"ALTER TABLE foo.t1 ADD FOREIGN KEY(c1) REFERENCES remote.t2 (c2)",
)
def test_drop_column(self):
context = op_fixture()
op.drop_column("t1", "c1")
context.assert_("ALTER TABLE t1 DROP COLUMN c1")
def test_drop_column_schema(self):
context = op_fixture()
op.drop_column("t1", "c1", schema="foo")
context.assert_("ALTER TABLE foo.t1 DROP COLUMN c1")
def test_alter_column_nullable(self):
context = op_fixture()
op.alter_column("t", "c", nullable=True)
context.assert_(
# TODO: not sure if this is PG only or standard
# SQL
"ALTER TABLE t ALTER COLUMN c DROP NOT NULL"
)
def test_alter_column_schema_nullable(self):
context = op_fixture()
op.alter_column("t", "c", nullable=True, schema="foo")
context.assert_(
# TODO: not sure if this is PG only or standard
# SQL
"ALTER TABLE foo.t ALTER COLUMN c DROP NOT NULL"
)
def test_alter_column_not_nullable(self):
context = op_fixture()
op.alter_column("t", "c", nullable=False)
context.assert_(
# TODO: not sure if this is PG only or standard
# SQL
"ALTER TABLE t ALTER COLUMN c SET NOT NULL"
)
def test_alter_column_schema_not_nullable(self):
context = op_fixture()
op.alter_column("t", "c", nullable=False, schema="foo")
context.assert_(
# TODO: not sure if this is PG only or standard
# SQL
"ALTER TABLE foo.t ALTER COLUMN c SET NOT NULL"
)
def test_alter_column_rename(self):
context = op_fixture()
op.alter_column("t", "c", new_column_name="x")
context.assert_("ALTER TABLE t RENAME c TO x")
def test_alter_column_schema_rename(self):
context = op_fixture()
op.alter_column("t", "c", new_column_name="x", schema="foo")
context.assert_("ALTER TABLE foo.t RENAME c TO x")
def test_alter_column_type(self):
context = op_fixture()
op.alter_column("t", "c", type_=String(50))
context.assert_("ALTER TABLE t ALTER COLUMN c TYPE VARCHAR(50)")
def test_alter_column_schema_type(self):
context = op_fixture()
op.alter_column("t", "c", type_=String(50), schema="foo")
context.assert_("ALTER TABLE foo.t ALTER COLUMN c TYPE VARCHAR(50)")
def test_alter_column_set_default(self):
context = op_fixture()
op.alter_column("t", "c", server_default="q")
context.assert_("ALTER TABLE t ALTER COLUMN c SET DEFAULT 'q'")
def test_alter_column_schema_set_default(self):
context = op_fixture()
op.alter_column("t", "c", server_default="q", schema="foo")
context.assert_("ALTER TABLE foo.t ALTER COLUMN c SET DEFAULT 'q'")
def test_alter_column_set_compiled_default(self):
context = op_fixture()
op.alter_column(
"t", "c", server_default=func.utc_thing(func.current_timestamp())
)
context.assert_(
"ALTER TABLE t ALTER COLUMN c "
"SET DEFAULT utc_thing(CURRENT_TIMESTAMP)"
)
def test_alter_column_schema_set_compiled_default(self):
context = op_fixture()
op.alter_column(
"t",
"c",
server_default=func.utc_thing(func.current_timestamp()),
schema="foo",
)
context.assert_(
"ALTER TABLE foo.t ALTER COLUMN c "
"SET DEFAULT utc_thing(CURRENT_TIMESTAMP)"
)
def test_alter_column_drop_default(self):
context = op_fixture()
op.alter_column("t", "c", server_default=None)
context.assert_("ALTER TABLE t ALTER COLUMN c DROP DEFAULT")
def test_alter_column_schema_drop_default(self):
context = op_fixture()
op.alter_column("t", "c", server_default=None, schema="foo")
context.assert_("ALTER TABLE foo.t ALTER COLUMN c DROP DEFAULT")
def test_alter_column_schema_type_unnamed(self):
context = op_fixture("mssql", native_boolean=False)
op.alter_column("t", "c", type_=Boolean())
context.assert_(
"ALTER TABLE t ALTER COLUMN c BIT",
"ALTER TABLE t ADD CHECK (c IN (0, 1))",
)
def test_alter_column_schema_schema_type_unnamed(self):
context = op_fixture("mssql", native_boolean=False)
op.alter_column("t", "c", type_=Boolean(), schema="foo")
context.assert_(
"ALTER TABLE foo.t ALTER COLUMN c BIT",
"ALTER TABLE foo.t ADD CHECK (c IN (0, 1))",
)
def test_alter_column_schema_type_named(self):
context = op_fixture("mssql", native_boolean=False)
op.alter_column("t", "c", type_=Boolean(name="xyz"))
context.assert_(
"ALTER TABLE t ALTER COLUMN c BIT",
"ALTER TABLE t ADD CONSTRAINT xyz CHECK (c IN (0, 1))",
)
def test_alter_column_schema_schema_type_named(self):
context = op_fixture("mssql", native_boolean=False)
op.alter_column("t", "c", type_=Boolean(name="xyz"), schema="foo")
context.assert_(
"ALTER TABLE foo.t ALTER COLUMN c BIT",
"ALTER TABLE foo.t ADD CONSTRAINT xyz CHECK (c IN (0, 1))",
)
def test_alter_column_schema_type_existing_type(self):
context = op_fixture("mssql", native_boolean=False)
op.alter_column(
"t", "c", type_=String(10), existing_type=Boolean(name="xyz")
)
context.assert_(
"ALTER TABLE t DROP CONSTRAINT xyz",
"ALTER TABLE t ALTER COLUMN c VARCHAR(10)",
)
def test_alter_column_schema_schema_type_existing_type(self):
context = op_fixture("mssql", native_boolean=False)
op.alter_column(
"t",
"c",
type_=String(10),
existing_type=Boolean(name="xyz"),
schema="foo",
)
context.assert_(
"ALTER TABLE foo.t DROP CONSTRAINT xyz",
"ALTER TABLE foo.t ALTER COLUMN c VARCHAR(10)",
)
def test_alter_column_schema_type_existing_type_no_const(self):
context = op_fixture("postgresql")
op.alter_column("t", "c", type_=String(10), existing_type=Boolean())
context.assert_("ALTER TABLE t ALTER COLUMN c TYPE VARCHAR(10)")
def test_alter_column_schema_schema_type_existing_type_no_const(self):
context = op_fixture("postgresql")
op.alter_column(
"t", "c", type_=String(10), existing_type=Boolean(), schema="foo"
)
context.assert_("ALTER TABLE foo.t ALTER COLUMN c TYPE VARCHAR(10)")
def test_alter_column_schema_type_existing_type_no_new_type(self):
context = op_fixture("postgresql")
op.alter_column("t", "c", nullable=False, existing_type=Boolean())
context.assert_("ALTER TABLE t ALTER COLUMN c SET NOT NULL")
def test_alter_column_schema_schema_type_existing_type_no_new_type(self):
context = op_fixture("postgresql")
op.alter_column(
"t", "c", nullable=False, existing_type=Boolean(), schema="foo"
)
context.assert_("ALTER TABLE foo.t ALTER COLUMN c SET NOT NULL")
def test_add_foreign_key(self):
context = op_fixture()
op.create_foreign_key(
"fk_test", "t1", "t2", ["foo", "bar"], ["bat", "hoho"]
)
context.assert_(
"ALTER TABLE t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) "
"REFERENCES t2 (bat, hoho)"
)
def test_add_foreign_key_schema(self):
context = op_fixture()
op.create_foreign_key(
"fk_test",
"t1",
"t2",
["foo", "bar"],
["bat", "hoho"],
source_schema="foo2",
referent_schema="bar2",
)
context.assert_(
"ALTER TABLE foo2.t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) "
"REFERENCES bar2.t2 (bat, hoho)"
)
def test_add_foreign_key_schema_same_tablename(self):
context = op_fixture()
op.create_foreign_key(
"fk_test",
"t1",
"t1",
["foo", "bar"],
["bat", "hoho"],
source_schema="foo2",
referent_schema="bar2",
)
context.assert_(
"ALTER TABLE foo2.t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) "
"REFERENCES bar2.t1 (bat, hoho)"
)
def test_add_foreign_key_onupdate(self):
context = op_fixture()
op.create_foreign_key(
"fk_test",
"t1",
"t2",
["foo", "bar"],
["bat", "hoho"],
onupdate="CASCADE",
)
context.assert_(
"ALTER TABLE t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) "
"REFERENCES t2 (bat, hoho) ON UPDATE CASCADE"
)
def test_add_foreign_key_ondelete(self):
context = op_fixture()
op.create_foreign_key(
"fk_test",
"t1",
"t2",
["foo", "bar"],
["bat", "hoho"],
ondelete="CASCADE",
)
context.assert_(
"ALTER TABLE t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) "
"REFERENCES t2 (bat, hoho) ON DELETE CASCADE"
)
def test_add_foreign_key_deferrable(self):
context = op_fixture()
op.create_foreign_key(
"fk_test",
"t1",
"t2",
["foo", "bar"],
["bat", "hoho"],
deferrable=True,
)
context.assert_(
"ALTER TABLE t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) "
"REFERENCES t2 (bat, hoho) DEFERRABLE"
)
def test_add_foreign_key_initially(self):
context = op_fixture()
op.create_foreign_key(
"fk_test",
"t1",
"t2",
["foo", "bar"],
["bat", "hoho"],
initially="INITIAL",
)
context.assert_(
"ALTER TABLE t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) "
"REFERENCES t2 (bat, hoho) INITIALLY INITIAL"
)
@config.requirements.foreign_key_match
def test_add_foreign_key_match(self):
context = op_fixture()
op.create_foreign_key(
"fk_test",
"t1",
"t2",
["foo", "bar"],
["bat", "hoho"],
match="SIMPLE",
)
context.assert_(
"ALTER TABLE t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) "
"REFERENCES t2 (bat, hoho) MATCH SIMPLE"
)
def test_add_foreign_key_dialect_kw(self):
op_fixture()
with mock.patch("sqlalchemy.schema.ForeignKeyConstraint") as fkc:
op.create_foreign_key(
"fk_test",
"t1",
"t2",
["foo", "bar"],
["bat", "hoho"],
foobar_arg="xyz",
)
if config.requirements.foreign_key_match.enabled:
eq_(
fkc.mock_calls[0],
mock.call(
["foo", "bar"],
["t2.bat", "t2.hoho"],
onupdate=None,
ondelete=None,
name="fk_test",
foobar_arg="xyz",
deferrable=None,
initially=None,
match=None,
),
)
else:
eq_(
fkc.mock_calls[0],
mock.call(
["foo", "bar"],
["t2.bat", "t2.hoho"],
onupdate=None,
ondelete=None,
name="fk_test",
foobar_arg="xyz",
deferrable=None,
initially=None,
),
)
def test_add_foreign_key_self_referential(self):
context = op_fixture()
op.create_foreign_key("fk_test", "t1", "t1", ["foo"], ["bar"])
context.assert_(
"ALTER TABLE t1 ADD CONSTRAINT fk_test "
"FOREIGN KEY(foo) REFERENCES t1 (bar)"
)
def test_add_primary_key_constraint(self):
context = op_fixture()
op.create_primary_key("pk_test", "t1", ["foo", "bar"])
context.assert_(
"ALTER TABLE t1 ADD CONSTRAINT pk_test PRIMARY KEY (foo, bar)"
)
def test_add_primary_key_constraint_schema(self):
context = op_fixture()
op.create_primary_key("pk_test", "t1", ["foo"], schema="bar")
context.assert_(
"ALTER TABLE bar.t1 ADD CONSTRAINT pk_test PRIMARY KEY (foo)"
)
def test_add_check_constraint(self):
context = op_fixture()
op.create_check_constraint(
"ck_user_name_len", "user_table", func.len(column("name")) > 5
)
context.assert_(
"ALTER TABLE user_table ADD CONSTRAINT ck_user_name_len "
"CHECK (len(name) > 5)"
)
def test_add_check_constraint_schema(self):
context = op_fixture()
op.create_check_constraint(
"ck_user_name_len",
"user_table",
func.len(column("name")) > 5,
schema="foo",
)
context.assert_(
"ALTER TABLE foo.user_table ADD CONSTRAINT ck_user_name_len "
"CHECK (len(name) > 5)"
)
def test_add_unique_constraint(self):
context = op_fixture()
op.create_unique_constraint("uk_test", "t1", ["foo", "bar"])
context.assert_(
"ALTER TABLE t1 ADD CONSTRAINT uk_test UNIQUE (foo, bar)"
)
def test_add_foreign_key_legacy_kwarg(self):
context = op_fixture()
op.create_foreign_key(
name="some_fk",
source="some_table",
referent="referred_table",
local_cols=["a", "b"],
remote_cols=["c", "d"],
ondelete="CASCADE",
)
context.assert_(
"ALTER TABLE some_table ADD CONSTRAINT some_fk "
"FOREIGN KEY(a, b) REFERENCES referred_table (c, d) "
"ON DELETE CASCADE"
)
def test_add_unique_constraint_legacy_kwarg(self):
context = op_fixture()
op.create_unique_constraint(
name="uk_test", source="t1", local_cols=["foo", "bar"]
)
context.assert_(
"ALTER TABLE t1 ADD CONSTRAINT uk_test UNIQUE (foo, bar)"
)
def test_drop_constraint_legacy_kwarg(self):
context = op_fixture()
op.drop_constraint(
name="pk_name", table_name="sometable", type_="primary"
)
context.assert_("ALTER TABLE sometable DROP CONSTRAINT pk_name")
def test_create_pk_legacy_kwarg(self):
context = op_fixture()
op.create_primary_key(
name=None,
table_name="sometable",
cols=["router_id", "l3_agent_id"],
)
context.assert_(
"ALTER TABLE sometable ADD PRIMARY KEY (router_id, l3_agent_id)"
)
def test_legacy_kwarg_catches_arg_missing(self):
op_fixture()
assert_raises_message(
TypeError,
"missing required positional argument: columns",
op.create_primary_key,
name=None,
table_name="sometable",
wrong_cols=["router_id", "l3_agent_id"],
)
def test_add_unique_constraint_schema(self):
context = op_fixture()
op.create_unique_constraint(
"uk_test", "t1", ["foo", "bar"], schema="foo"
)
context.assert_(
"ALTER TABLE foo.t1 ADD CONSTRAINT uk_test UNIQUE (foo, bar)"
)
def test_drop_constraint(self):
context = op_fixture()
op.drop_constraint("foo_bar_bat", "t1")
context.assert_("ALTER TABLE t1 DROP CONSTRAINT foo_bar_bat")
def test_drop_constraint_schema(self):
context = op_fixture()
op.drop_constraint("foo_bar_bat", "t1", schema="foo")
context.assert_("ALTER TABLE foo.t1 DROP CONSTRAINT foo_bar_bat")
def test_create_index(self):
context = op_fixture()
op.create_index("ik_test", "t1", ["foo", "bar"])
context.assert_("CREATE INDEX ik_test ON t1 (foo, bar)")
def test_create_unique_index(self):
context = op_fixture()
op.create_index("ik_test", "t1", ["foo", "bar"], unique=True)
context.assert_("CREATE UNIQUE INDEX ik_test ON t1 (foo, bar)")
def test_create_index_quote_flag(self):
context = op_fixture()
op.create_index("ik_test", "t1", ["foo", "bar"], quote=True)
context.assert_('CREATE INDEX "ik_test" ON t1 (foo, bar)')
def test_create_index_table_col_event(self):
context = op_fixture()
op.create_index(
"ik_test", "tbl_with_auto_appended_column", ["foo", "bar"]
)
context.assert_(
"CREATE INDEX ik_test ON tbl_with_auto_appended_column (foo, bar)"
)
def test_add_unique_constraint_col_event(self):
context = op_fixture()
op.create_unique_constraint(
"ik_test", "tbl_with_auto_appended_column", ["foo", "bar"]
)
context.assert_(
"ALTER TABLE tbl_with_auto_appended_column "
"ADD CONSTRAINT ik_test UNIQUE (foo, bar)"
)
def test_create_index_schema(self):
context = op_fixture()
op.create_index("ik_test", "t1", ["foo", "bar"], schema="foo")
context.assert_("CREATE INDEX ik_test ON foo.t1 (foo, bar)")
def test_drop_index(self):
context = op_fixture()
op.drop_index("ik_test")
context.assert_("DROP INDEX ik_test")
def test_drop_index_schema(self):
context = op_fixture()
op.drop_index("ik_test", schema="foo")
context.assert_("DROP INDEX foo.ik_test")
def test_drop_table(self):
context = op_fixture()
op.drop_table("tb_test")
context.assert_("DROP TABLE tb_test")
def test_drop_table_schema(self):
context = op_fixture()
op.drop_table("tb_test", schema="foo")
context.assert_("DROP TABLE foo.tb_test")
def test_create_table_selfref(self):
context = op_fixture()
op.create_table(
"some_table",
Column("id", Integer, primary_key=True),
Column("st_id", Integer, ForeignKey("some_table.id")),
)
context.assert_(
"CREATE TABLE some_table ("
"id INTEGER NOT NULL, "
"st_id INTEGER, "
"PRIMARY KEY (id), "
"FOREIGN KEY(st_id) REFERENCES some_table (id))"
)
def test_create_table_fk_and_schema(self):
context = op_fixture()
t1 = op.create_table(
"some_table",
Column("id", Integer, primary_key=True),
Column("foo_id", Integer, ForeignKey("foo.id")),
schema="schema",
)
context.assert_(
"CREATE TABLE schema.some_table ("
"id INTEGER NOT NULL, "
"foo_id INTEGER, "
"PRIMARY KEY (id), "
"FOREIGN KEY(foo_id) REFERENCES foo (id))"
)
eq_(t1.c.id.name, "id")
eq_(t1.schema, "schema")
def test_create_table_no_pk(self):
context = op_fixture()
t1 = op.create_table(
"some_table",
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
)
context.assert_(
"CREATE TABLE some_table (x INTEGER, y INTEGER, z INTEGER)"
)
assert not t1.primary_key
def test_create_table_two_fk(self):
context = op_fixture()
op.create_table(
"some_table",
Column("id", Integer, primary_key=True),
Column("foo_id", Integer, ForeignKey("foo.id")),
Column("foo_bar", Integer, ForeignKey("foo.bar")),
)
context.assert_(
"CREATE TABLE some_table ("
"id INTEGER NOT NULL, "
"foo_id INTEGER, "
"foo_bar INTEGER, "
"PRIMARY KEY (id), "
"FOREIGN KEY(foo_id) REFERENCES foo (id), "
"FOREIGN KEY(foo_bar) REFERENCES foo (bar))"
)
def test_inline_literal(self):
context = op_fixture()
from sqlalchemy.sql import table, column
from sqlalchemy import String, Integer
account = table(
"account", column("name", String), column("id", Integer)
)
op.execute(
account.update()
.where(account.c.name == op.inline_literal("account 1"))
.values({"name": op.inline_literal("account 2")})
)
op.execute(
account.update()
.where(account.c.id == op.inline_literal(1))
.values({"id": op.inline_literal(2)})
)
context.assert_(
"UPDATE account SET name='account 2' "
"WHERE account.name = 'account 1'",
"UPDATE account SET id=2 WHERE account.id = 1",
)
def test_cant_op(self):
if hasattr(op, "_proxy"):
del op._proxy
assert_raises_message(
NameError,
"Can't invoke function 'inline_literal', as the "
"proxy object has not yet been established "
"for the Alembic 'Operations' class. "
"Try placing this code inside a callable.",
op.inline_literal,
"asdf",
)
def test_naming_changes(self):
context = op_fixture()
op.alter_column("t", "c", name="x")
context.assert_("ALTER TABLE t RENAME c TO x")
context = op_fixture()
op.alter_column("t", "c", new_column_name="x")
context.assert_("ALTER TABLE t RENAME c TO x")
context = op_fixture("mysql")
op.drop_constraint("f1", "t1", type="foreignkey")
context.assert_("ALTER TABLE t1 DROP FOREIGN KEY f1")
context = op_fixture("mysql")
op.drop_constraint("f1", "t1", type_="foreignkey")
context.assert_("ALTER TABLE t1 DROP FOREIGN KEY f1")
def test_naming_changes_drop_idx(self):
context = op_fixture("mssql")
op.drop_index("ik_test", tablename="t1")
context.assert_("DROP INDEX ik_test ON t1")
@config.requirements.comments
def test_create_table_comment_op(self):
context = op_fixture()
op.create_table_comment("some_table", "table comment")
context.assert_("COMMENT ON TABLE some_table IS 'table comment'")
@config.requirements.comments
def test_drop_table_comment_op(self):
context = op_fixture()
op.drop_table_comment("some_table")
context.assert_("COMMENT ON TABLE some_table IS NULL")
class SQLModeOpTest(TestBase):
def test_auto_literals(self):
context = op_fixture(as_sql=True, literal_binds=True)
from sqlalchemy.sql import table, column
from sqlalchemy import String, Integer
account = table(
"account", column("name", String), column("id", Integer)
)
op.execute(
account.update()
.where(account.c.name == op.inline_literal("account 1"))
.values({"name": op.inline_literal("account 2")})
)
op.execute(text("update table set foo=:bar").bindparams(bar="bat"))
context.assert_(
"UPDATE account SET name='account 2' "
"WHERE account.name = 'account 1'",
"update table set foo='bat'",
)
def test_create_table_literal_binds(self):
context = op_fixture(as_sql=True, literal_binds=True)
op.create_table(
"some_table",
Column("id", Integer, primary_key=True),
Column("st_id", Integer, ForeignKey("some_table.id")),
)
context.assert_(
"CREATE TABLE some_table (id INTEGER NOT NULL, st_id INTEGER, "
"PRIMARY KEY (id), FOREIGN KEY(st_id) REFERENCES some_table (id))"
)
class CustomOpTest(TestBase):
def test_custom_op(self):
from alembic.operations import Operations, MigrateOperation
@Operations.register_operation("create_sequence")
class CreateSequenceOp(MigrateOperation):
"""Create a SEQUENCE."""
def __init__(self, sequence_name, **kw):
self.sequence_name = sequence_name
self.kw = kw
@classmethod
def create_sequence(cls, operations, sequence_name, **kw):
"""Issue a "CREATE SEQUENCE" instruction."""
op = CreateSequenceOp(sequence_name, **kw)
return operations.invoke(op)
@Operations.implementation_for(CreateSequenceOp)
def create_sequence(operations, operation):
operations.execute("CREATE SEQUENCE %s" % operation.sequence_name)
context = op_fixture()
op.create_sequence("foob")
context.assert_("CREATE SEQUENCE foob")
class EnsureOrigObjectFromToTest(TestBase):
"""the to_XYZ and from_XYZ methods are used heavily in autogenerate.
It's critical that these methods, at least the "drop" form,
always return the *same* object if available so that all the info
passed into to_XYZ is maintained in the from_XYZ.
"""
def test_drop_index(self):
schema_obj = schemaobj.SchemaObjects()
idx = schema_obj.index("x", "y", ["z"])
op = ops.DropIndexOp.from_index(idx)
is_(op.to_index(), idx)
def test_create_index(self):
schema_obj = schemaobj.SchemaObjects()
idx = schema_obj.index("x", "y", ["z"])
op = ops.CreateIndexOp.from_index(idx)
is_(op.to_index(), idx)
def test_drop_table(self):
schema_obj = schemaobj.SchemaObjects()
table = schema_obj.table("x", Column("q", Integer))
op = ops.DropTableOp.from_table(table)
is_(op.to_table(), table)
def test_create_table(self):
schema_obj = schemaobj.SchemaObjects()
table = schema_obj.table("x", Column("q", Integer))
op = ops.CreateTableOp.from_table(table)
is_(op.to_table(), table)
def test_drop_unique_constraint(self):
schema_obj = schemaobj.SchemaObjects()
const = schema_obj.unique_constraint("x", "foobar", ["a"])
op = ops.DropConstraintOp.from_constraint(const)
is_(op.to_constraint(), const)
def test_drop_constraint_not_available(self):
op = ops.DropConstraintOp("x", "y", type_="unique")
assert_raises_message(
ValueError, "constraint cannot be produced", op.to_constraint
)
| 34.240976
| 79
| 0.580648
| 4,169
| 35,097
| 4.640201
| 0.072439
| 0.036185
| 0.076919
| 0.08581
| 0.800827
| 0.757302
| 0.71848
| 0.687258
| 0.65221
| 0.609977
| 0
| 0.011112
| 0.307719
| 35,097
| 1,024
| 80
| 34.274414
| 0.785076
| 0.0208
| 0
| 0.457901
| 0
| 0
| 0.234569
| 0.00831
| 0
| 0
| 0
| 0.000977
| 0.114187
| 1
| 0.119954
| false
| 0
| 0.032295
| 0
| 0.15917
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8a704548eef396f480381eb006560c41ab87c35e
| 4,868
|
py
|
Python
|
mmtbx/conformation_dependent_library/tst_mcl_02.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 155
|
2016-11-23T12:52:16.000Z
|
2022-03-31T15:35:44.000Z
|
mmtbx/conformation_dependent_library/tst_mcl_02.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 590
|
2016-12-10T11:31:18.000Z
|
2022-03-30T23:10:09.000Z
|
mmtbx/conformation_dependent_library/tst_mcl_02.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 115
|
2016-11-15T08:17:28.000Z
|
2022-02-09T15:30:14.000Z
|
from __future__ import absolute_import, division, print_function
import os
from libtbx import easy_run
pdb_string = '''
CRYST1 589.160 589.160 589.160 90.00 90.00 90.00 P 1
SCALE1 0.001697 0.000000 0.000000 0.00000
SCALE2 0.000000 0.001697 0.000000 0.00000
SCALE3 0.000000 0.000000 0.001697 0.00000
ATOM 1 N CYS L 104 339.804 329.680 261.810 1.00 58.36 N
ATOM 2 CA CYS L 104 338.530 329.122 261.395 1.00 60.90 C
ATOM 3 C CYS L 104 337.438 329.433 262.396 1.00 63.21 C
ATOM 4 O CYS L 104 336.436 330.048 262.048 1.00 65.22 O
ATOM 5 CB CYS L 104 338.651 327.620 261.216 1.00 60.57 C
ATOM 6 SG CYS L 104 339.341 326.788 262.654 1.00 60.22 S
ATOM 7 N CYS L 108 338.113 325.778 267.745 1.00 68.92 N
ATOM 8 CA CYS L 108 339.312 325.068 267.300 1.00 64.76 C
ATOM 9 C CYS L 108 340.555 325.418 268.121 1.00 66.03 C
ATOM 10 O CYS L 108 341.588 324.784 267.939 1.00 66.50 O
ATOM 11 CB CYS L 108 339.571 325.286 265.807 1.00 60.82 C
ATOM 12 SG CYS L 108 338.793 324.031 264.774 1.00 56.51 S
ATOM 13 N CYS L 120 343.196 322.319 267.971 1.00 57.56 N
ATOM 14 CA CYS L 120 342.863 321.598 266.735 1.00 57.89 C
ATOM 15 C CYS L 120 344.051 321.463 265.784 1.00 59.16 C
ATOM 16 O CYS L 120 344.945 322.303 265.759 1.00 57.82 O
ATOM 17 CB CYS L 120 341.697 322.250 265.986 1.00 56.77 C
ATOM 18 SG CYS L 120 341.374 321.502 264.369 1.00 53.53 S
ATOM 19 N ILE L 121 344.035 320.383 265.006 1.00 61.55 N
ATOM 20 CA ILE L 121 345.070 320.108 264.011 1.00 62.65 C
ATOM 21 C ILE L 121 345.134 321.190 262.946 1.00 61.66 C
ATOM 22 O ILE L 121 346.214 321.514 262.462 1.00 61.29 O
ATOM 23 CB ILE L 121 344.869 318.724 263.337 1.00 64.79 C
ATOM 24 CG1 ILE L 121 346.106 318.329 262.529 1.00 65.49 C
ATOM 25 CG2 ILE L 121 343.629 318.694 262.442 1.00 64.60 C
ATOM 26 CD1 ILE L 121 346.129 316.868 262.147 1.00 66.90 C
ATOM 27 N CYS L 122 343.982 321.748 262.588 1.00 64.24 N
ATOM 28 CA CYS L 122 343.921 322.801 261.579 1.00 68.72 C
ATOM 29 C CYS L 122 344.962 323.861 261.847 1.00 69.64 C
ATOM 30 O CYS L 122 345.597 324.371 260.927 1.00 73.06 O
ATOM 31 CB CYS L 122 342.536 323.448 261.549 1.00 70.95 C
ATOM 32 SG CYS L 122 342.155 324.561 262.930 1.00 73.02 S
ATOM 33 N CYS L 145 338.912 319.344 260.063 1.00 94.97 N
ATOM 34 CA CYS L 145 338.541 318.949 261.422 1.00 92.47 C
ATOM 35 C CYS L 145 337.032 318.807 261.564 1.00 88.78 C
ATOM 36 O CYS L 145 336.270 319.314 260.742 1.00 89.04 O
ATOM 37 CB CYS L 145 339.074 319.959 262.435 1.00 93.38 C
ATOM 38 SG CYS L 145 338.049 321.426 262.672 1.00 97.07 S
ATOM 39 N CYS L 148 334.801 322.398 262.418 1.00 90.56 N
ATOM 40 CA CYS L 148 334.536 323.276 261.291 1.00 88.35 C
ATOM 41 C CYS L 148 335.420 322.848 260.130 1.00 93.31 C
ATOM 42 O CYS L 148 336.536 322.377 260.342 1.00 95.93 O
ATOM 43 CB CYS L 148 334.872 324.701 261.685 1.00 84.75 C
ATOM 44 SG CYS L 148 336.541 324.858 262.342 1.00 79.52 S
ATOM 45 N CYS L 150 338.090 323.242 257.957 1.00 99.57 N
ATOM 46 CA CYS L 150 339.243 324.167 257.968 1.00 99.81 C
ATOM 47 C CYS L 150 340.562 323.546 257.510 1.00 98.81 C
ATOM 48 O CYS L 150 341.485 323.322 258.299 1.00 93.05 O
ATOM 49 CB CYS L 150 339.390 324.814 259.342 1.00 97.02 C
ATOM 50 SG CYS L 150 339.376 323.663 260.725 1.00 94.12 S
TER
HETATM 51 ZN ZN L1000 337.686 325.976 263.586 1.00 31.11 Zn
HETATM 52 ZN ZN L1001 340.645 325.701 261.809 1.00 38.33 Zn
HETATM 53 ZN ZN L1002 340.121 322.858 263.346 1.00 15.24 Zn
END
'''
def main():
with open('tst_mcl_02.pdb', 'w') as f:
f.write(pdb_string)
cmd = 'phenix.pdb_interpretation tst_mcl_02.pdb write_geo=1'
print (cmd)
rc = easy_run.go(cmd)
assert os.path.exists('tst_mcl_02.pdb.geo')
return rc.return_code
if __name__ == '__main__':
rc = main()
assert not rc
print('OK')
| 60.85
| 78
| 0.536154
| 1,015
| 4,868
| 2.544828
| 0.339901
| 0.061556
| 0.02168
| 0.011614
| 0.025552
| 0
| 0
| 0
| 0
| 0
| 0
| 0.579661
| 0.394002
| 4,868
| 79
| 79
| 61.620253
| 0.295932
| 0
| 0
| 0
| 0
| 0.710526
| 0.928102
| 0.005136
| 0
| 0
| 0
| 0
| 0.026316
| 1
| 0.013158
| false
| 0
| 0.039474
| 0
| 0.065789
| 0.039474
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8a75ae3dc6e72421c76ca374f39b9f51b3a0e9a8
| 675
|
py
|
Python
|
closest_string/task/dataset.py
|
pchlenski/NeuroSEED
|
6318431dcce22df70948251b103374f4a6e96fff
|
[
"MIT"
] | 39
|
2021-07-07T08:09:15.000Z
|
2022-03-13T19:48:48.000Z
|
closest_string/task/dataset.py
|
pchlenski/NeuroSEED
|
6318431dcce22df70948251b103374f4a6e96fff
|
[
"MIT"
] | 1
|
2022-03-11T01:33:43.000Z
|
2022-03-11T01:33:43.000Z
|
closest_string/task/dataset.py
|
pchlenski/NeuroSEED
|
6318431dcce22df70948251b103374f4a6e96fff
|
[
"MIT"
] | 6
|
2021-11-01T06:05:00.000Z
|
2022-03-15T13:06:10.000Z
|
import torch
from util.data_handling.data_loader import index_to_one_hot
class ReferenceDataset(torch.utils.data.Dataset):
def __init__(self, sequences):
self.sequences = index_to_one_hot(sequences)
def __len__(self):
return self.sequences.shape[0]
def __getitem__(self, index):
return self.sequences[index]
class QueryDataset(torch.utils.data.Dataset):
def __init__(self, sequences, labels):
self.sequences = index_to_one_hot(sequences)
self.labels = labels
def __len__(self):
return self.sequences.shape[0]
def __getitem__(self, index):
return self.sequences[index], self.labels[index]
| 27
| 59
| 0.708148
| 86
| 675
| 5.151163
| 0.302326
| 0.234763
| 0.162528
| 0.088036
| 0.69526
| 0.69526
| 0.69526
| 0.537246
| 0.352144
| 0.352144
| 0
| 0.003683
| 0.195556
| 675
| 25
| 60
| 27
| 0.812155
| 0
| 0
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.352941
| false
| 0
| 0.117647
| 0.235294
| 0.823529
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
8a8a8b9bf09bda663025cedc1cae705aabd3434e
| 105
|
py
|
Python
|
django_orm/book_authors_proj/book_authors_app/apps.py
|
gfhuertac/coding_dojo_python
|
4d17bb63fb2b9669216a0f60326d4a4b9055af7e
|
[
"MIT"
] | null | null | null |
django_orm/book_authors_proj/book_authors_app/apps.py
|
gfhuertac/coding_dojo_python
|
4d17bb63fb2b9669216a0f60326d4a4b9055af7e
|
[
"MIT"
] | 6
|
2020-06-06T01:50:21.000Z
|
2022-02-10T11:33:02.000Z
|
django_orm/book_authors_proj/book_authors_app/apps.py
|
gfhuertac/coding_dojo_python
|
4d17bb63fb2b9669216a0f60326d4a4b9055af7e
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class BookAuthorsAppConfig(AppConfig):
name = 'book_authors_app'
| 17.5
| 38
| 0.790476
| 12
| 105
| 6.75
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 105
| 5
| 39
| 21
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0.152381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
8a9483239631094d20459d7eafed17b98a084522
| 125
|
py
|
Python
|
music/test.py
|
stef0296/stagelightcontrol
|
d54d58b07b79663ad5872ce82293d155c1a10fcc
|
[
"BSD-2-Clause"
] | null | null | null |
music/test.py
|
stef0296/stagelightcontrol
|
d54d58b07b79663ad5872ce82293d155c1a10fcc
|
[
"BSD-2-Clause"
] | null | null | null |
music/test.py
|
stef0296/stagelightcontrol
|
d54d58b07b79663ad5872ce82293d155c1a10fcc
|
[
"BSD-2-Clause"
] | null | null | null |
import eyeD3
tag = eyeD3.Tag()
tag.link("/some/file.mp3")
print(tag.getArtist())
print(tag.getAlbum())
print(tag.getTitle())
| 17.857143
| 26
| 0.712
| 19
| 125
| 4.684211
| 0.578947
| 0.269663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025862
| 0.072
| 125
| 6
| 27
| 20.833333
| 0.741379
| 0
| 0
| 0
| 0
| 0
| 0.112
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
8a9e2812fee9375e9d9bae18e1e1d756c32cf6cd
| 71
|
py
|
Python
|
Configs/m5/objects/Process.py
|
hplp/PiMulator
|
498cbe0b1e176c7d1cc2721f20d7bc0f193a3e3b
|
[
"BSD-2-Clause"
] | null | null | null |
Configs/m5/objects/Process.py
|
hplp/PiMulator
|
498cbe0b1e176c7d1cc2721f20d7bc0f193a3e3b
|
[
"BSD-2-Clause"
] | null | null | null |
Configs/m5/objects/Process.py
|
hplp/PiMulator
|
498cbe0b1e176c7d1cc2721f20d7bc0f193a3e3b
|
[
"BSD-2-Clause"
] | null | null | null |
class Process():
def __init__(self):
self.cmd = []
| 17.75
| 23
| 0.478873
| 7
| 71
| 4.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.380282
| 71
| 4
| 24
| 17.75
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
8aa4fe750986ea4e72ce1b9013c90d894c36217d
| 404
|
py
|
Python
|
ldapom/error.py
|
f1ori/ldapom
|
eccfe25dfc8b6e9faa37cf3b72d4c7d757ded970
|
[
"MIT"
] | 2
|
2015-03-27T13:45:51.000Z
|
2017-06-25T10:27:18.000Z
|
ldapom/error.py
|
leonhandreke/ldapom
|
c54e0173123be96fa5179ef7b4aa7ad943185c3c
|
[
"MIT"
] | null | null | null |
ldapom/error.py
|
leonhandreke/ldapom
|
c54e0173123be96fa5179ef7b4aa7ad943185c3c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
class LDAPomError(Exception):
pass
class LDAPError(LDAPomError):
pass
class LDAPNoSuchObjectError(LDAPError):
pass
class LDAPInvalidCredentialsError(LDAPError):
pass
class LDAPServerDownError(LDAPError):
pass
class LDAPAttributeNameNotFoundError(LDAPomError):
pass
| 13.931034
| 50
| 0.764851
| 37
| 404
| 8.081081
| 0.513514
| 0.150502
| 0.180602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002976
| 0.168317
| 404
| 28
| 51
| 14.428571
| 0.886905
| 0.05198
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.428571
| 0.142857
| 0
| 0.571429
| 0.071429
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
8aafda6ac2a3b88bea76a98d16e1b8973ed6339f
| 54
|
py
|
Python
|
src/__init__.py
|
jackyyf/paste.py
|
f101fbba3e942a916e5ce99b8f02c85129271f2a
|
[
"MIT"
] | 3
|
2015-03-04T06:11:26.000Z
|
2015-04-19T00:19:46.000Z
|
src/__init__.py
|
jackyyf/paste.py
|
f101fbba3e942a916e5ce99b8f02c85129271f2a
|
[
"MIT"
] | 3
|
2015-03-29T12:05:38.000Z
|
2015-08-18T04:43:25.000Z
|
src/__init__.py
|
jackyyf/paste.py
|
f101fbba3e942a916e5ce99b8f02c85129271f2a
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
import paste
paste.run()
| 13.5
| 26
| 0.685185
| 7
| 54
| 4.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 54
| 4
| 27
| 13.5
| 0.644444
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
76d8a006f47713d5f41d1220105d691ca7526cb3
| 17,679
|
py
|
Python
|
sportsreference/nfl/constants.py
|
AidanGlickman/sportsreference
|
2129b83553e37c42f847e2f2bfbfa7287212bc5f
|
[
"MIT"
] | 1
|
2020-03-08T20:17:39.000Z
|
2020-03-08T20:17:39.000Z
|
sportsreference/nfl/constants.py
|
AidanGlickman/sportsreference
|
2129b83553e37c42f847e2f2bfbfa7287212bc5f
|
[
"MIT"
] | null | null | null |
sportsreference/nfl/constants.py
|
AidanGlickman/sportsreference
|
2129b83553e37c42f847e2f2bfbfa7287212bc5f
|
[
"MIT"
] | null | null | null |
PARSING_SCHEME = {
'name': 'a',
'games_played': 'td[data-stat="g"]:first',
'wins': 'td[data-stat="wins"]:first',
'losses': 'td[data-stat="losses"]:first',
'win_percentage': 'td[data-stat="win_loss_perc"]:first',
'points_for': 'td[data-stat="points"]:first',
'points_against': 'td[data-stat="points_opp"]:first',
'points_difference': 'td[data-stat="points_diff"]:first',
'margin_of_victory': 'td[data-stat="mov"]:first',
'strength_of_schedule': 'td[data-stat="sos_total"]:first',
'simple_rating_system': 'td[data-stat="srs_total"]:first',
'offensive_simple_rating_system': 'td[data-stat="srs_offense"]:first',
'defensive_simple_rating_system': 'td[data-stat="srs_defense"]:first',
'yards': 'td[data-stat="total_yards"]:first',
'plays': 'td[data-stat="plays_offense"]:first',
'yards_per_play': 'td[data-stat="yds_per_play_offense"]:first',
'turnovers': 'td[data-stat="turnovers"]:first',
'fumbles': 'td[data-stat="fumbles_lost"]:first',
'first_downs': 'td[data-stat="first_down"]:first',
'pass_completions': 'td[data-stat="pass_cmp"]:first',
'pass_attempts': 'td[data-stat="pass_att"]:first',
'pass_yards': 'td[data-stat="pass_yds"]:first',
'pass_touchdowns': 'td[data-stat="pass_td"]:first',
'interceptions': 'td[data-stat="pass_int"]:first',
'pass_net_yards_per_attempt': 'td[data-stat="pass_net_yds_per_att"]:first',
'pass_first_downs': 'td[data-stat="pass_fd"]:first',
'rush_attempts': 'td[data-stat="rush_att"]:first',
'rush_yards': 'td[data-stat="rush_yds"]:first',
'rush_touchdowns': 'td[data-stat="rush_td"]:first',
'rush_yards_per_attempt': 'td[data-stat="rush_yds_per_att"]:first',
'rush_first_downs': 'td[data-stat="rush_fd"]:first',
'penalties': 'td[data-stat="penalties"]:first',
'yards_from_penalties': 'td[data-stat="penalties_yds"]:first',
'first_downs_from_penalties': 'td[data-stat="pen_fd"]:first',
'percent_drives_with_points': 'td[data-stat="score_pct"]:first',
'percent_drives_with_turnovers': 'td[data-stat="turnover_pct"]:first',
'points_contributed_by_offense': 'td[data-stat="exp_pts_tot"]:first'
}
SCHEDULE_SCHEME = {
'week': 'th[data-stat="week_num"]:first',
'day': 'td[data-stat="game_day_of_week"]:first',
'date': 'td[data-stat="game_date"]:first',
'result': 'td[data-stat="game_outcome"]:first',
'overtime': 'td[data-stat="overtime"]:first',
'location': 'td[data-stat="game_location"]:first',
'opponent_name': 'td[data-stat="opp"]:first',
'points_scored': 'td[data-stat="pts_off"]:first',
'points_allowed': 'td[data-stat="pts_def"]:first',
'pass_completions': 'td[data-stat="pass_cmp"]:first',
'pass_attempts': 'td[data-stat="pass_att"]:first',
'pass_yards': 'td[data-stat="pass_yds"]:first',
'pass_touchdowns': 'td[data-stat="pass_td"]:first',
'interceptions': 'td[data-stat="pass_int"]:first',
'times_sacked': 'td[data-stat="pass_sacked"]:first',
'yards_lost_from_sacks': 'td[data-stat="pass_sacked_yds"]:first',
'pass_yards_per_attempt': 'td[data-stat="pass_yds_per_att"]:first',
'pass_completion_rate': 'td[data-stat="pass_cmp_perc"]:first',
'quarterback_rating': 'td[data-stat="pass_rating"]:first',
'rush_attempts': 'td[data-stat="rush_att"]:first',
'rush_yards': 'td[data-stat="rush_yds"]:first',
'rush_yards_per_attempt': 'td[data-stat="rush_yds_per_att"]:first',
'rush_touchdowns': 'td[data-stat="rush_td"]:first',
'field_goals_made': 'td[data-stat="fgm"]:first',
'field_goals_attempted': 'td[data-stat="fga"]:first',
'extra_points_made': 'td[data-stat="xpm"]:first',
'extra_points_attempted': 'td[data-stat="xpa"]:first',
'punts': 'td[data-stat="punt"]:first',
'punt_yards': 'td[data-stat="punt_yds"]:first',
'third_down_conversions': 'td[data-stat="third_down_success"]:first',
'third_down_attempts': 'td[data-stat="third_down_att"]:first',
'fourth_down_conversions': 'td[data-stat="fourth_down_success"]:first',
'fourth_down_attempts': 'td[data-stat="fourth_down_att"]:first',
'time_of_possession': 'td[data-stat="time_of_poss"]:first'
}
BOXSCORE_SCHEME = {
'game_info': 'div[class="scorebox_meta"]:first',
'home_name': 'a[itemprop="name"]:first',
'summary': 'table[class="linescore nohover stats_table no_freeze"]:first',
'away_name': 'a[itemprop="name"]:last',
'away_points': 'div[class="scorebox"] div[class="score"]',
'away_first_downs': 'td[data-stat="vis_stat"]',
'away_rush_attempts': 'td[data-stat="vis_stat"]',
'away_rush_yards': 'td[data-stat="vis_stat"]',
'away_rush_touchdowns': 'td[data-stat="vis_stat"]',
'away_pass_completions': 'td[data-stat="vis_stat"]',
'away_pass_attempts': 'td[data-stat="vis_stat"]',
'away_pass_yards': 'td[data-stat="vis_stat"]',
'away_pass_touchdowns': 'td[data-stat="vis_stat"]',
'away_interceptions': 'td[data-stat="vis_stat"]',
'away_times_sacked': 'td[data-stat="vis_stat"]',
'away_yards_lost_from_sacks': 'td[data-stat="vis_stat"]',
'away_net_pass_yards': 'td[data-stat="vis_stat"]',
'away_total_yards': 'td[data-stat="vis_stat"]',
'away_fumbles': 'td[data-stat="vis_stat"]',
'away_fumbles_lost': 'td[data-stat="vis_stat"]',
'away_turnovers': 'td[data-stat="vis_stat"]',
'away_penalties': 'td[data-stat="vis_stat"]',
'away_yards_from_penalties': 'td[data-stat="vis_stat"]',
'away_third_down_conversions': 'td[data-stat="vis_stat"]',
'away_third_down_attempts': 'td[data-stat="vis_stat"]',
'away_fourth_down_conversions': 'td[data-stat="vis_stat"]',
'away_fourth_down_attempts': 'td[data-stat="vis_stat"]',
'away_time_of_possession': 'td[data-stat="vis_stat"]',
'home_points': 'div[class="scorebox"] div[class="score"]',
'home_first_downs': 'td[data-stat="home_stat"]',
'home_rush_attempts': 'td[data-stat="home_stat"]',
'home_rush_yards': 'td[data-stat="home_stat"]',
'home_rush_touchdowns': 'td[data-stat="home_stat"]',
'home_pass_completions': 'td[data-stat="home_stat"]',
'home_pass_attempts': 'td[data-stat="home_stat"]',
'home_pass_yards': 'td[data-stat="home_stat"]',
'home_pass_touchdowns': 'td[data-stat="home_stat"]',
'home_interceptions': 'td[data-stat="home_stat"]',
'home_times_sacked': 'td[data-stat="home_stat"]',
'home_yards_lost_from_sacks': 'td[data-stat="home_stat"]',
'home_net_pass_yards': 'td[data-stat="home_stat"]',
'home_total_yards': 'td[data-stat="home_stat"]',
'home_fumbles': 'td[data-stat="home_stat"]',
'home_fumbles_lost': 'td[data-stat="home_stat"]',
'home_turnovers': 'td[data-stat="home_stat"]',
'home_penalties': 'td[data-stat="home_stat"]',
'home_yards_from_penalties': 'td[data-stat="home_stat"]',
'home_third_down_conversions': 'td[data-stat="home_stat"]',
'home_third_down_attempts': 'td[data-stat="home_stat"]',
'home_fourth_down_conversions': 'td[data-stat="home_stat"]',
'home_fourth_down_attempts': 'td[data-stat="home_stat"]',
'home_time_of_possession': 'td[data-stat="home_stat"]'
}
BOXSCORE_ELEMENT_INDEX = {
'date': 0,
'time': 1,
'stadium': 2,
'attendance': 3,
'duration': 4,
'away_points': 1,
'away_first_downs': 0,
'away_rush_attempts': 1,
'away_rush_yards': 1,
'away_rush_touchdowns': 1,
'away_pass_completions': 2,
'away_pass_attempts': 2,
'away_pass_yards': 2,
'away_pass_touchdowns': 2,
'away_interceptions': 2,
'away_times_sacked': 3,
'away_yards_lost_from_sacks': 3,
'away_net_pass_yards': 4,
'away_total_yards': 5,
'away_fumbles': 6,
'away_fumbles_lost': 6,
'away_turnovers': 7,
'away_penalties': 8,
'away_yards_from_penalties': 8,
'away_third_down_conversions': 9,
'away_third_down_attempts': 9,
'away_fourth_down_conversions': 10,
'away_fourth_down_attempts': 10,
'away_time_of_possession': 11,
'home_points': 0,
'home_first_downs': 0,
'home_rush_attempts': 1,
'home_rush_yards': 1,
'home_rush_touchdowns': 1,
'home_pass_completions': 2,
'home_pass_attempts': 2,
'home_pass_yards': 2,
'home_pass_touchdowns': 2,
'home_interceptions': 2,
'home_times_sacked': 3,
'home_yards_lost_from_sacks': 3,
'home_net_pass_yards': 4,
'home_total_yards': 5,
'home_fumbles': 6,
'home_fumbles_lost': 6,
'home_turnovers': 7,
'home_penalties': 8,
'home_yards_from_penalties': 8,
'home_third_down_conversions': 9,
'home_third_down_attempts': 9,
'home_fourth_down_conversions': 10,
'home_fourth_down_attempts': 10,
'home_time_of_possession': 11
}
# Designates the index of the item within the requested tag
BOXSCORE_ELEMENT_SUB_INDEX = {
'away_rush_attempts': 0,
'away_rush_yards': 1,
'away_rush_touchdowns': 2,
'away_pass_completions': 0,
'away_pass_attempts': 1,
'away_pass_yards': 2,
'away_pass_touchdowns': 3,
'away_interceptions': 4,
'away_times_sacked': 0,
'away_yards_lost_from_sacks': 1,
'away_fumbles': 0,
'away_fumbles_lost': 1,
'away_penalties': 0,
'away_yards_from_penalties': 1,
'away_third_down_conversions': 0,
'away_third_down_attempts': 1,
'away_fourth_down_conversions': 0,
'away_fourth_down_attempts': 1,
'home_rush_attempts': 0,
'home_rush_yards': 1,
'home_rush_touchdowns': 2,
'home_pass_completions': 0,
'home_pass_attempts': 1,
'home_pass_yards': 2,
'home_pass_touchdowns': 3,
'home_interceptions': 4,
'home_times_sacked': 0,
'home_yards_lost_from_sacks': 1,
'home_fumbles': 0,
'home_fumbles_lost': 1,
'home_penalties': 0,
'home_yards_from_penalties': 1,
'home_third_down_conversions': 0,
'home_third_down_attempts': 1,
'home_fourth_down_conversions': 0,
'home_fourth_down_attempts': 1,
}
PLAYER_SCHEME = {
'season': 'th[data-stat="year_id"]',
'name': 'h1[itemprop="name"]',
'team_abbreviation': 'td[data-stat="team"]',
'position': 'td[data-stat="pos"]',
'height': 'span[itemprop="height"]',
'weight': 'span[itemprop="weight"]',
'birth_date': 'td[data-stat=""]',
'contract': 'td[data-stat=""]',
'games': 'td[data-stat="g"]',
'games_started': 'td[data-stat="gs"]',
'approximate_value': 'td[data-stat="av"]',
'qb_record': 'td[data-stat="qb_rec"]',
'completed_passes': 'td[data-stat="pass_cmp"]',
'attempted_passes': 'td[data-stat="pass_att"]',
'passing_completion': 'td[data-stat="pass_cmp_perc"]',
'passing_yards': 'td[data-stat="pass_yds"]',
'passing_touchdowns': 'td[data-stat="pass_td"]',
'passing_touchdown_percentage': 'td[data-stat="pass_td_perc"]',
'interceptions_thrown': 'td[data-stat="pass_int"]',
'interception_percentage': 'td[data-stat="pass_int_perc"]',
'longest_pass': 'td[data-stat="pass_long"]',
'passing_yards_per_attempt': 'td[data-stat="pass_yds_per_att"]',
'adjusted_yards_per_attempt': 'td[data-stat="pass_adj_yds_per_att"]',
'yards_per_completed_pass': 'td[data-stat="pass_yds_per_cmp"]',
'yards_per_game_played': 'td[data-stat="pass_yds_per_g"]',
'quarterback_rating': 'td[data-stat="pass_rating"]',
'espn_qbr': 'td[data-stat="qbr"]',
'times_sacked': 'td[data-stat="pass_sacked"]',
'yards_lost_to_sacks': 'td[data-stat="pass_sacked_yds"]',
'net_yards_per_pass_attempt': 'td[data-stat="pass_net_yds_per_att"]',
'adjusted_net_yards_per_pass_attempt':
'td[data-stat="pass_adj_net_yds_per_att"]',
'sack_percentage': 'td[data-stat="pass_sacked_per"]',
'fourth_quarter_comebacks': 'td[data-stat="comebacks"]',
'game_winning_drives': 'td[data-stat="gwd"]',
'yards_per_attempt_index': 'td[data-stat="pass_yds_per_att_index"]',
'net_yards_per_attempt_index':
'td[data-stat="pass_net_yds_per_att_index"]',
'adjusted_yards_per_attempt_index':
'td[data-stat="pass_adj_yds_per_att_index"]',
'adjusted_net_yards_per_attempt_index':
'td[data-stat="pass_adj_net_yds_per_att_index"]',
'completion_percentage_index': 'td[data-stat="pass_cmp_perc_index"]',
'touchdown_percentage_index': 'td[data-stat="pass_td_perc_index"]',
'interception_percentage_index': 'td[data-stat="pass_int_perc_index"]',
'sack_percentage_index': 'td[data-stat="pass_sacked_perc_index"]',
'passer_rating_index': 'td[data-stat="pass_rating_index"]',
'rush_attempts': 'td[data-stat="rush_att"]',
'rush_yards': 'td[data-stat="rush_yds"]',
'rush_touchdowns': 'td[data-stat="rush_td"]',
'longest_rush': 'td[data-stat="rush_long"]',
'rush_yards_per_attempt': 'td[data-stat="rush_yds_per_att"]',
'rush_yards_per_game': 'td[data-stat="rush_yds_per_g"]',
'rush_attempts_per_game': 'td[data-stat="rush_att_per_g"]',
'times_pass_target': 'td[data-stat="targets"]',
'receptions': 'td[data-stat="rec"]',
'receiving_yards': 'td[data-stat="rec_yds"]',
'receiving_yards_per_reception': 'td[data-stat="rec_yds_per_rec"]',
'receiving_touchdowns': 'td[data-stat="rec_td"]',
'longest_reception': 'td[data-stat="rec_long"]',
'receptions_per_game': 'td[data-stat="rec_per_g"]',
'receiving_yards_per_game': 'td[data-stat="rec_yds_per_g"]',
'catch_percentage': 'td[data-stat="catch_pct"]',
'touches': 'td[data-stat="touches"]',
'yards_per_touch': 'td[data-stat="yds_per_touch"]',
'yards_from_scrimmage': 'td[data-stat="yds_from_scrimmage"]',
'rushing_and_receiving_touchdowns': 'td[data-stat="rush_receive_td"]',
'fumbles': 'td[data-stat="fumbles"]',
'punt_returns': 'td[data-stat="punt_ret"]',
'punt_return_yards': 'td[data-stat="punt_ret_yds"]',
'punt_return_touchdown': 'td[data-stat="punt_ret_td"]',
'longest_punt_return': 'td[data-stat="punt_ret_long"]',
'yards_per_punt_return': 'td[data-stat="punt_ret_yds_per_ret"]',
'kickoff_returns': 'td[data-stat="kick_ret"]',
'kickoff_return_yards': 'td[data-stat="kick_ret_yds"]',
'kickoff_return_touchdown': 'td[data-stat="kick_ret_td"]',
'longest_kickoff_return': 'td[data-stat="kick_ret_long"]',
'yards_per_kickoff_return': 'td[data-stat="kick_ret_yds_per_ret"]',
'all_purpose_yards': 'td[data-stat="all_purpose_yds"]',
'less_than_nineteen_yards_field_goal_attempts': 'td[data-stat="fga1"]',
'less_than_nineteen_yards_field_goals_made': 'td[data-stat="fgm1"]',
'twenty_to_twenty_nine_yard_field_goal_attempts': 'td[data-stat="fga2"]',
'twenty_to_twenty_nine_yard_field_goals_made': 'td[data-stat="fgm2"]',
'thirty_to_thirty_nine_yard_field_goal_attempts': 'td[data-stat="fga3"]',
'thirty_to_thirty_nine_yard_field_goals_made': 'td[data-stat="fgm3"]',
'fourty_to_fourty_nine_yard_field_goal_attempts': 'td[data-stat="fga4"]',
'fourty_to_fourty_nine_yard_field_goals_made': 'td[data-stat="fgm4"]',
'fifty_plus_yard_field_goal_attempts': 'td[data-stat="fga5"]',
'fifty_plus_yard_field_goals_made': 'td[data-stat="fgm5"]',
'field_goals_attempted': 'td[data-stat="fga"]',
'field_goals_made': 'td[data-stat="fgm"]',
'longest_field_goal_made': 'td[data-stat="fg_long"]',
'field_goal_percentage': 'td[data-stat="fg_perc"]',
'extra_points_attempted': 'td[data-stat="xpa"]',
'extra_points_made': 'td[data-stat="xpm"]',
'extra_point_percentage': 'td[data-stat="xp_perc"]',
'punts': 'td[data-stat="punt"]',
'total_punt_yards': 'td[data-stat="punt_yds"]',
'longest_punt': 'td[data-stat="punt_long"]',
'blocked_punts': 'td[data-stat="punt_blocked"]',
'yards_per_punt': 'td[data-stat="punt_yds_per_punt"]',
'interceptions': 'td[data-stat="def_int"]',
'yards_returned_from_interception': 'td[data-stat="def_int_yds"]',
'interceptions_returned_for_touchdown': 'td[data-stat="def_int_td"]',
'longest_interception_return': 'td[data-stat="def_int_long"]',
'passes_defended': 'td[data-stat="pass_defended"]',
'fumbles_forced': 'td[data-stat="fumbles_forced"]',
'fumbles_recovered': 'td[data-stat="fumbles_rec"]',
'yards_recovered_from_fumble': 'td[data-stat="fumbles_rec_yds"]',
'fumbles_recovered_for_touchdown': 'td[data-stat="fumbles_rec_yds"]',
'sacks': 'td[data-stat="sacks"]',
'tackles': 'td[data-stat="tackles_solo"]',
'assists_on_tackles': 'td[data-stat="tackles_assists"]',
'safeties': 'td[data-stat="safety_md"]',
'yards_lost_from_sacks': 'td[data-stat="pass_sacked_yds"]',
'fumbles_lost': 'td[data-stat="fumbles_lost"]',
'combined_tackles': 'td[data-stat="tackles_combined"]',
'solo_tackles': 'td[data-stat="tackles_solo"]',
'tackles_for_loss': 'td[data-stat="tackles_loss"]',
'quarterback_hits': 'td[data-stat="qb_hits"]',
'average_kickoff_return_yards': 'td[data-stat="kick_ret_yds_per_ret"]',
'kickoff_return_touchdowns': 'td[data-stat="kick_ret_td"]',
'average_punt_return_yards': 'td[data-stat="punt_ret_yds_per_ret"]',
'punt_return_touchdowns': 'td[data-stat="punt_ret_td"]'
}
# URL templates for pro-football-reference.com pages. Each contains one or
# more '%s' placeholders the callers fill in via printf-style formatting
# (e.g. year, team abbreviation, boxscore ID, player ID, week number).
# All templates use HTTPS consistently; SEASON_PAGE_URL previously used
# plain HTTP, which the site only serves via redirect.
SEASON_PAGE_URL = 'https://www.pro-football-reference.com/years/%s/'
SCHEDULE_URL = 'https://www.pro-football-reference.com/teams/%s/%s/gamelog/'
BOXSCORE_URL = 'https://www.pro-football-reference.com/boxscores/%s.htm'
BOXSCORES_URL = 'https://www.pro-football-reference.com/years/%s/week_%s.htm'
PLAYER_URL = 'https://www.pro-football-reference.com/players/%s/%s.htm'
ROSTER_URL = 'https://www.pro-football-reference.com/teams/%s/%s_roster.htm'

# Sentinel "week" numbers used to identify playoff rounds; chosen to be
# well above any regular-season week number so they never collide.
WILD_CARD = 100
DIVISION = 101
CONF_CHAMPIONSHIP = 102
SUPER_BOWL = 103

# Playoff-result strings exactly as they appear on
# pro-football-reference.com season pages; used for string matching when
# parsing a team's end-of-season result. Do not reword.
LOST_WILD_CARD = 'Lost WC'
LOST_DIVISIONAL = 'Lost Divisional'
LOST_CONF_CHAMPS = 'Lost Conference Championship'
LOST_SUPER_BOWL = 'Lost Super Bowl'
WON_SUPER_BOWL = 'Won Super Bowl'
| 47.018617
| 79
| 0.684428
| 2,537
| 17,679
| 4.387071
| 0.113914
| 0.167475
| 0.207547
| 0.05912
| 0.588949
| 0.478706
| 0.377089
| 0.196137
| 0.121114
| 0.079695
| 0
| 0.00758
| 0.119464
| 17,679
| 375
| 80
| 47.144
| 0.707394
| 0.003224
| 0
| 0.071625
| 0
| 0.002755
| 0.752838
| 0.521396
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.212121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.