hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a68903c183ba83fb6d52c10e82e9219b6946fa95
| 323
|
py
|
Python
|
mac_random.py
|
therealedsheenan/public-spoofify
|
fab1c6f1dad1a1364268cbe79e55a3675470fe5c
|
[
"MIT"
] | null | null | null |
mac_random.py
|
therealedsheenan/public-spoofify
|
fab1c6f1dad1a1364268cbe79e55a3675470fe5c
|
[
"MIT"
] | 4
|
2020-02-11T23:39:49.000Z
|
2022-01-13T00:40:48.000Z
|
mac_random.py
|
therealedsheenan/public-spoofify
|
fab1c6f1dad1a1364268cbe79e55a3675470fe5c
|
[
"MIT"
] | null | null | null |
import random
def randomMAC():
return [ 0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
def MACprettyprint(mac):
return ':'.join(map(lambda x: "%02x" % x, mac))
def randomize():
return MACprettyprint(randomMAC())
| 21.533333
| 51
| 0.597523
| 36
| 323
| 5.361111
| 0.527778
| 0.202073
| 0.264249
| 0.217617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096234
| 0.260062
| 323
| 14
| 52
| 23.071429
| 0.711297
| 0
| 0
| 0
| 0
| 0
| 0.015528
| 0
| 0
| 0
| 0.111801
| 0
| 0
| 1
| 0.3
| false
| 0
| 0.1
| 0.3
| 0.7
| 0.2
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
472fc2f3d672c86f9759f9ac2c9539102d5e1b0b
| 175
|
py
|
Python
|
src/metrics/__init__.py
|
Alicegaz/torchok
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
[
"Apache-2.0"
] | 8
|
2021-10-12T05:39:20.000Z
|
2022-03-31T10:55:01.000Z
|
src/metrics/__init__.py
|
Alicegaz/torchok
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
[
"Apache-2.0"
] | 1
|
2022-03-30T19:23:42.000Z
|
2022-03-30T19:23:42.000Z
|
src/metrics/__init__.py
|
Alicegaz/torchok
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
[
"Apache-2.0"
] | 5
|
2021-11-17T07:38:28.000Z
|
2022-01-31T10:46:36.000Z
|
from .common import *
from .classification import *
from .segmentation import *
from .verification import *
from .retrieval import *
from .metric_manager import MetricManager
| 25
| 41
| 0.8
| 20
| 175
| 6.95
| 0.5
| 0.359712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137143
| 175
| 6
| 42
| 29.166667
| 0.92053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5b317e57461183461d3c250ff021fc4851ed0c97
| 744
|
py
|
Python
|
researchutils/image/converter.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | 1
|
2018-09-06T00:54:49.000Z
|
2018-09-06T00:54:49.000Z
|
researchutils/image/converter.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | 28
|
2018-08-25T03:54:30.000Z
|
2018-10-14T12:09:47.000Z
|
researchutils/image/converter.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | null | null | null |
def hwc2chw(image):
"""
Changes the order of image pixels from Height-Width-Color to Color-Height-Width
Parameters
-------
image : numpy.ndarray
Image with pixels in Height-Width-Color order
Returns
-------
image : numpy.ndarray
Image with pixels in Color-Height-Width order
"""
return image.transpose((2, 0, 1))
def chw2hwc(image):
"""
Changes the order of image pixels from Color-Height-Width to Height-Width-Color
Parameters
-------
image : numpy.ndarray
Image with pixels in Color-Height-Width order
Returns
-------
image : numpy.ndarray
Image with pixels in Height-Width-Color order
"""
return image.transpose((1, 2, 0))
| 22.545455
| 83
| 0.620968
| 94
| 744
| 4.914894
| 0.265957
| 0.190476
| 0.138528
| 0.190476
| 0.709957
| 0.709957
| 0.709957
| 0.709957
| 0.506494
| 0.47619
| 0
| 0.014652
| 0.266129
| 744
| 32
| 84
| 23.25
| 0.831502
| 0.700269
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
5b59e0a9cb3bd76343fae397453413bc3b371849
| 68
|
py
|
Python
|
sublime-plugin/poli/sublime/__init__.py
|
egnartsms/poli
|
3a9eab2261688ed84b83808722360356b8e67522
|
[
"MIT"
] | 1
|
2020-06-07T20:55:27.000Z
|
2020-06-07T20:55:27.000Z
|
sublime-plugin/poli/sublime/__init__.py
|
egnartsms/poli
|
3a9eab2261688ed84b83808722360356b8e67522
|
[
"MIT"
] | 2
|
2021-01-22T08:45:48.000Z
|
2021-01-22T08:45:49.000Z
|
sublime-plugin/poli/sublime/__init__.py
|
egnartsms/poli
|
3a9eab2261688ed84b83808722360356b8e67522
|
[
"MIT"
] | null | null | null |
from .edit import *
from .regedit import *
from .view_dict import *
| 17
| 24
| 0.735294
| 10
| 68
| 4.9
| 0.6
| 0.408163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 68
| 3
| 25
| 22.666667
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5b87871b91f38876c06627a600c27d3d24fb93ff
| 83
|
py
|
Python
|
pyfahviewer/config/__init__.py
|
BrandonDusseau/pyfahviewer
|
38d370d5bca3b20f2f683bf0879e340f4a4f7297
|
[
"MIT"
] | 2
|
2020-04-17T23:11:31.000Z
|
2021-04-14T18:57:33.000Z
|
pyfahviewer/config/__init__.py
|
BrandonDusseau/pyfahviewer
|
38d370d5bca3b20f2f683bf0879e340f4a4f7297
|
[
"MIT"
] | 9
|
2020-04-17T23:49:21.000Z
|
2020-05-05T23:48:01.000Z
|
pyfahviewer/config/__init__.py
|
BrandonDusseau/pyfahviewer
|
38d370d5bca3b20f2f683bf0879e340f4a4f7297
|
[
"MIT"
] | 1
|
2020-04-17T22:43:47.000Z
|
2020-04-17T22:43:47.000Z
|
from .config import get_config, set_config
__all__ = ('get_config', 'set_config')
| 20.75
| 42
| 0.759036
| 12
| 83
| 4.583333
| 0.5
| 0.327273
| 0.436364
| 0.654545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120482
| 83
| 3
| 43
| 27.666667
| 0.753425
| 0
| 0
| 0
| 0
| 0
| 0.240964
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
7518af78dd738d705f4d7fb31902451a4006b080
| 41
|
py
|
Python
|
weheartit/__init__.py
|
aswinnnn/weheartit
|
97c71e8818ec11f55341dd0694de43c24781ab23
|
[
"MIT"
] | 1
|
2021-03-23T13:46:09.000Z
|
2021-03-23T13:46:09.000Z
|
weheartit/__init__.py
|
aswinnnn/weheartit
|
97c71e8818ec11f55341dd0694de43c24781ab23
|
[
"MIT"
] | 1
|
2021-10-09T08:37:55.000Z
|
2021-10-09T08:37:55.000Z
|
weheartit/__init__.py
|
aswinnnn/weheartit
|
97c71e8818ec11f55341dd0694de43c24781ab23
|
[
"MIT"
] | null | null | null |
# __init__.py
from .weheartit import whi
| 13.666667
| 26
| 0.780488
| 6
| 41
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 41
| 3
| 26
| 13.666667
| 0.8
| 0.268293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
75236691d5db825780da5c44e8b2462836a5e0a4
| 147
|
py
|
Python
|
bugsnag/wsgi/__init__.py
|
ForroKulcs/bugsnag-python
|
107c1add31a2202cc08ef944aa00ab96996b247a
|
[
"MIT"
] | 76
|
2015-03-01T11:46:57.000Z
|
2022-02-18T10:57:44.000Z
|
bugsnag/wsgi/__init__.py
|
ForroKulcs/bugsnag-python
|
107c1add31a2202cc08ef944aa00ab96996b247a
|
[
"MIT"
] | 119
|
2015-01-14T11:53:08.000Z
|
2022-03-30T08:22:50.000Z
|
bugsnag/wsgi/__init__.py
|
ForroKulcs/bugsnag-python
|
107c1add31a2202cc08ef944aa00ab96996b247a
|
[
"MIT"
] | 46
|
2015-02-09T23:50:57.000Z
|
2022-01-06T16:04:40.000Z
|
from typing import Dict
from urllib.parse import quote
def request_path(env: Dict):
return quote('/' + env.get('PATH_INFO', '').lstrip('/'))
| 21
| 60
| 0.680272
| 21
| 147
| 4.666667
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14966
| 147
| 6
| 61
| 24.5
| 0.784
| 0
| 0
| 0
| 0
| 0
| 0.07483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
755b8168748f1f7d664c1eb56c8d4b37b99d430f
| 1,451
|
py
|
Python
|
src/mnist_backbone.py
|
StevenHuang2020/PyTorch_Learning
|
292508f6794cfb23e0767139c1c703b9525cdb35
|
[
"MIT"
] | null | null | null |
src/mnist_backbone.py
|
StevenHuang2020/PyTorch_Learning
|
292508f6794cfb23e0767139c1c703b9525cdb35
|
[
"MIT"
] | null | null | null |
src/mnist_backbone.py
|
StevenHuang2020/PyTorch_Learning
|
292508f6794cfb23e0767139c1c703b9525cdb35
|
[
"MIT"
] | null | null | null |
#python3 Steven 12/05/20,Auckland,NZ
#pytorch backbone models
import torch
from commonTorch import ClassifierCNN_NetBB
from summaryModel import summaryNet
from backbones import*
def main():
nClass = 10
net = ClassifierCNN_NetBB(nClass, backbone=alexnet)
summaryNet(net, (3,512,512))
#net = ClassifierCNN_NetBB(nClass, backbone=vgg16)
#summaryNet(net, (3,640,480))
# net = ClassifierCNN_NetBB(nClass, backbone=resnet18)
# summaryNet(net, (3,640,480))
#net = ClassifierCNN_NetBB(nClass, backbone=squeezenet)
#summaryNet(net, (3,640,480))
##net = ClassifierCNN_NetBB(nClass, backbone=densenet)
##summaryNet(net, (3, 512, 512))
##net = ClassifierCNN_NetBB(nClass, backbone=inception)
##summaryNet(net, (3,640,480))
##net = ClassifierCNN_NetBB(nClass, backbone=googlenet)
##summaryNet(net, (3,640,480))
#net = ClassifierCNN_NetBB(nClass, backbone=shufflenet)
#summaryNet(net, (3,640,480))
#net = ClassifierCNN_NetBB(nClass, backbone=mobilenet)
#summaryNet(net, (3,640,480))
#net = ClassifierCNN_NetBB(nClass, backbone=resnext50_32x4d)
#summaryNet(net, (3,640,480))
#net = ClassifierCNN_NetBB(nClass, backbone=wide_resnet50_2)
#summaryNet(net, (3,640,480))
#net = ClassifierCNN_NetBB(nClass, backbone=mnasnet)
#summaryNet(net, (3,640,480))
return
if __name__ == '__main__':
main()
| 29.612245
| 64
| 0.674707
| 168
| 1,451
| 5.684524
| 0.27381
| 0.245026
| 0.263874
| 0.339267
| 0.691099
| 0.633508
| 0.633508
| 0.633508
| 0.633508
| 0.633508
| 0
| 0.08982
| 0.194349
| 1,451
| 49
| 65
| 29.612245
| 0.727117
| 0.660924
| 0
| 0
| 0
| 0
| 0.017279
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.363636
| 0
| 0.545455
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f32a75ee5c791e6098d35828898f7b16caa86fd3
| 210
|
py
|
Python
|
django/web/admin.py
|
priyankcommits/priyank.co.uk
|
9ca022ef102c156d1bfe943809d97b1cc447e464
|
[
"MIT"
] | null | null | null |
django/web/admin.py
|
priyankcommits/priyank.co.uk
|
9ca022ef102c156d1bfe943809d97b1cc447e464
|
[
"MIT"
] | 4
|
2021-03-09T09:57:40.000Z
|
2022-02-12T13:42:46.000Z
|
django/web/admin.py
|
priyankcommits/priyank.co.uk
|
9ca022ef102c156d1bfe943809d97b1cc447e464
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from web.models import Subscribers, Article, Post, Profile
admin.site.register(Subscribers)
admin.site.register(Article)
admin.site.register(Post)
admin.site.register(Profile)
| 26.25
| 58
| 0.82381
| 29
| 210
| 5.965517
| 0.448276
| 0.208092
| 0.393064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07619
| 210
| 7
| 59
| 30
| 0.891753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f332e5b3dab0d4a4b710e2086f5d14ccd27126f6
| 41
|
py
|
Python
|
pbconcept/__init__.py
|
ScarletPan/probase-concept
|
57f91998876f0085dd0acf34112fb2389563d0c2
|
[
"MIT"
] | 18
|
2019-10-28T06:39:50.000Z
|
2022-02-28T12:54:08.000Z
|
pbconcept/__init__.py
|
ScarletPan/probase-concept
|
57f91998876f0085dd0acf34112fb2389563d0c2
|
[
"MIT"
] | null | null | null |
pbconcept/__init__.py
|
ScarletPan/probase-concept
|
57f91998876f0085dd0acf34112fb2389563d0c2
|
[
"MIT"
] | null | null | null |
from .conceptualize import ProbaseConcept
| 41
| 41
| 0.902439
| 4
| 41
| 9.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 41
| 1
| 41
| 41
| 0.973684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f3966a99b7bc35ad766f8817a1afff4948982ad6
| 26
|
py
|
Python
|
app/views/tracks/__init__.py
|
DaniFriasSolsona/DigitalMediaStore-RESTful
|
30388680eb9c10b4c609ca9a273ba69c09e61dbd
|
[
"MIT"
] | null | null | null |
app/views/tracks/__init__.py
|
DaniFriasSolsona/DigitalMediaStore-RESTful
|
30388680eb9c10b4c609ca9a273ba69c09e61dbd
|
[
"MIT"
] | null | null | null |
app/views/tracks/__init__.py
|
DaniFriasSolsona/DigitalMediaStore-RESTful
|
30388680eb9c10b4c609ca9a273ba69c09e61dbd
|
[
"MIT"
] | null | null | null |
from .resources import blp
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f3afe60bdf21be63f21daf63e4394b2a9a29e766
| 42
|
py
|
Python
|
birbcam/exposureadjust/__init__.py
|
jdpdev/birbcam
|
b44c95744d81d063f12dfb2521019ff89787c45a
|
[
"MIT"
] | 11
|
2021-03-19T18:58:19.000Z
|
2022-03-23T16:57:06.000Z
|
birbcam/exposureadjust/__init__.py
|
jdpdev/birbcam
|
b44c95744d81d063f12dfb2521019ff89787c45a
|
[
"MIT"
] | 13
|
2021-03-16T00:44:52.000Z
|
2021-08-31T01:44:30.000Z
|
birbcam/exposureadjust/__init__.py
|
jdpdev/birbcam
|
b44c95744d81d063f12dfb2521019ff89787c45a
|
[
"MIT"
] | 3
|
2021-03-12T15:56:14.000Z
|
2022-01-03T19:41:17.000Z
|
from .exposureadjust import ExposureAdjust
| 42
| 42
| 0.904762
| 4
| 42
| 9.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 42
| 1
| 42
| 42
| 0.974359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
340556e1e0c08283d52d7ab00c0bd69cfae55d5b
| 96
|
py
|
Python
|
riki/__init__.py
|
afenyvesi/riki
|
dfd6579b3400e8ebcad1c4a610902124fad8f302
|
[
"MIT"
] | null | null | null |
riki/__init__.py
|
afenyvesi/riki
|
dfd6579b3400e8ebcad1c4a610902124fad8f302
|
[
"MIT"
] | 1
|
2020-01-25T23:07:00.000Z
|
2020-01-25T23:07:00.000Z
|
riki/__init__.py
|
afenyvesi/riki
|
dfd6579b3400e8ebcad1c4a610902124fad8f302
|
[
"MIT"
] | 2
|
2020-01-25T22:21:33.000Z
|
2020-07-15T20:59:18.000Z
|
from ._version import __version__
# TODO: refactor the whole semester shit to semester configs
| 24
| 60
| 0.8125
| 13
| 96
| 5.615385
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 96
| 3
| 61
| 32
| 0.901235
| 0.604167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
343ac4986234c278328262f80d5ff0144e2423b0
| 213
|
py
|
Python
|
spinta/ufuncs/changebase/components.py
|
atviriduomenys/spinta
|
77a10e201f8cdc63143fce7996fd0898acb1ff58
|
[
"MIT"
] | 2
|
2019-03-14T06:41:14.000Z
|
2019-03-26T11:48:14.000Z
|
spinta/ufuncs/changebase/components.py
|
sirex/spinta
|
77a10e201f8cdc63143fce7996fd0898acb1ff58
|
[
"MIT"
] | 44
|
2019-04-05T15:52:45.000Z
|
2022-03-30T07:41:33.000Z
|
spinta/ufuncs/changebase/components.py
|
sirex/spinta
|
77a10e201f8cdc63143fce7996fd0898acb1ff58
|
[
"MIT"
] | 1
|
2019-04-01T09:54:27.000Z
|
2019-04-01T09:54:27.000Z
|
from __future__ import annotations
from spinta.components import Model
from spinta.components import Property
from spinta.core.ufuncs import Env
class ChangeModelBase(Env):
model: Model
prop: Property
| 17.75
| 38
| 0.798122
| 27
| 213
| 6.148148
| 0.518519
| 0.180723
| 0.240964
| 0.313253
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159624
| 213
| 11
| 39
| 19.363636
| 0.927374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.571429
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
344597918733e3cbaa91a11a58a4ce26bab24486
| 179
|
py
|
Python
|
tests/conftest.py
|
bendavies99/LedFx
|
d2b5bf3412fecb67c4e2858f83caea402a29d589
|
[
"MIT"
] | 17
|
2018-08-31T05:51:09.000Z
|
2022-02-12T15:41:33.000Z
|
tests/conftest.py
|
simon-wh/LedFx
|
393af53726a7ccaa11331a0e0bccce3f5289ed15
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
simon-wh/LedFx
|
393af53726a7ccaa11331a0e0bccce3f5289ed15
|
[
"MIT"
] | 5
|
2019-07-15T22:12:45.000Z
|
2022-02-05T10:50:44.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
https://pytest.org/latest/plugins.html
"""
from __future__ import print_function, absolute_import, division
# import pytest
| 19.888889
| 64
| 0.698324
| 23
| 179
| 5.173913
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006452
| 0.134078
| 179
| 8
| 65
| 22.375
| 0.76129
| 0.530726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
caa9a9876419f7cc024bc385bf5186f2f38087b3
| 118
|
py
|
Python
|
catkin_ws/src/my_package/scripts/service_client.py
|
SACHINKUMARHACKER/ROS
|
312daa76fbea288e5094684ad19ca52407fddbfa
|
[
"BSD-2-Clause"
] | null | null | null |
catkin_ws/src/my_package/scripts/service_client.py
|
SACHINKUMARHACKER/ROS
|
312daa76fbea288e5094684ad19ca52407fddbfa
|
[
"BSD-2-Clause"
] | null | null | null |
catkin_ws/src/my_package/scripts/service_client.py
|
SACHINKUMARHACKER/ROS
|
312daa76fbea288e5094684ad19ca52407fddbfa
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
def main():
rospy.init_node('service_client')
| 16.857143
| 37
| 0.745763
| 19
| 118
| 4.473684
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 118
| 7
| 37
| 16.857143
| 0.833333
| 0.169492
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
caf2fd807c83b6852b321d0fe1d8c3415691086c
| 35
|
py
|
Python
|
demo.py
|
sebedoyar/Demo-Clase
|
ad95dfc78864451da95cb0ce7fd65ef9e5b9728f
|
[
"MIT"
] | null | null | null |
demo.py
|
sebedoyar/Demo-Clase
|
ad95dfc78864451da95cb0ce7fd65ef9e5b9728f
|
[
"MIT"
] | null | null | null |
demo.py
|
sebedoyar/Demo-Clase
|
ad95dfc78864451da95cb0ce7fd65ef9e5b9728f
|
[
"MIT"
] | null | null | null |
print ('Hola Mundo desde demo.py')
| 17.5
| 34
| 0.714286
| 6
| 35
| 4.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 35
| 1
| 35
| 35
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.685714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
cafa342f68372739cf2be15cd31ab6536193eaad
| 241
|
py
|
Python
|
poetry/utils/helpers.py
|
paralax/poetry
|
16d49d10f9e333fdb2833b31f012d68c6d5ac0a6
|
[
"MIT"
] | null | null | null |
poetry/utils/helpers.py
|
paralax/poetry
|
16d49d10f9e333fdb2833b31f012d68c6d5ac0a6
|
[
"MIT"
] | null | null | null |
poetry/utils/helpers.py
|
paralax/poetry
|
16d49d10f9e333fdb2833b31f012d68c6d5ac0a6
|
[
"MIT"
] | null | null | null |
import re
_canonicalize_regex = re.compile('[-_.]+')
def canonicalize_name(name: str) -> str:
return _canonicalize_regex.sub('-', name).lower()
def module_name(name: str) -> str:
return canonicalize_name(name).replace('-', '_')
| 20.083333
| 53
| 0.676349
| 29
| 241
| 5.310345
| 0.448276
| 0.155844
| 0.25974
| 0.181818
| 0.415584
| 0.415584
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141079
| 241
| 11
| 54
| 21.909091
| 0.743961
| 0
| 0
| 0
| 0
| 0
| 0.037344
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.333333
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
1b020a047efb286d2a4246ddd8cdf39ac6043bbd
| 40
|
py
|
Python
|
src/poetry/puzzle/__init__.py
|
sivanbecker/poetry
|
72497bcb66b5a1cc20e3aa95973c523a22b05bfa
|
[
"MIT"
] | 1
|
2020-02-01T07:13:05.000Z
|
2020-02-01T07:13:05.000Z
|
src/poetry/puzzle/__init__.py
|
sivanbecker/poetry
|
72497bcb66b5a1cc20e3aa95973c523a22b05bfa
|
[
"MIT"
] | 1
|
2021-12-31T19:44:26.000Z
|
2022-03-08T20:52:13.000Z
|
src/poetry/puzzle/__init__.py
|
sivanbecker/poetry
|
72497bcb66b5a1cc20e3aa95973c523a22b05bfa
|
[
"MIT"
] | 2
|
2020-12-07T04:26:21.000Z
|
2021-09-25T21:46:36.000Z
|
from poetry.puzzle.solver import Solver
| 20
| 39
| 0.85
| 6
| 40
| 5.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1b2d742a0714e44102b52b79bd7b5c21748364f9
| 182
|
py
|
Python
|
iostream/__init__.py
|
revival0728/py-iostream
|
5fe441e13f7054d091a4215ab23655a9b7d5eeac
|
[
"MIT"
] | null | null | null |
iostream/__init__.py
|
revival0728/py-iostream
|
5fe441e13f7054d091a4215ab23655a9b7d5eeac
|
[
"MIT"
] | null | null | null |
iostream/__init__.py
|
revival0728/py-iostream
|
5fe441e13f7054d091a4215ab23655a9b7d5eeac
|
[
"MIT"
] | null | null | null |
import sys
from .istream import istream
from .ostream import ostream
from .io import pin
from .io import pout
from .io import perr
sys.settrace(io.__iostream_checker__) # need fix
| 20.222222
| 49
| 0.791209
| 29
| 182
| 4.793103
| 0.482759
| 0.129496
| 0.258993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 182
| 9
| 49
| 20.222222
| 0.902597
| 0.043956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.857143
| 0
| 0.857143
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1b31b9d96e061908cd3c5890a8759f6fcf9c8be8
| 14,428
|
py
|
Python
|
cottonformation/res/sso.py
|
MacHu-GWU/cottonformation-project
|
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
|
[
"BSD-2-Clause"
] | 5
|
2021-07-22T03:45:59.000Z
|
2021-12-17T21:07:14.000Z
|
cottonformation/res/sso.py
|
MacHu-GWU/cottonformation-project
|
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
|
[
"BSD-2-Clause"
] | 1
|
2021-06-25T18:01:31.000Z
|
2021-06-25T18:01:31.000Z
|
cottonformation/res/sso.py
|
MacHu-GWU/cottonformation-project
|
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
|
[
"BSD-2-Clause"
] | 2
|
2021-06-27T03:08:21.000Z
|
2021-06-28T22:15:51.000Z
|
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class PropInstanceAccessControlAttributeConfigurationAccessControlAttributeValue(Property):
"""
AWS Object Type = "AWS::SSO::InstanceAccessControlAttributeConfiguration.AccessControlAttributeValue"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattributevalue.html
Property Document:
- ``rp_Source``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattributevalue.html#cfn-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattributevalue-source
"""
AWS_OBJECT_TYPE = "AWS::SSO::InstanceAccessControlAttributeConfiguration.AccessControlAttributeValue"
rp_Source: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "Source"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattributevalue.html#cfn-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattributevalue-source"""
@attr.s
class PropInstanceAccessControlAttributeConfigurationAccessControlAttribute(Property):
"""
AWS Object Type = "AWS::SSO::InstanceAccessControlAttributeConfiguration.AccessControlAttribute"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattribute.html
Property Document:
- ``rp_Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattribute.html#cfn-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattribute-key
- ``rp_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattribute.html#cfn-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattribute-value
"""
AWS_OBJECT_TYPE = "AWS::SSO::InstanceAccessControlAttributeConfiguration.AccessControlAttribute"
rp_Key: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Key"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattribute.html#cfn-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattribute-key"""
rp_Value: typing.Union['PropInstanceAccessControlAttributeConfigurationAccessControlAttributeValue', dict] = attr.ib(
default=None,
converter=PropInstanceAccessControlAttributeConfigurationAccessControlAttributeValue.from_dict,
validator=attr.validators.instance_of(PropInstanceAccessControlAttributeConfigurationAccessControlAttributeValue),
metadata={AttrMeta.PROPERTY_NAME: "Value"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattribute.html#cfn-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattribute-value"""
#--- Resource declaration ---
@attr.s
class Assignment(Resource):
    """
    AWS Object Type = "AWS::SSO::Assignment"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html
    Property Document:
    
    - ``rp_InstanceArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html#cfn-sso-assignment-instancearn
    - ``rp_PermissionSetArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html#cfn-sso-assignment-permissionsetarn
    - ``rp_PrincipalId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html#cfn-sso-assignment-principalid
    - ``rp_PrincipalType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html#cfn-sso-assignment-principaltype
    - ``rp_TargetId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html#cfn-sso-assignment-targetid
    - ``rp_TargetType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html#cfn-sso-assignment-targettype
    """
    AWS_OBJECT_TYPE = "AWS::SSO::Assignment"
    # All six properties below are required ("rp_" prefix) intrinsic strings;
    # intrinsic_str also admits CloudFormation intrinsic functions (Ref etc.).
    rp_InstanceArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "InstanceArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html#cfn-sso-assignment-instancearn"""
    rp_PermissionSetArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "PermissionSetArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html#cfn-sso-assignment-permissionsetarn"""
    rp_PrincipalId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "PrincipalId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html#cfn-sso-assignment-principalid"""
    rp_PrincipalType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "PrincipalType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html#cfn-sso-assignment-principaltype"""
    rp_TargetId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "TargetId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html#cfn-sso-assignment-targetid"""
    rp_TargetType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "TargetType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-assignment.html#cfn-sso-assignment-targettype"""
@attr.s
class InstanceAccessControlAttributeConfiguration(Resource):
    """
    AWS Object Type = "AWS::SSO::InstanceAccessControlAttributeConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-instanceaccesscontrolattributeconfiguration.html
    Property Document:
    
    - ``rp_InstanceArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-instanceaccesscontrolattributeconfiguration.html#cfn-sso-instanceaccesscontrolattributeconfiguration-instancearn
    - ``p_AccessControlAttributes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-instanceaccesscontrolattributeconfiguration.html#cfn-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattributes
    """
    AWS_OBJECT_TYPE = "AWS::SSO::InstanceAccessControlAttributeConfiguration"
    # Required property ("rp_" prefix).
    rp_InstanceArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "InstanceArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-instanceaccesscontrolattributeconfiguration.html#cfn-sso-instanceaccesscontrolattributeconfiguration-instancearn"""
    # Optional property ("p_" prefix): list of AccessControlAttribute property
    # objects; plain dicts in the list are normalized by ``from_list`` before
    # the (optional) deep_iterable validator runs.
    p_AccessControlAttributes: typing.List[typing.Union['PropInstanceAccessControlAttributeConfigurationAccessControlAttribute', dict]] = attr.ib(
        default=None,
        converter=PropInstanceAccessControlAttributeConfigurationAccessControlAttribute.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropInstanceAccessControlAttributeConfigurationAccessControlAttribute), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "AccessControlAttributes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-instanceaccesscontrolattributeconfiguration.html#cfn-sso-instanceaccesscontrolattributeconfiguration-accesscontrolattributes"""
@attr.s
class PermissionSet(Resource):
    """
    AWS Object Type = "AWS::SSO::PermissionSet"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html
    Property Document:
    
    - ``rp_InstanceArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-instancearn
    - ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-name
    - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-description
    - ``p_InlinePolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-inlinepolicy
    - ``p_ManagedPolicies``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-managedpolicies
    - ``p_RelayStateType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-relaystatetype
    - ``p_SessionDuration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-sessionduration
    - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-tags
    """
    AWS_OBJECT_TYPE = "AWS::SSO::PermissionSet"
    # Required properties ("rp_" prefix).
    rp_InstanceArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "InstanceArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-instancearn"""
    rp_Name: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Name"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-name"""
    # Optional properties ("p_" prefix); validators are wrapped in ``optional``
    # so None is accepted.
    p_Description: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Description"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-description"""
    # Inline policy is a raw policy document (JSON as a Python dict).
    p_InlinePolicy: dict = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(dict)),
        metadata={AttrMeta.PROPERTY_NAME: "InlinePolicy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-inlinepolicy"""
    p_ManagedPolicies: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "ManagedPolicies"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-managedpolicies"""
    p_RelayStateType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "RelayStateType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-relaystatetype"""
    p_SessionDuration: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "SessionDuration"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-sessionduration"""
    # Tags may be given as Tag objects or plain dicts; normalized by from_list.
    p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
        default=None,
        converter=Tag.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Tags"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#cfn-sso-permissionset-tags"""
    @property
    def rv_PermissionSetArn(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sso-permissionset.html#aws-resource-sso-permissionset-return-values"""
        # "rv_" prefix: a CloudFormation return value, exposed as Fn::GetAtt.
        return GetAtt(resource=self, attr_name="PermissionSetArn")
| 61.65812
| 265
| 0.7771
| 1,471
| 14,428
| 7.518695
| 0.059143
| 0.031826
| 0.043761
| 0.067631
| 0.878571
| 0.875226
| 0.842676
| 0.809946
| 0.809946
| 0.809946
| 0
| 0.000077
| 0.099875
| 14,428
| 233
| 266
| 61.922747
| 0.851544
| 0.329775
| 0
| 0.352459
| 0
| 0
| 0.097095
| 0.062993
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008197
| false
| 0
| 0.032787
| 0
| 0.286885
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
94683e867fcf095a6c6c96fe20091c6adf5781ba
| 25
|
py
|
Python
|
tldry/__init__.py
|
vangaa/tldry
|
0f5075dbed3cd09ac6749e09273a2e054d75445a
|
[
"MIT"
] | 2
|
2019-04-01T09:39:54.000Z
|
2019-05-17T19:24:39.000Z
|
tldry/__init__.py
|
vangaa/tldry
|
0f5075dbed3cd09ac6749e09273a2e054d75445a
|
[
"MIT"
] | null | null | null |
tldry/__init__.py
|
vangaa/tldry
|
0f5075dbed3cd09ac6749e09273a2e054d75445a
|
[
"MIT"
] | 1
|
2021-02-03T14:00:43.000Z
|
2021-02-03T14:00:43.000Z
|
from .tldry import TLDRy
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
84c9685f6724825c2da54b5eb9563b4f035887ea
| 46
|
py
|
Python
|
2021/CourseraPython/course2-data-structures/hey.py
|
Muramatsu2602/python-study
|
c81eb5d2c343817bc29b2763dcdcabed0f6a42c6
|
[
"MIT"
] | 2
|
2021-01-11T16:13:40.000Z
|
2022-03-02T02:03:46.000Z
|
2021/CourseraPython/course2-data-structures/hey.py
|
Muramatsu2602/python-study
|
c81eb5d2c343817bc29b2763dcdcabed0f6a42c6
|
[
"MIT"
] | null | null | null |
2021/CourseraPython/course2-data-structures/hey.py
|
Muramatsu2602/python-study
|
c81eb5d2c343817bc29b2763dcdcabed0f6a42c6
|
[
"MIT"
] | null | null | null |
# Placeholder script: prints a greeting; no functional content beyond this.
print("data structs baby!!")
| 23
| 28
| 0.76087
| 5
| 46
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 2
| 29
| 23
| 0.833333
| 0.326087
| 0
| 0
| 0
| 0
| 0.633333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
ca5127a4c0615ee23cb09f4ed4c5be2b27d633bf
| 31
|
py
|
Python
|
exts/omni.add_on.visualizer/omni/add_on/visualizer/tests/__init__.py
|
Toni-SM/omni.add_on.visualizer
|
0bbca6c93fb40f6c8dcded8f4b8a51df81feaf52
|
[
"MIT"
] | 3
|
2021-07-24T08:50:26.000Z
|
2022-02-23T06:25:10.000Z
|
exts/omni.add_on.visualizer/omni/add_on/visualizer/tests/__init__.py
|
Toni-SM/omni.add_on.visualizer
|
0bbca6c93fb40f6c8dcded8f4b8a51df81feaf52
|
[
"MIT"
] | 1
|
2022-03-27T17:55:39.000Z
|
2022-03-27T17:55:43.000Z
|
exts/omni.add_on.visualizer/omni/add_on/visualizer/tests/__init__.py
|
Toni-SM/omni.add_on.visualizer
|
0bbca6c93fb40f6c8dcded8f4b8a51df81feaf52
|
[
"MIT"
] | null | null | null |
from .test_visualizer import *
| 15.5
| 30
| 0.806452
| 4
| 31
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ca8899e5526768d89fe639ff3c58ac5fe8bcfec8
| 3,726
|
py
|
Python
|
test.py
|
FunkyKoki/Separable-Batch-Normalization-for-Robust-Facial-Landmark-Localization
|
361bafaffdb50e05a97aa7ef27b9bfddd07f6648
|
[
"MIT"
] | null | null | null |
test.py
|
FunkyKoki/Separable-Batch-Normalization-for-Robust-Facial-Landmark-Localization
|
361bafaffdb50e05a97aa7ef27b9bfddd07f6648
|
[
"MIT"
] | null | null | null |
test.py
|
FunkyKoki/Separable-Batch-Normalization-for-Robust-Facial-Landmark-Localization
|
361bafaffdb50e05a97aa7ef27b9bfddd07f6648
|
[
"MIT"
] | 1
|
2021-11-22T09:18:53.000Z
|
2021-11-22T09:18:53.000Z
|
import tqdm
import copy
import torch
import numpy as np
import cv2
from models import Baseline, loadWeights, BaselineSepDyBN
from datasets import WFLW256, datasetSize, kptNum
import time
from ptflops import get_model_complexity_info
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def testWFLW256Baseline(mode, test_epoch, logName):
    """
    Evaluate a Baseline checkpoint on the WFLW256 dataset.

    :param mode: dataset split name (e.g. ``'test'``); also keys ``datasetSize``.
    :param test_epoch: epoch tag used to locate the checkpoint file.
    :param logName: training-run name prefix of the checkpoint file.
    :return: mean normalized error rate over the split, in percent.
    """
    testset = WFLW256(mode=mode, augment=False)
    dataloader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=False, pin_memory=True)
    # Report model complexity (MACs and parameter count) before evaluating.
    macs, params = get_model_complexity_info(Baseline(), (3, 128, 128), as_strings=True,
                                             print_per_layer_stat=True, verbose=True)
    print('{:<30} {:<8}'.format('Computational complexity: ', macs))
    print('{:<30} {:<8}'.format('Number of parameters: ', params))
    model = Baseline()
    model = loadWeights(model, './checkpoints/'+logName+'_model_'+str(test_epoch)+'.pth', 'cuda:0')
    model.eval().cuda('cuda:0')
    errorRates = []
    times = []
    with torch.no_grad():
        for data in tqdm.tqdm(dataloader):
            img, tpts = data
            img = img.cuda('cuda:0').float()
            tpts = tpts.squeeze().numpy().reshape(-1, 2)
            # NOTE(review): the timed region is not bracketed by
            # torch.cuda.synchronize(), so CUDA's asynchronous execution may
            # make per-image timings optimistic -- confirm before citing.
            startTime = time.time()
            pts = model(img)
            times.append(time.time()-startTime)
            pts = pts.cpu().squeeze().numpy().reshape(-1, 2)
            # Normalize the mean point-to-point error by the distance between
            # landmarks 60 and 72 -- presumably the eye corners in the WFLW
            # annotation scheme; TODO confirm indices.
            normalizeFactor = np.sqrt((tpts[60, 0] - tpts[72, 0])**2 + (tpts[60, 1] - tpts[72, 1])**2)
            errorRate = np.sum(np.sqrt(np.sum(pow(pts-tpts, 2), axis=1)))/kptNum/normalizeFactor
            errorRates.append(errorRate)
    errorRate = sum(errorRates) / datasetSize[mode] * 100
    print(mode + ' error rate: ' + str(errorRate))
    # Bug fix: times come from time.time() and are therefore in seconds, but
    # the message previously labelled the average as "ms".
    print("Avg forward time is: " + str(sum(times)/datasetSize[mode]) + "s")
    print("FPS: " + str(1/sum(times)*datasetSize[mode]))
    return errorRate
def testWFLW256BaselineSepDyBN(mode, test_epoch, logName):
    """
    Evaluate a BaselineSepDyBN (temp=1) checkpoint on the WFLW256 dataset.

    :param mode: dataset split name (e.g. ``'test'``); also keys ``datasetSize``.
    :param test_epoch: epoch tag used to locate the checkpoint file.
    :param logName: training-run name prefix of the checkpoint file.
    :return: mean normalized error rate over the split, in percent.
    """
    testset = WFLW256(mode=mode, augment=False)
    dataloader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=False, pin_memory=True)
    # Report model complexity (MACs and parameter count) before evaluating.
    macs, params = get_model_complexity_info(BaselineSepDyBN(temp=1), (3, 128, 128), as_strings=True,
                                             print_per_layer_stat=True, verbose=True)
    print('{:<30} {:<8}'.format('Computational complexity: ', macs))
    print('{:<30} {:<8}'.format('Number of parameters: ', params))
    model = BaselineSepDyBN(temp=1)
    model = loadWeights(model, './checkpoints/'+logName+'_model_'+str(test_epoch)+'.pth', 'cuda:0')
    model.eval().cuda('cuda:0')
    errorRates = []
    times = []
    with torch.no_grad():
        for data in tqdm.tqdm(dataloader):
            img, tpts = data
            img = img.cuda('cuda:0').float()
            tpts = tpts.squeeze().numpy().reshape(-1, 2)
            # NOTE(review): no torch.cuda.synchronize() around the timed call,
            # so per-image timings may be optimistic -- confirm before citing.
            startTime = time.time()
            pts = model(img)
            times.append(time.time()-startTime)
            pts = pts.cpu().squeeze().numpy().reshape(-1, 2)
            # Normalize by the distance between landmarks 60 and 72 --
            # presumably the eye corners in WFLW; TODO confirm indices.
            normalizeFactor = np.sqrt((tpts[60, 0] - tpts[72, 0])**2 + (tpts[60, 1] - tpts[72, 1])**2)
            errorRate = np.sum(np.sqrt(np.sum(pow(pts-tpts, 2), axis=1)))/kptNum/normalizeFactor
            errorRates.append(errorRate)
    errorRate = sum(errorRates) / datasetSize[mode] * 100
    print(mode + ' error rate: ' + str(errorRate))
    # Bug fix: times come from time.time() and are therefore in seconds, but
    # the message previously labelled the average as "ms".
    print("Avg forward time is: " + str(sum(times)/datasetSize[mode]) + "s")
    print("FPS: " + str(1/sum(times)*datasetSize[mode]))
    return errorRate
if __name__ == "__main__":
    # Evaluate the final checkpoints of both trained runs on the test split.
    testWFLW256Baseline('test', 'final', '20210101_WFLW256Baseline_Train_1')
    testWFLW256BaselineSepDyBN('test', 'final', '20210101_WFLW256BaselineSepDyBN_Train_1')
| 40.064516
| 102
| 0.618357
| 452
| 3,726
| 4.993363
| 0.25
| 0.013292
| 0.014178
| 0.024812
| 0.788658
| 0.788658
| 0.788658
| 0.788658
| 0.788658
| 0.788658
| 0
| 0.045282
| 0.223564
| 3,726
| 92
| 103
| 40.5
| 0.734877
| 0
| 0
| 0.722222
| 0
| 0
| 0.116479
| 0.019055
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.138889
| 0
| 0.194444
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
046f7ca774853f5d3eb7867b83c71d1422a4ad73
| 36
|
py
|
Python
|
deliverable1/test_case_05/__init__.py
|
TrackerSB/IEEEAITestChallenge2021
|
836f0cfa39a3e25a8149a9adb77c3a8270417a39
|
[
"MIT"
] | 1
|
2021-09-14T16:30:44.000Z
|
2021-09-14T16:30:44.000Z
|
deliverable1/test_case_05/__init__.py
|
TrackerSB/IEEEAITestChallenge2021
|
836f0cfa39a3e25a8149a9adb77c3a8270417a39
|
[
"MIT"
] | 22
|
2021-02-26T06:35:00.000Z
|
2021-07-16T12:37:58.000Z
|
deliverable1/test_case_05/__init__.py
|
TrackerSB/IEEEAITestChallenge2021
|
836f0cfa39a3e25a8149a9adb77c3a8270417a39
|
[
"MIT"
] | 2
|
2021-03-10T19:50:28.000Z
|
2021-08-23T08:02:36.000Z
|
from .test_case_05 import TestCase05
| 36
| 36
| 0.888889
| 6
| 36
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 0.083333
| 36
| 1
| 36
| 36
| 0.787879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
04b09aa8713d5ab8e03aaae8e43c98c2dc1a8ee7
| 23
|
py
|
Python
|
zipfanalysis/__init__.py
|
chasmani/zipfanalysis
|
ffec413522037005fa70441e7b15f9675a11cd78
|
[
"MIT"
] | null | null | null |
zipfanalysis/__init__.py
|
chasmani/zipfanalysis
|
ffec413522037005fa70441e7b15f9675a11cd78
|
[
"MIT"
] | null | null | null |
zipfanalysis/__init__.py
|
chasmani/zipfanalysis
|
ffec413522037005fa70441e7b15f9675a11cd78
|
[
"MIT"
] | null | null | null |
from .main import all
| 7.666667
| 21
| 0.73913
| 4
| 23
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 23
| 2
| 22
| 11.5
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
04b6786e9894f69411222677b03453691f564ef4
| 94
|
py
|
Python
|
exercism/python/gigasecond/gigasecond.py
|
Cythun/online-judge-practice
|
1205480a2ff30e2a698917a7717ffe4db2fba2a5
|
[
"MIT"
] | null | null | null |
exercism/python/gigasecond/gigasecond.py
|
Cythun/online-judge-practice
|
1205480a2ff30e2a698917a7717ffe4db2fba2a5
|
[
"MIT"
] | null | null | null |
exercism/python/gigasecond/gigasecond.py
|
Cythun/online-judge-practice
|
1205480a2ff30e2a698917a7717ffe4db2fba2a5
|
[
"MIT"
] | null | null | null |
import datetime
def add_gigasecond(moment):
    """Return *moment* shifted forward by one gigasecond (10**9 seconds)."""
    gigasecond = datetime.timedelta(seconds=10**9)
    return moment + gigasecond
| 18.8
| 48
| 0.744681
| 13
| 94
| 5.307692
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0.148936
| 94
| 4
| 49
| 23.5
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
04b7f98003c0b24cde19fd3cad194d882da25e7b
| 4,455
|
py
|
Python
|
experiments/experiment_6.py
|
arijitnoobstar/OnitamaDeepRL
|
e561b22fe7728f51c1f1a078dfb19aa008bf010e
|
[
"Apache-2.0"
] | 3
|
2021-05-16T08:43:09.000Z
|
2021-05-31T16:23:43.000Z
|
experiments/experiment_6.py
|
mion666459/OnitamaAI
|
e561b22fe7728f51c1f1a078dfb19aa008bf010e
|
[
"Apache-2.0"
] | null | null | null |
experiments/experiment_6.py
|
mion666459/OnitamaAI
|
e561b22fe7728f51c1f1a078dfb19aa008bf010e
|
[
"Apache-2.0"
] | 1
|
2021-05-28T10:07:50.000Z
|
2021-05-28T10:07:50.000Z
|
# access Train.py in parent folder and set relative folder to parent folder for data saving
import os
import sys
os.chdir("..")
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from Train import *
"""
The purpose of this experiment is to lower the val constabt
for multiply to see if it can compromise validity to learn better actions
"""
# EXPERIMENT 6 PARAMETERS, NOTE: Boost is set to 0
ep_num = 20000
plot_every = 4000
moving_average = 50
minimax_boost = 0

# The five original calls were identical except for ``val_constant`` (and the
# run name derived from it), so they are expressed as one loop.  The runs
# execute in the same order with byte-identical arguments: the f-string
# renders 5 -> "multiply_val_constant_5", 0.1 -> "multiply_val_constant_0.1",
# and so on, matching the original run names exactly.
for val_constant in (5, 1, 0.1, 0.01, 0.001):
    onitama_deeprl_train("train", "DDPG", ep_num, f"multiply_val_constant_{val_constant}", "minimax", 1, discount_rate = 0.99,
        lr_actor = 0.001, lr_critic = 0.001, tau = 0.005, board_input_shape = [4, 5, 5], card_input_shape = 10,
        num_actions = 40, max_mem_size = 1000000, batch_size = 128, epsilon = 1,
        epsilon_min = 0.01, update_target = None, val_constant = val_constant, invalid_penalty = 500, hand_of_god = True,
        use_competing_AI_replay = False, win_loss_mem_size = 1000, desired_win_ratio = 0.6, use_hardcoded_cards = True,
        reward_mode = "simple_reward", minimax_boost = minimax_boost, mcts_boost = 5000, plot_every = plot_every, win_loss_queue_size = 100,
        architecture = "val_branch_actions_multiply", moving_average = moving_average, verbose = False)
| 75.508475
| 142
| 0.707071
| 671
| 4,455
| 4.330849
| 0.174367
| 0.016518
| 0.024776
| 0.039573
| 0.877495
| 0.877495
| 0.877495
| 0.877495
| 0.877495
| 0.877495
| 0
| 0.08391
| 0.18945
| 4,455
| 59
| 143
| 75.508475
| 0.720853
| 0.030976
| 0
| 0.568182
| 0
| 0
| 0.097748
| 0.062051
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.068182
| 0
| 0.068182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
04e5f68f42ca705a8b76e3ac11e3a69fe8883a0f
| 26,352
|
py
|
Python
|
test/test_md007.py
|
jackdewinter/pymarkdown
|
7ae408ba0b24506fa07552ffe520750bbff38c53
|
[
"MIT"
] | 20
|
2021-01-14T17:39:09.000Z
|
2022-03-14T08:35:22.000Z
|
test/test_md007.py
|
jackdewinter/pymarkdown
|
7ae408ba0b24506fa07552ffe520750bbff38c53
|
[
"MIT"
] | 304
|
2020-08-15T23:24:00.000Z
|
2022-03-31T23:34:03.000Z
|
test/test_md007.py
|
jackdewinter/pymarkdown
|
7ae408ba0b24506fa07552ffe520750bbff38c53
|
[
"MIT"
] | 3
|
2021-08-11T10:26:26.000Z
|
2021-11-02T20:41:27.000Z
|
"""
Module to provide tests related to the MD007 rule.
"""
from test.markdown_scanner import MarkdownScanner
import pytest
@pytest.mark.rules
def test_md007_bad_configuration_indent():
    """
    A non-integer string supplied for plugins.md007.indent must be
    rejected with a plugin-configuration error.
    """
    # Arrange: scanner plus CLI arguments injecting the bad setting.
    md_scanner = MarkdownScanner()
    cli_args = [
        "--set",
        "plugins.md007.indent=bad",
        "--strict-config",
        "scan",
        "test/resources/rules/md007/good_list_indentation.md",
    ]
    want_rc = 1
    want_out = ""
    want_err = (
        "BadPluginError encountered while configuring plugins:\n"
        + "The value for property 'plugins.md007.indent' must be of type 'int'."
    )

    # Act
    run = md_scanner.invoke_main(arguments=cli_args)

    # Assert
    run.assert_results(want_out, want_err, want_rc)
@pytest.mark.rules
def test_md007_bad_configuration_start_indented():
    """
    A non-boolean value supplied for plugins.md007.start_indented must be
    rejected with a plugin-configuration error.
    """
    # Arrange: scanner plus CLI arguments injecting the bad setting.
    md_scanner = MarkdownScanner()
    cli_args = [
        "--set",
        "plugins.md007.start_indented=bad",
        "--strict-config",
        "scan",
        "test/resources/rules/md007/good_list_indentation.md",
    ]
    want_rc = 1
    want_out = ""
    want_err = (
        "BadPluginError encountered while configuring plugins:\n"
        + "The value for property 'plugins.md007.start_indented' must be of type 'bool'."
    )

    # Act
    run = md_scanner.invoke_main(arguments=cli_args)

    # Assert
    run.assert_results(want_out, want_err, want_rc)
@pytest.mark.rules
def test_md007_bad_configuration_indent_bad():
    """
    An integer outside the allowed 2..4 range supplied for
    plugins.md007.indent must be rejected with a configuration error.
    """
    # Arrange: scanner plus CLI arguments injecting the out-of-range value.
    md_scanner = MarkdownScanner()
    cli_args = [
        "--set",
        "plugins.md007.indent=$#5",
        "--strict-config",
        "scan",
        "test/resources/rules/md007/good_list_indentation.md",
    ]
    want_rc = 1
    want_out = ""
    want_err = (
        "BadPluginError encountered while configuring plugins:\n"
        + "The value for property 'plugins.md007.indent' is not valid: Allowable values are between 2 and 4."
    )

    # Act
    run = md_scanner.invoke_main(arguments=cli_args)

    # Assert
    run.assert_results(want_out, want_err, want_rc)
@pytest.mark.rules
def test_md007_good_list_indentation_x():
    """
    A document whose list items use exactly the required indentation
    must not trigger the rule.
    """
    # Arrange
    md_scanner = MarkdownScanner()
    cli_args = [
        "scan",
        "test/resources/rules/md007/good_list_indentation.md",
    ]
    want_rc = 0
    want_out = ""
    want_err = ""

    # Act
    run = md_scanner.invoke_main(arguments=cli_args)

    # Assert
    run.assert_results(want_out, want_err, want_rc)
@pytest.mark.rules
def test_md007_bad_list_indentation_level_0():
    """
    Extra spaces before a level-1 list item must trigger the rule.
    """
    # Arrange
    md_scanner = MarkdownScanner()
    cli_args = [
        "scan",
        "test/resources/rules/md007/bad_list_indentation_level_0.md",
    ]
    want_rc = 1
    want_out = (
        "test/resources/rules/md007/bad_list_indentation_level_0.md:3:2: "
        + "MD007: Unordered list indentation "
        + "[Expected: 0, Actual=1] (ul-indent)"
    )
    want_err = ""

    # Act
    run = md_scanner.invoke_main(arguments=cli_args)

    # Assert
    run.assert_results(want_out, want_err, want_rc)
@pytest.mark.rules
def test_md007_bad_list_indentation_level_1():
    """
    Extra spaces before a level-2 list item must trigger the rule.
    """
    # Arrange
    md_scanner = MarkdownScanner()
    cli_args = [
        "scan",
        "test/resources/rules/md007/bad_list_indentation_level_1.md",
    ]
    want_rc = 1
    want_out = (
        "test/resources/rules/md007/bad_list_indentation_level_1.md:4:4: "
        + "MD007: Unordered list indentation "
        + "[Expected: 2, Actual=3] (ul-indent)"
    )
    want_err = ""

    # Act
    run = md_scanner.invoke_main(arguments=cli_args)

    # Assert
    run.assert_results(want_out, want_err, want_rc)
@pytest.mark.rules
def test_md007_bad_list_indentation_level_2():
    """
    Extra spaces before a level-3 list item must trigger the rule.
    """
    # Arrange
    md_scanner = MarkdownScanner()
    cli_args = [
        "scan",
        "test/resources/rules/md007/bad_list_indentation_level_2.md",
    ]
    want_rc = 1
    want_out = (
        "test/resources/rules/md007/bad_list_indentation_level_2.md:5:6: "
        + "MD007: Unordered list indentation "
        + "[Expected: 4, Actual=5] (ul-indent)"
    )
    want_err = ""

    # Act
    run = md_scanner.invoke_main(arguments=cli_args)

    # Assert
    run.assert_results(want_out, want_err, want_rc)
@pytest.mark.rules
def test_md007_good_list_indentation_in_block_quote():
    """
    Correctly indented list items inside a block quote must not
    trigger the rule.
    """
    # Arrange
    md_scanner = MarkdownScanner()
    cli_args = [
        "scan",
        "test/resources/rules/md007/good_list_indentation_in_block_quote.md",
    ]
    want_rc = 0
    want_out = ""
    want_err = ""

    # Act
    run = md_scanner.invoke_main(arguments=cli_args)

    # Assert
    run.assert_results(want_out, want_err, want_rc)
@pytest.mark.rules
def test_md007_good_list_indentation_in_double_block_quote():
    """
    Verify that rule md007 stays quiet for a properly indented
    unordered list nested inside a double block quote.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/good_list_indentation_in_double_block_quote.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 0
    expected_output = ""
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_good_unordered_list_in_ordered_list():
    """
    Verify that rule md007 stays quiet for a correctly indented
    unordered list nested inside an ordered list.

    Rule md030 is disabled so list-marker spacing does not interfere.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = "test/resources/rules/md007/good_unordered_list_in_ordered_list.md"
    supplied_arguments = ["--disable-rules", "md030", "scan", source_path]
    expected_return_code = 0
    expected_output = ""
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_unordered_list_in_ordered_list():
    """
    Verify that rule md007 triggers for an unordered list that starts
    with extra indentation inside an ordered list.

    Rule md030 is disabled so list-marker spacing does not interfere.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = "test/resources/rules/md007/bad_unordered_list_in_ordered_list.md"
    supplied_arguments = ["--disable-rules", "md030", "scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":2:6: MD007: Unordered list indentation "
        + "[Expected: 5, Actual=6] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_level_1_unordered_list_in_ordered_list():
    """
    Verify that rule md007 triggers for two nested unordered lists inside
    an ordered list when the inner one starts with extra indentation.

    Rule md030 is disabled so list-marker spacing does not interfere.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_level_1_unordered_list_in_ordered_list.md"
    )
    supplied_arguments = ["--disable-rules", "md030", "scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":3:8: MD007: Unordered list indentation "
        + "[Expected: 7, Actual=8] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_good_unordered_list_in_double_ordered_list():
    """
    Verify that rule md007 stays quiet for an unordered list nested
    inside two ordered lists when no extra indentation is present.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/good_unordered_list_in_double_ordered_list.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 0
    expected_output = ""
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_unordered_list_in_double_ordered_list():
    """
    Verify that rule md007 triggers for an unordered list nested inside
    two ordered lists when the unordered list has extra indentation.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_unordered_list_in_double_ordered_list.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":3:8: MD007: Unordered list indentation "
        + "[Expected: 7, Actual=8] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_good_unordered_ordered_unordere_ordered_unordered():
    """
    Verify that rule md007 stays quiet for alternating nested ordered and
    unordered lists with no extra indentation.

    NOTE(review): "unordere" is a typo, but it matches the fixture file
    name, so the test name is kept as-is.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/good_unordered_ordered_unordere_ordered_unordered.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 0
    expected_output = ""
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_unordered_bad_ordered_unordered_ordered_unordered():
    """
    Verify that rule md007 triggers for alternating nested ordered and
    unordered lists when the outermost unordered item is over-indented.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_unordered_bad_ordered_unordered_ordered_unordered.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":1:2: MD007: Unordered list indentation "
        + "[Expected: 0, Actual=1] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_unordered_ordered_unordered_bad_ordered_unordered():
    """
    Verify that rule md007 triggers for alternating nested ordered and
    unordered lists when a middle unordered item is over-indented.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_unordered_ordered_unordered_bad_ordered_unordered.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":3:7: MD007: Unordered list indentation "
        + "[Expected: 6, Actual=7] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_unordered_ordered_unordered_ordered_unordered_bad():
    """
    Verify that rule md007 triggers for alternating nested ordered and
    unordered lists when the innermost unordered item is over-indented.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_unordered_ordered_unordered_ordered_unordered_bad.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":5:12: MD007: Unordered list indentation "
        + "[Expected: 11, Actual=12] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_list_indentation_in_block_quote_level_0():
    """
    Verify that rule md007 triggers once per level for nested unordered
    lists inside a block quote when every level has extra indentation.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_list_indentation_in_block_quote_level_0.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    # One violation per list level, reported innermost-last.
    expected_output = "\n".join(
        [
            source_path
            + ":3:4: MD007: Unordered list indentation "
            + "[Expected: 0, Actual=1] (ul-indent)",
            source_path
            + ":4:6: MD007: Unordered list indentation "
            + "[Expected: 2, Actual=3] (ul-indent)",
            source_path
            + ":5:8: MD007: Unordered list indentation "
            + "[Expected: 4, Actual=5] (ul-indent)",
        ]
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_list_in_block_quote_after_text():
    """
    Verify that rule md007 triggers for a badly indented nested unordered
    list that follows a text block inside a block quote.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = "test/resources/rules/md007/bad_list_in_block_quote_after_text.md"
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":4:6: MD007: Unordered list indentation "
        + "[Expected: 2, Actual=3] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_list_in_block_quote_after_atx_heading():
    """
    Verify that rule md007 triggers for a badly indented nested unordered
    list that follows an ATX heading inside a block quote.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_list_in_block_quote_after_atx_heading.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":4:6: MD007: Unordered list indentation "
        + "[Expected: 2, Actual=3] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_list_in_block_quote_after_thematic_break():
    """
    Verify that rule md007 triggers for a badly indented nested unordered
    list that follows a thematic break inside a block quote.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_list_in_block_quote_after_thematic_break.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":6:6: MD007: Unordered list indentation "
        + "[Expected: 2, Actual=3] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_list_in_block_quote_after_setext_heading():
    """
    Verify that rule md007 triggers for a badly indented nested unordered
    list that follows a SetExt heading inside a block quote.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_list_in_block_quote_after_setext_heading.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":5:6: MD007: Unordered list indentation "
        + "[Expected: 2, Actual=3] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_list_in_block_quote_after_html_block():
    """
    Verify that rule md007 triggers for a badly indented nested unordered
    list that follows an HTML block inside a block quote.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_list_in_block_quote_after_html_block.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":6:6: MD007: Unordered list indentation "
        + "[Expected: 2, Actual=3] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_list_in_block_quote_after_fenced_block():
    """
    Verify that rule md007 triggers for a badly indented nested unordered
    list that follows a fenced code block inside a block quote.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_list_in_block_quote_after_fenced_block.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":6:6: MD007: Unordered list indentation "
        + "[Expected: 2, Actual=3] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_list_in_block_quote_after_indented_block():
    """
    Verify that rule md007 triggers for a badly indented nested unordered
    list that follows an indented code block inside a block quote.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_list_in_block_quote_after_indented_block.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":4:6: MD007: Unordered list indentation "
        + "[Expected: 2, Actual=3] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_list_in_block_quote_after_link_reference_definition():
    """
    Verify that rule md007 triggers for a badly indented nested unordered
    list that follows a link reference definition inside a block quote.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_list_in_block_quote_after_link_reference_definition.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":4:6: MD007: Unordered list indentation "
        + "[Expected: 2, Actual=3] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_bad_list_in_block_quote_after_other_list():
    """
    Verify that rule md007 triggers for a badly indented nested unordered
    list that follows another list inside a block quote.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = (
        "test/resources/rules/md007/bad_list_in_block_quote_after_other_list.md"
    )
    supplied_arguments = ["scan", source_path]
    expected_return_code = 1
    expected_output = (
        source_path
        + ":4:6: MD007: Unordered list indentation "
        + "[Expected: 2, Actual=3] (ul-indent)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_good_unordered_list_elements():
    """
    Verify that rule md007 stays quiet for many nested unordered lists
    when each level is properly indented.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = "test/resources/rules/md007/good_unordered_list_elements.md"
    supplied_arguments = ["scan", source_path]
    expected_return_code = 0
    expected_output = ""
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_good_list_indentation_by_four():
    """
    Verify that rule md007 stays quiet for lists indented by four spaces
    when configuration raises the allowed indent to four.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = "test/resources/rules/md007/good_list_indentation_by_four.md"
    supplied_arguments = [
        "--set",
        "plugins.md007.indent=$#4",
        "scan",
        source_path,
    ]
    expected_return_code = 0
    expected_output = ""
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
@pytest.mark.rules
def test_md007_good_list_indentation_with_start():
    """
    Verify that rule md007 stays quiet for an indented level-1 list when
    the start_indented configuration option is enabled.
    """
    # Arrange
    scanner = MarkdownScanner()
    source_path = "test/resources/rules/md007/good_list_indentation_with_start.md"
    supplied_arguments = [
        "--set",
        "plugins.md007.start_indented=$!True",
        "scan",
        source_path,
    ]
    expected_return_code = 0
    expected_output = ""
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
| 27.53605
| 109
| 0.68769
| 3,183
| 26,352
| 5.390512
| 0.048068
| 0.028908
| 0.065043
| 0.069705
| 0.965672
| 0.960427
| 0.952559
| 0.949703
| 0.940844
| 0.928022
| 0
| 0.024156
| 0.22708
| 26,352
| 956
| 110
| 27.564854
| 0.818244
| 0.174901
| 0
| 0.673993
| 0
| 0.001832
| 0.284194
| 0.186152
| 0
| 0
| 0
| 0
| 0.056777
| 1
| 0.056777
| false
| 0
| 0.003663
| 0
| 0.06044
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b6c60ff992939fb96343007441d2c3b3c58baf13
| 41
|
py
|
Python
|
adobe_sign_api/__init__.py
|
lingthio/adobe_sign_api
|
a82cb13587fa8414f67263d5354c7413edfc44ac
|
[
"MIT"
] | 6
|
2019-09-10T03:34:20.000Z
|
2021-08-08T14:42:00.000Z
|
adobe_sign_api/__init__.py
|
lingthio/adobe_sign_api
|
a82cb13587fa8414f67263d5354c7413edfc44ac
|
[
"MIT"
] | null | null | null |
adobe_sign_api/__init__.py
|
lingthio/adobe_sign_api
|
a82cb13587fa8414f67263d5354c7413edfc44ac
|
[
"MIT"
] | 1
|
2019-09-06T23:05:27.000Z
|
2019-09-06T23:05:27.000Z
|
from .adobe_sign_api import AdobeSignAPI
| 20.5
| 40
| 0.878049
| 6
| 41
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8e05612e6454e3a0201075d9aac7b407c9a0e0af
| 221
|
py
|
Python
|
ode_explorer/integrators/__init__.py
|
nicholasjng/ode-explorer
|
0bcb5d4834f6001b2a3e54bd5e000e86bbedf221
|
[
"MIT"
] | 3
|
2021-11-11T02:14:18.000Z
|
2022-03-14T11:18:59.000Z
|
ode_explorer/integrators/__init__.py
|
njunge94/ode-explorer
|
0bcb5d4834f6001b2a3e54bd5e000e86bbedf221
|
[
"MIT"
] | null | null | null |
ode_explorer/integrators/__init__.py
|
njunge94/ode-explorer
|
0bcb5d4834f6001b2a3e54bd5e000e86bbedf221
|
[
"MIT"
] | null | null | null |
from ode_explorer.integrators.integrator_loops import (
constant_h_loop,
adaptive_h_loop
)
from ode_explorer.integrators.integrator import Integrator
from ode_explorer.integrators.loop_factory import loop_factory
| 31.571429
| 62
| 0.855204
| 29
| 221
| 6.172414
| 0.413793
| 0.117318
| 0.251397
| 0.435754
| 0.402235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104072
| 221
| 6
| 63
| 36.833333
| 0.90404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8e11efc1468ab165d3d0fc871df7995ea5f39913
| 130
|
py
|
Python
|
kanagata/__init__.py
|
podhmo/kanagata
|
078d4e87ae474b873690b0905ae1557c5e5b4846
|
[
"MIT"
] | null | null | null |
kanagata/__init__.py
|
podhmo/kanagata
|
078d4e87ae474b873690b0905ae1557c5e5b4846
|
[
"MIT"
] | null | null | null |
kanagata/__init__.py
|
podhmo/kanagata
|
078d4e87ae474b873690b0905ae1557c5e5b4846
|
[
"MIT"
] | null | null | null |
from kanagata.builder import RestrictionBuilder # NOQA
from kanagata.builder import Module # NOQA
Builder = RestrictionBuilder
| 26
| 55
| 0.823077
| 14
| 130
| 7.642857
| 0.5
| 0.224299
| 0.35514
| 0.46729
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138462
| 130
| 4
| 56
| 32.5
| 0.955357
| 0.069231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8e20378cb6c72af2f65ef451864ac0fc081b93ee
| 9,267
|
py
|
Python
|
airflow/tests/test_sync_documents_to_kernel.py
|
cesarbruschetta/opac-airflow
|
7deaf1d9ba64bfdb57081698308219f95200fca5
|
[
"BSD-2-Clause"
] | null | null | null |
airflow/tests/test_sync_documents_to_kernel.py
|
cesarbruschetta/opac-airflow
|
7deaf1d9ba64bfdb57081698308219f95200fca5
|
[
"BSD-2-Clause"
] | null | null | null |
airflow/tests/test_sync_documents_to_kernel.py
|
cesarbruschetta/opac-airflow
|
7deaf1d9ba64bfdb57081698308219f95200fca5
|
[
"BSD-2-Clause"
] | null | null | null |
from unittest import TestCase, main
from unittest.mock import patch, MagicMock
from airflow import DAG
from sync_documents_to_kernel import (
list_documents,
delete_documents,
register_update_documents,
)
class TestListDocuments(TestCase):
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.list_documents")
def test_list_document_gets_sps_package_from_dag_run_conf(self, mk_list_documents):
mk_dag_run = MagicMock()
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
list_documents(**kwargs)
mk_dag_run.conf.get.assert_called_once_with("sps_package")
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.list_documents")
def test_list_document_calls_list_documents_operation(self, mk_list_documents):
mk_dag_run = MagicMock()
mk_dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
list_documents(**kwargs)
mk_list_documents.assert_called_once_with("path_to_sps_package/package.zip")
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.list_documents")
def test_list_document_pushes_xmls_from_packages(self, mk_list_documents):
expected = [
"1806-907X-rba-53-01-1-8.xml",
"1806-907X-rba-53-01-9-18.xml",
"1806-907X-rba-53-01-19-25.xml",
]
mk_dag_run = MagicMock()
mk_dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
mk_list_documents.return_value = expected
list_documents(**kwargs)
kwargs["ti"].xcom_push.assert_called_once_with(
key="xmls_filenames", value=expected
)
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.list_documents")
def test_list_document_doesnt_call_ti_xcom_push_if_no_xml_files(
self, mk_list_documents
):
mk_dag_run = MagicMock()
mk_dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
mk_list_documents.return_value = []
list_documents(**kwargs)
kwargs["ti"].xcom_push.assert_not_called()
class TestDeleteDocuments(TestCase):
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.delete_documents")
def test_delete_documents_gets_sps_package_from_dag_run_conf(
self, mk_delete_documents
):
mk_dag_run = MagicMock()
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
delete_documents(**kwargs)
mk_dag_run.conf.get.assert_called_once_with("sps_package")
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.delete_documents")
def test_delete_documents_gets_ti_xcom_info(self, mk_delete_documents):
mk_dag_run = MagicMock()
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
delete_documents(**kwargs)
kwargs["ti"].xcom_pull.assert_called_once_with(
key="xmls_filenames", task_ids="list_docs_task_id"
)
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.delete_documents")
def test_delete_documents_empty_ti_xcom_info(self, mk_delete_documents):
mk_dag_run = MagicMock()
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
kwargs["ti"].xcom_pull.return_value = None
delete_documents(**kwargs)
mk_delete_documents.assert_not_called()
kwargs["ti"].xcom_push.assert_not_called()
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.delete_documents")
def test_delete_documents_calls_delete_documents_operation(
self, mk_delete_documents
):
xmls_filenames = [
"1806-907X-rba-53-01-1-8.xml",
"1806-907X-rba-53-01-9-18.xml",
"1806-907X-rba-53-01-19-25.xml",
]
mk_dag_run = MagicMock()
mk_dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
kwargs["ti"].xcom_pull.return_value = xmls_filenames
delete_documents(**kwargs)
mk_delete_documents.assert_called_once_with(
"path_to_sps_package/package.zip", xmls_filenames
)
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.delete_documents")
def test_delete_documents_pushes_xmls_to_preserve(self, mk_delete_documents):
xmls_filenames = [
"1806-907X-rba-53-01-1-8.xml",
"1806-907X-rba-53-01-9-18.xml",
"1806-907X-rba-53-01-19-25.xml",
]
xmls_to_preserve = [
"1806-907X-rba-53-01-9-18.xml",
"1806-907X-rba-53-01-19-25.xml",
]
mk_dag_run = MagicMock()
mk_dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
kwargs["ti"].xcom_pull.return_value = xmls_filenames
mk_delete_documents.return_value = xmls_to_preserve
delete_documents(**kwargs)
kwargs["ti"].xcom_push.assert_called_once_with(
key="xmls_to_preserve", value=xmls_to_preserve
)
class TestRegisterUpdateDocuments(TestCase):
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.register_update_documents")
def test_register_update_documents_gets_sps_package_from_dag_run_conf(
self, mk_register_update_documents
):
mk_dag_run = MagicMock()
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
register_update_documents(**kwargs)
mk_dag_run.conf.get.assert_called_once_with("sps_package")
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.register_update_documents")
def test_register_update_documents_gets_ti_xcom_info(self, mk_register_update_documents):
mk_dag_run = MagicMock()
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
register_update_documents(**kwargs)
kwargs["ti"].xcom_pull.assert_called_once_with(
key="xmls_to_preserve", task_ids="delete_docs_task_id"
)
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.register_update_documents")
def test_register_update_documents_empty_ti_xcom_info(self, mk_register_update_documents):
mk_dag_run = MagicMock()
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
kwargs["ti"].xcom_pull.return_value = None
register_update_documents(**kwargs)
mk_register_update_documents.assert_not_called()
kwargs["ti"].xcom_push.assert_not_called()
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.register_update_documents")
def test_register_update_documents_calls_register_update_documents_operation(
self, mk_register_update_documents
):
xmls_filenames = [
"1806-907X-rba-53-01-1-8.xml",
"1806-907X-rba-53-01-9-18.xml",
"1806-907X-rba-53-01-19-25.xml",
]
mk_dag_run = MagicMock()
mk_dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
kwargs["ti"].xcom_pull.return_value = xmls_filenames
register_update_documents(**kwargs)
mk_register_update_documents.assert_called_once_with(
"path_to_sps_package/package.zip", xmls_filenames
)
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.register_update_documents")
def test_register_update_documents_does_not_push_if_no_documents_into_kernel(self, mk_register_update_documents):
xmls_filenames = [
"1806-907X-rba-53-01-1-8.xml",
"1806-907X-rba-53-01-9-18.xml",
"1806-907X-rba-53-01-19-25.xml",
]
mk_dag_run = MagicMock()
mk_dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
kwargs["ti"].xcom_pull.return_value = xmls_filenames
mk_register_update_documents.return_value = []
register_update_documents(**kwargs)
kwargs["ti"].xcom_push.assert_not_called()
@patch("sync_documents_to_kernel.sync_documents_to_kernel_operations.register_update_documents")
def test_register_update_documents_pushes_documents(self, mk_register_update_documents):
xmls_filenames = [
"1806-907X-rba-53-01-1-8.xml",
"1806-907X-rba-53-01-9-18.xml",
"1806-907X-rba-53-01-19-25.xml",
]
documents = [
"1806-907X-rba-53-01-9-18.xml",
"1806-907X-rba-53-01-19-25.xml",
]
mk_dag_run = MagicMock()
mk_dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
kwargs = {"ti": MagicMock(), "dag_run": mk_dag_run}
kwargs["ti"].xcom_pull.return_value = xmls_filenames
mk_register_update_documents.return_value = documents
register_update_documents(**kwargs)
kwargs["ti"].xcom_push.assert_called_once_with(
key="documents", value=documents
)
if __name__ == "__main__":
main()
| 44.128571
| 117
| 0.697097
| 1,242
| 9,267
| 4.733494
| 0.067633
| 0.060214
| 0.055792
| 0.110733
| 0.886715
| 0.886715
| 0.885695
| 0.871067
| 0.865623
| 0.829393
| 0
| 0.041577
| 0.195425
| 9,267
| 209
| 118
| 44.339713
| 0.746915
| 0
| 0
| 0.668449
| 0
| 0
| 0.266861
| 0.233085
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.080214
| false
| 0
| 0.02139
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f3e127752334b9fc6796ea686021ce802492f31a
| 80
|
py
|
Python
|
src/pypidata/utils.py
|
pfmoore/temp
|
3266ff516d919e3deb7027dbcc710259985f24d4
|
[
"MIT"
] | 1
|
2021-05-12T07:01:27.000Z
|
2021-05-12T07:01:27.000Z
|
src/pypidata/utils.py
|
pfmoore/temp
|
3266ff516d919e3deb7027dbcc710259985f24d4
|
[
"MIT"
] | null | null | null |
src/pypidata/utils.py
|
pfmoore/temp
|
3266ff516d919e3deb7027dbcc710259985f24d4
|
[
"MIT"
] | null | null | null |
import re
def normalize(name):
    """Return the normalized form of a project *name* (PEP 503 style).

    Runs of ``-``, ``_`` and ``.`` collapse to a single ``-`` and the
    result is lowercased.
    """
    collapsed = re.sub(r"[-_.]+", "-", name)
    return collapsed.lower()
| 16
| 47
| 0.575
| 11
| 80
| 4.090909
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1625
| 80
| 4
| 48
| 20
| 0.671642
| 0
| 0
| 0
| 0
| 0
| 0.0875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
f3f790579d8ef665c2b11e24dc6a07c6cc75c46f
| 10,135
|
py
|
Python
|
tests/resources/lib/routes/test_animelist.py
|
jdollarKodi/plugin.video.animepie
|
874e58e153e2df53e5a47ec963584de16584ae52
|
[
"MIT"
] | null | null | null |
tests/resources/lib/routes/test_animelist.py
|
jdollarKodi/plugin.video.animepie
|
874e58e153e2df53e5a47ec963584de16584ae52
|
[
"MIT"
] | null | null | null |
tests/resources/lib/routes/test_animelist.py
|
jdollarKodi/plugin.video.animepie
|
874e58e153e2df53e5a47ec963584de16584ae52
|
[
"MIT"
] | null | null | null |
import sys
import os
import json
import unittest
from mock import call, patch, MagicMock, Mock, ANY
# TODO: Check get params of request to ensure those match what is expected
class TestAnimeList(unittest.TestCase):
    """Tests for ``resources.lib.routes.animelist``.

    Kodi- and network-facing modules are replaced with mocks in
    ``sys.modules`` before the module under test is imported, so these
    tests run outside of a real Kodi installation.
    """

    def setUp(self):
        """Install mock modules into sys.modules before each test."""
        self.dir_path = os.path.dirname(os.path.realpath(__file__))
        self.mock_requests = MagicMock()
        self.mock_xbmc_plugin = MagicMock()
        self.mock_xbmc_gui = MagicMock()
        self.mock_route_factory = MagicMock()
        modules = {
            "requests": self.mock_requests,
            "xbmcplugin": self.mock_xbmc_plugin,
            "xbmcgui": self.mock_xbmc_gui,
            # Bug fix: the Kodi module is named "xbmcaddon"; the previous
            # key "xbmcadddon" (three d's) mocked a module that does not
            # exist, so xbmcaddon itself was never mocked.
            "xbmcaddon": MagicMock(),
            "resolveurl": MagicMock(),
            "resources.lib.router_factory": self.mock_route_factory
        }
        self.module_patcher = patch.dict('sys.modules', modules)
        self.module_patcher.start()

    def tearDown(self):
        """Remove the sys.modules patches installed by setUp."""
        self.module_patcher.stop()

    def test_generate_routes(self):
        """generate_routes registers anime_list under '/anime-list'."""
        from resources.lib.routes.animelist import generate_routes, anime_list
        mock_plugin = MagicMock()
        generate_routes(mock_plugin)
        mock_plugin.add_route.assert_has_calls([
            call(anime_list, '/anime-list'),
        ])

    def test_get_current_params_returns_values_if_passed_in(self):
        """_get_current_params unwraps each single-element arg list."""
        from resources.lib.routes.animelist import _get_current_params
        expected_year = "2000"
        expected_season = "Winter"
        expected_genre = "Test,Test2"
        expected_page = "Page"
        mock_plugin = type('', (), {})
        mock_plugin.args = {
            "year": [expected_year],
            "season": [expected_season],
            "genres": [expected_genre],
            "page": [expected_page],
        }
        args = _get_current_params(mock_plugin)
        self.assertDictEqual(args, {
            "year": expected_year,
            "season": expected_season,
            "genres": expected_genre,
            "page": expected_page
        }, "Returned parameter list does not match plugin.arg values")

    def test_get_current_params_returns_empty_if_none(self):
        """_get_current_params yields an empty dict for empty plugin args."""
        from resources.lib.routes.animelist import _get_current_params
        mock_plugin = type('', (), {})
        mock_plugin.args = {}
        args = _get_current_params(mock_plugin)
        self.assertDictEqual(args, {}, "Returned parameter list does not match plugin.arg values")

    def test_successful_retrieval_page_one_none_page(self):
        """With no page arg, list items (plus 'Next Page') are created."""
        handle_val = "Random"
        # NOTE(review): this local mock_plugin is never wired into the
        # router factory (unlike the tests below) — the route module falls
        # back to the factory mock installed in setUp. Confirm intended.
        mock_plugin = type('', (), {})
        mock_plugin.args = {}
        mock_plugin.handle = handle_val
        mock_plugin.url_for = MagicMock()
        fixture_path = self.dir_path + "/fixtures/animeList/list_success.json"
        with open(fixture_path, "r") as fixture:
            mock_response = fixture.read()
        res_mock = MagicMock()
        res_mock.json.return_value = json.loads(mock_response)
        self.mock_requests.get.return_value = res_mock
        from resources.lib.routes.animelist import anime_list
        anime_list()
        self.mock_xbmc_gui.ListItem.assert_has_calls([
            call('Gintama.: Shirogane no Tamashii-hen 2'),
            call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
            call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
            call('Gintama.: Silver Soul Arc - Second Half War'),
            call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
            call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
            call('Gintama.: Shirogane no Tamashii-hen - Kouhan-sen'),
            call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
            call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
            call('Next Page')
        ])

    def test_successful_retrieval_page_one_with_selected(self):
        """Selected filters are forwarded to the API and items are listed."""
        handle_val = "Random"
        mock_url = "Random-url"
        mock_plugin = type('', (), {})
        mock_plugin.args = {
            "season": ["Summer"],
            "year": ["2018"],
            "genres": ["Test1,Test2"],
            "page": ["1"]
        }
        mock_plugin.handle = handle_val
        mock_plugin.url_for = Mock(return_value=mock_url)
        mock_route_factory = MagicMock()
        mock_route_factory.get_router_instance = mock_plugin
        sys.modules['resources.lib.router_factory'] = mock_route_factory
        fixture_path = self.dir_path + "/fixtures/animeList/list_success.json"
        with open(fixture_path, "r") as fixture:
            mock_response = fixture.read()
        res_mock = MagicMock()
        res_mock.json = Mock(return_value=json.loads(mock_response))
        self.mock_requests.get = Mock(return_value=res_mock)
        from resources.lib.routes.animelist import anime_list
        anime_list()
        self.mock_requests.get.assert_called_once_with(
            'https://api.animepie.to/Anime/AnimeMain/List',
            params={
                'sort': 1,
                'website': '',
                'genres': 'Test1,Test2',
                'season': 'Summer',
                'limit': 15,
                'year': 2018,
                'sort2': '',
                'page': 1
            }
        )
        self.mock_xbmc_gui.ListItem.assert_has_calls([
            call('Gintama.: Shirogane no Tamashii-hen 2'),
            call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
            call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
            call('Gintama.: Silver Soul Arc - Second Half War'),
            call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
            call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
            call('Gintama.: Shirogane no Tamashii-hen - Kouhan-sen'),
            call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
            call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
            call('Next Page')
        ])
        # Need to check for order of list items added
        self.mock_xbmc_plugin.addDirectoryItem.assert_has_calls([
            call(
                handle_val,
                mock_url,
                ANY,
                True
            ),
            call(
                handle_val,
                mock_url,
                ANY,
                True
            ),
            call(
                handle_val,
                mock_url,
                ANY,
                True
            ),
            call(
                handle_val,
                mock_url,
                ANY,
                True
            ),
        ]
        )

    def test_successful_retrieval_no_next_on_last_page(self):
        """On the last page no 'Next Page' entry is added."""
        handle_val = "Random"
        mock_url = "Random-url"
        mock_plugin = type('', (), {})
        mock_plugin.args = {
            "season": ["Summer"],
            "year": ["2018"],
            "genres": ["Test1,Test2"],
            "page": ["8"]
        }
        mock_plugin.handle = handle_val
        mock_plugin.url_for = Mock(return_value=mock_url)
        mock_route_factory = MagicMock()
        mock_route_factory.get_router_instance = mock_plugin
        sys.modules['resources.lib.router_factory'] = mock_route_factory
        fixture_path = self.dir_path + "/fixtures/animeList/list_success.json"
        with open(fixture_path, "r") as fixture:
            mock_response = fixture.read()
        res_mock = MagicMock()
        res_mock.json = Mock(return_value=json.loads(mock_response))
        self.mock_requests.get = Mock(return_value=res_mock)
        from resources.lib.routes.animelist import anime_list
        anime_list()
        expected_list_item_calls = [
            call('Gintama.: Shirogane no Tamashii-hen 2'),
            call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
            call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
            call('Gintama.: Silver Soul Arc - Second Half War'),
            call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
            call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
            call('Gintama.: Shirogane no Tamashii-hen - Kouhan-sen'),
            call().setArt({'icon': 'https://myanimelist.cdn-dena.com/images/anime/1518/95051.jpg'}),
            call().setInfo(infoLabels={'plot': 'Second Season of the final arc of Gintama.'}, type='video'),
        ]
        # assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(self.mock_xbmc_gui.ListItem.call_count, 3)
        self.mock_xbmc_gui.ListItem.assert_has_calls(expected_list_item_calls)
        self.mock_requests.get.assert_called_once_with(
            'https://api.animepie.to/Anime/AnimeMain/List',
            params={
                'sort': 1,
                'website': '',
                'genres': 'Test1,Test2',
                'season': 'Summer',
                'limit': 15,
                'year': 2018,
                'sort2': '',
                'page': 8
            }
        )
        # Need to check for order of list items added
        expected_calls = [
            call(
                handle_val,
                mock_url,
                ANY,
                True
            ),
            call(
                handle_val,
                mock_url,
                ANY,
                True
            ),
            call(
                handle_val,
                mock_url,
                ANY,
                True
            ),
        ]
        self.assertEqual(self.mock_xbmc_plugin.addDirectoryItem.call_count, 3)
        self.mock_xbmc_plugin.addDirectoryItem.assert_has_calls(expected_calls)
| 35.812721
| 108
| 0.569906
| 1,110
| 10,135
| 4.982883
| 0.154955
| 0.041584
| 0.023865
| 0.030917
| 0.82571
| 0.809619
| 0.776351
| 0.768939
| 0.745796
| 0.707286
| 0
| 0.018125
| 0.308633
| 10,135
| 282
| 109
| 35.939716
| 0.771229
| 0.015787
| 0
| 0.673913
| 0
| 0
| 0.222044
| 0.019557
| 0
| 0
| 0
| 0.003546
| 0.052174
| 1
| 0.034783
| false
| 0.004348
| 0.047826
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6d2329b8d31eee10bb5684e2f20c9950da9ce10b
| 51
|
py
|
Python
|
asaplib/util/__init__.py
|
BingqingCheng/ASAP
|
a92dd34eaa092dcbe46163e000ebd2ccee22f8ae
|
[
"MIT"
] | 74
|
2020-01-09T10:38:39.000Z
|
2022-03-04T15:09:05.000Z
|
asaplib/util/__init__.py
|
FelixFaber/ASAP
|
951d9667143095e42f1566816b4ab90d901b56a8
|
[
"MIT"
] | 31
|
2020-01-30T13:15:42.000Z
|
2022-03-03T05:42:51.000Z
|
asaplib/util/__init__.py
|
FelixFaber/ASAP
|
951d9667143095e42f1566816b4ab90d901b56a8
|
[
"MIT"
] | 14
|
2020-02-23T15:03:31.000Z
|
2022-03-04T15:04:04.000Z
|
from .util_fft import *
from .util_gettcv import *
| 17
| 26
| 0.764706
| 8
| 51
| 4.625
| 0.625
| 0.432432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 51
| 2
| 27
| 25.5
| 0.860465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6d28e87999e663608b066185ce3b54411a954c2f
| 14,797
|
py
|
Python
|
src/nninst/backend/tensorflow/attack/foolbox_attack_resnet_50.py
|
uchuhimo/Ptolemy
|
5c8ae188af30ee49d38f27d54c67af2eab9489e7
|
[
"Apache-2.0"
] | null | null | null |
src/nninst/backend/tensorflow/attack/foolbox_attack_resnet_50.py
|
uchuhimo/Ptolemy
|
5c8ae188af30ee49d38f27d54c67af2eab9489e7
|
[
"Apache-2.0"
] | null | null | null |
src/nninst/backend/tensorflow/attack/foolbox_attack_resnet_50.py
|
uchuhimo/Ptolemy
|
5c8ae188af30ee49d38f27d54c67af2eab9489e7
|
[
"Apache-2.0"
] | null | null | null |
import os
import numpy as np
from foolbox.attacks import (
FGSM,
DeepFoolAttack,
IterativeGradientSignAttack,
SaliencyMapAttack,
)
from nninst import mode
from nninst.backend.tensorflow.attack.common import (
get_overlay_summary_compare,
get_overlay_summary_compare_detail,
resnet_50_imagenet_overlap_ratio_top5_diff,
)
from nninst.backend.tensorflow.attack.cw_attack import cw_generate_adversarial_example
from nninst.backend.tensorflow.attack.cw_attacks import CarliniL2
from nninst.backend.tensorflow.attack.foolbox_attack import (
foolbox_generate_adversarial_example,
)
from nninst.backend.tensorflow.dataset import imagenet
from nninst.backend.tensorflow.dataset.imagenet_preprocessing import _CHANNEL_MEANS
from nninst.backend.tensorflow.trace.resnet_50_imagenet_class_trace_v3 import (
resnet_50_imagenet_class_trace_compact,
)
from nninst.statistics import calc_trace_side_overlap_both_compact
from nninst.trace import TraceKey
from nninst.utils.numpy import arg_approx
from nninst.utils.ray import ray_init
# Silence TensorFlow's C++ INFO-level logging before TF initialises.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"

if __name__ == "__main__":
    # Experiment driver: generate adversarial examples against ResNet-50 /
    # ImageNet and compute class-trace overlap metrics.  The many
    # commented-out lines are alternative experiment configurations the
    # authors switch between by hand; they are kept for reference.
    mode.debug()
    # mode.distributed()
    # mode.local()
    # ray_init("gpu")
    # ray_init("dell")
    ray_init()

    threshold = 0.5
    # threshold = 1
    # threshold = 0.8

    # Maps attack name -> [attack class, optional kwargs dict for that attack].
    attacks = {
        "FGSM": [FGSM],
        "BIM": [IterativeGradientSignAttack],
        "JSMA": [SaliencyMapAttack],
        "DeepFool": [DeepFoolAttack],
        "DeepFool_full": [DeepFoolAttack, dict(subsample=None)],
        "CWL2": [CarliniL2],
    }

    label = None
    # label = "best_in_10"
    # label = "worst_in_10"
    # label = "import"
    # label = "norm"
    print(f"attack model with label {label} using Foolbox")

    for attack_name in [
        "DeepFool",
        # "FGSM",
        # "BIM",
        # "JSMA",
        # "DeepFool_full",
        # "CWL2",
    ]:
        # NOTE(review): this rebinds the module-level `threshold` set above.
        for threshold in [
            # 1.0,
            # 0.9,
            # 0.7,
            0.5,
            # 0.3,
            # 0.1,
        ]:
            print(f"attack: {attack_name}")
            # path_template = "resnet_50_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.csv"
            # path_template = "resnet_50_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_train.foolbox.csv"
            # path_template = "resnet_50_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.weighted_iou.csv"
            # path_template = "resnet_50_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_error.foolbox.csv"
            # path_template = "resnet_50_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_rand.foolbox.csv"
            # path_template = "resnet_50_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top5_rand.foolbox.csv"
            # path_template = "resnet_50_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top5.foolbox.csv"
            # path_template = "resnet_50_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_class_1.foolbox.csv"
            # path_template = "resnet_50_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top5_diff_all.foolbox.csv"
            path_template = "resnet_50_imagenet_class_overlap_ratio_{0:.1f}_{1}_{2}_top2_diff_all.foolbox.csv"
            # overlap_fn = calc_trace_side_overlap_compact
            overlap_fn = calc_trace_side_overlap_both_compact
            # overlap_fn = calc_weighted_iou
            # per_channel = True
            per_channel = False
            # resnet_50_overlap_ratio = resnet_50_imagenet_overlap_ratio_top5(
            # resnet_50_overlap_ratio = resnet_50_imagenet_overlap_ratio(
            resnet_50_overlap_ratio = resnet_50_imagenet_overlap_ratio_top5_diff(
                attack_name=attack_name,
                attack_fn=attacks[attack_name][0],
                # CW attacks use a dedicated generator; everything else goes
                # through the generic Foolbox wrapper.
                generate_adversarial_fn=cw_generate_adversarial_example
                if attack_name.startswith("CW")
                else foolbox_generate_adversarial_example,
                class_trace_fn=lambda class_id: resnet_50_imagenet_class_trace_compact(
                    class_id, threshold, label=label
                ),
                # class_trace_fn=lambda class_id: resnet_50_imagenet_class_trace(class_id, threshold, label=label),
                select_fn=lambda input: arg_approx(input, threshold),
                overlap_fn=overlap_fn,
                # overlap_fn=calc_iou,
                # overlap_fn=calc_class_trace_side_overlap,
                # overlap_fn=calc_class_trace_side_overlap_norm,
                # overlap_fn=calc_weighted_iou,
                path=path_template.format(threshold, attack_name, label),
                # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.iou.csv'.format(threshold, attack_name, label),
                # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.class_side.csv'.format(
                # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.wo_pool.csv'.format(
                # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.class_side_norm.csv'.format(
                # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.weighted_iou.csv'.format(
                #     threshold, attack_name, label),
                preprocessing=(_CHANNEL_MEANS, 1),
                bounds=(0, 255),
                channel_axis=3,
                image_size=224,
                class_num=1001,
                norm_fn=imagenet.normalize,
                data_format="channels_last",
                per_channel=per_channel,
                **(attacks[attack_name][1] if len(attacks[attack_name]) == 2 else {}),
            )
            # resnet_50_overlap_ratio = resnet_50_imagenet_overlap_ratio_error(
            #     class_trace_fn=lambda class_id: resnet_50_imagenet_class_trace_compact(class_id, threshold, label=label),
            #     select_fn=lambda input: arg_approx(input, threshold),
            #     overlap_fn=overlap_fn,
            #     path=path_template.format(threshold, attack_name, label),
            # )
            # resnet_50_overlap_ratio = resnet_50_imagenet_overlap_ratio_rand(
            # resnet_50_overlap_ratio = resnet_50_imagenet_overlap_ratio_top5_rand(
            #     class_trace_fn=lambda class_id: resnet_50_imagenet_class_trace_compact(class_id, threshold, label=label),
            #     select_fn=lambda input: arg_approx(input, threshold),
            #     overlap_fn=overlap_fn,
            #     path=path_template.format(threshold, attack_name, label),
            # )
            # resnet_50_overlap_ratio.save()
            # print("edge:")
            # summary = get_overlay_summary(lenet_overlap_ratio.load(), TraceKey.EDGE)
            # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.csv".format(
            #     threshold=threshold, label=label)
            # file_exists = os.path.exists(summary_file)
            # with open(summary_file, "a") as csv_file:
            #     headers = ["attack"] + list(summary.keys())
            #     writer = csv.DictWriter(csv_file, delimiter=',', lineterminator='\n', fieldnames=headers)
            #     if not file_exists:
            #         writer.writeheader()
            #     writer.writerow({"attack": attack_name, **summary})
            # print(summary)
            # print("weight:")
            # print(get_overlay_summary(lenet_overlap_ratio.load(), TraceKey.WEIGHT))
            # print("point:")
            # print(get_overlay_summary(lenet_overlap_ratio.load(), TraceKey.POINT))
            # summary_path_template = "resnet_50_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_diff_all_compare.{key}.csv"
            summary_path_template = "resnet_50_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top2_diff_all_compare.{key}.csv"
            # key = TraceKey.EDGE
            # # summary_file = "alexnet_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}.{key}.csv".format(
            # summary_file = summary_path_template.format(
            # # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.iou.csv".format(
            # # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.class_side.csv".format(
            # # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.wo_pool.csv".format(
            # # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.class_side_norm.csv".format(
            # # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.weighted_iou.csv".format(
            #     threshold=threshold, attack=attack_name, label=label, key=key)
            # with open(summary_file, "w") as csv_file:
            #     has_header = False
            #     for overlay_threshold in np.linspace(-1, 1, 201):
            #         # summary = get_overlay_summary(alexnet_overlap_ratio.load(), key, overlay_threshold)
            #         # summary = get_overlay_summary_top1(alexnet_overlap_ratio.load(), key, overlay_threshold)
            #         summary = get_overlay_summary_compare(resnet_50_overlap_ratio.load(), key, float(overlay_threshold))
            #         # summary = get_overlay_summary_compare_filter(alexnet_overlap_ratio.load(), key, float(overlay_threshold))
            #         # summary = get_overlay_summary_one_side(alexnet_overlap_ratio.load(), key, overlay_threshold)
            #         if not has_header:
            #             headers = ["attack"] + list(summary.keys())
            #             writer = csv.DictWriter(csv_file, delimiter=',', lineterminator='\n', fieldnames=headers)
            #             writer.writeheader()
            #             has_header = True
            #         writer.writerow({"attack": attack_name, **summary})
            summary_file = summary_path_template.format(
                threshold=threshold, attack=attack_name, label=label, key="detail"
            )
            # Compute the detailed overlap-comparison summary and write it to
            # summary_file.
            get_overlay_summary_compare_detail(
                summary_file, resnet_50_overlap_ratio.load(), from_zero=False
            ).save()
            # for overlay_threshold in np.arange(0, 1.01, 0.01):
            #     # summary = get_overlay_summary(resnet_50_overlap_ratio.load(), TraceKey.EDGE, overlay_threshold)
            #     for key in [TraceKey.EDGE, TraceKey.WEIGHT]:
            #         summary = get_overlay_summary(resnet_50_overlap_ratio.load(), key, overlay_threshold)
            #         # summary = get_overlay_summary_one_side(resnet_50_overlap_ratio.load(), key, overlay_threshold)
            #         # summary_path_template = "resnet_50_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}.csv"
            #         # summary_path_template = "resnet_50_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_train.{key}.csv"
            #         # summary_path_template = "resnet_50_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_train.{key}.weighted_iou.csv"
            #         # summary_path_template = "resnet_50_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_error.{key}.csv"
            #         # summary_path_template = "resnet_50_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_rand.{key}.csv"
            #         # summary_path_template = "resnet_50_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5_rand.{key}.csv"
            #         # summary_path_template = "resnet_50_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_top5.{key}.csv"
            #         summary_path_template = "resnet_50_imagenet_class_overlap_ratio_summary_{threshold:.1f}_{attack}_{label}_class_1.{key}.csv"
            #         summary_file = summary_path_template.format(
            #         # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.iou.csv".format(
            #         # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.class_side.csv".format(
            #         # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.wo_pool.csv".format(
            #         # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.class_side_norm.csv".format(
            #         # summary_file = "lenet_class_overlap_ratio_summary_{threshold:.1f}_{label}.weighted_iou.csv".format(
            #             threshold=threshold, attack=attack_name, label=label, key=key)
            #         file_exists = os.path.exists(summary_file)
            #         with open(summary_file, "a") as csv_file:
            #             headers = ["attack"] + list(summary.keys())
            #             writer = csv.DictWriter(csv_file, delimiter=',', lineterminator='\n', fieldnames=headers)
            #             if not file_exists:
            #                 writer.writeheader()
            #             writer.writerow({"attack": attack_name, **summary})
            # resnet_50_overlap_ratio_per_node = resnet_50_imagenet_overlap_ratio(
            #     attack_fn=attacks[attack_name][0],
            #     generate_adversarial_fn=generate_adversarial_example,
            #     class_trace_fn=lambda class_id: resnet_50_imagenet_class_trace_compact(class_id, threshold, label=label),
            #     # class_trace_fn=lambda class_id: lenet_mnist_class_trace(class_id, threshold),
            #     select_fn=lambda input: arg_approx(input, threshold),
            #     overlap_fn=calc_trace_side_overlap_compact,
            #     # overlap_fn=calc_iou,
            #     # overlap_fn=calc_class_trace_side_overlap,
            #     # overlap_fn=calc_class_trace_side_overlap_norm,
            #     # overlap_fn=calc_weighted_iou,
            #     path='resnet_50_imagenet_class_overlap_ratio_per_node_{0:.1f}_{1}_{2}.foolbox.csv'.format(
            #         threshold, attack_name, label),
            #     # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.iou.csv'.format(threshold, attack_name, label),
            #     # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.class_side.csv'.format(
            #     # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.wo_pool.csv'.format(
            #     # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.class_side_norm.csv'.format(
            #     # path='lenet_class_overlap_ratio_{0:.1f}_{1}_{2}.foolbox.weighted_iou.csv'.format(
            #     #     threshold, attack_name, label),
            #     preprocessing=(_CHANNEL_MEANS, 1),
            #     per_node=True,
            #     **(attacks[attack_name][1] if len(attacks[attack_name]) == 2 else {}),
            # )
            # resnet_50_overlap_ratio_per_node.save()
| 59.906883
| 158
| 0.640738
| 1,706
| 14,797
| 5.080891
| 0.100234
| 0.099677
| 0.084333
| 0.067836
| 0.811721
| 0.765805
| 0.73258
| 0.709276
| 0.700969
| 0.639825
| 0
| 0.025303
| 0.252146
| 14,797
| 246
| 159
| 60.150407
| 0.757997
| 0.627019
| 0
| 0.02439
| 0
| 0
| 0.065817
| 0.035815
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.182927
| 0
| 0.182927
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6d4c98d5013f159132ac60e600d786ee3cdc259c
| 25
|
py
|
Python
|
dataloader/__init__.py
|
nikhil1024/PSMNet
|
ca5bf6753e84bf448895db42d498a137ed722594
|
[
"MIT"
] | null | null | null |
dataloader/__init__.py
|
nikhil1024/PSMNet
|
ca5bf6753e84bf448895db42d498a137ed722594
|
[
"MIT"
] | null | null | null |
dataloader/__init__.py
|
nikhil1024/PSMNet
|
ca5bf6753e84bf448895db42d498a137ed722594
|
[
"MIT"
] | null | null | null |
from dataloader import *
| 12.5
| 24
| 0.8
| 3
| 25
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1e2a1a4a4d088d0f6af54806d09475266b32060c
| 156
|
py
|
Python
|
servidor_cliente/vax_control/exceptions.py
|
PedroLucasMiguel/trabalho-engenharia-de-software
|
e95527f085005c3b717bdc0923f5d14acd65a6f4
|
[
"MIT"
] | null | null | null |
servidor_cliente/vax_control/exceptions.py
|
PedroLucasMiguel/trabalho-engenharia-de-software
|
e95527f085005c3b717bdc0923f5d14acd65a6f4
|
[
"MIT"
] | null | null | null |
servidor_cliente/vax_control/exceptions.py
|
PedroLucasMiguel/trabalho-engenharia-de-software
|
e95527f085005c3b717bdc0923f5d14acd65a6f4
|
[
"MIT"
] | null | null | null |
class InvalidFullNameException(Exception):
    """Raised for an invalid full name (per the class name; behavior is plain Exception)."""
class InvalidCPFException(Exception):
    """Raised for an invalid CPF (per the class name; behavior is plain Exception)."""
class EmergencyMeetingException(Exception):
    """Domain-specific error type; carries no extra state beyond Exception."""
| 14.181818
| 43
| 0.782051
| 12
| 156
| 10.166667
| 0.5
| 0.319672
| 0.295082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160256
| 156
| 10
| 44
| 15.6
| 0.931298
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
1e36f1e2ae5ddfe99dd5a797c7fd43899e122c3b
| 39
|
py
|
Python
|
jinahub/segmenters/PDFSegmenter/__init__.py
|
sauravgarg540/executors
|
c06a16633767346eee96ec019ae6a171f125f6cb
|
[
"Apache-2.0"
] | null | null | null |
jinahub/segmenters/PDFSegmenter/__init__.py
|
sauravgarg540/executors
|
c06a16633767346eee96ec019ae6a171f125f6cb
|
[
"Apache-2.0"
] | null | null | null |
jinahub/segmenters/PDFSegmenter/__init__.py
|
sauravgarg540/executors
|
c06a16633767346eee96ec019ae6a171f125f6cb
|
[
"Apache-2.0"
] | null | null | null |
from .pdf_segmenter import PDFSegmenter
| 39
| 39
| 0.897436
| 5
| 39
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1e79370c0eaab1ed491a5469eb8e029e278bec8b
| 44
|
py
|
Python
|
chat/__main__.py
|
oinqu/network-protocol-design-course
|
3f190c1a5423a472e98af94e00da08a7c4718028
|
[
"MIT"
] | null | null | null |
chat/__main__.py
|
oinqu/network-protocol-design-course
|
3f190c1a5423a472e98af94e00da08a7c4718028
|
[
"MIT"
] | null | null | null |
chat/__main__.py
|
oinqu/network-protocol-design-course
|
3f190c1a5423a472e98af94e00da08a7c4718028
|
[
"MIT"
] | null | null | null |
from App import App
# Build the application object and hand control to its main entry point.
# (App's behavior is defined in the imported App module.)
app = App()
app.main()
| 8.8
| 19
| 0.659091
| 8
| 44
| 3.625
| 0.5
| 0.62069
| 0.62069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204545
| 44
| 4
| 20
| 11
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
94afc3801ad33ac542a38c7c118aaf3297b38ae2
| 64
|
py
|
Python
|
matrix/__init__.py
|
exted/matrix
|
365421e8e462872087173e41c63adee96f76d207
|
[
"MIT"
] | null | null | null |
matrix/__init__.py
|
exted/matrix
|
365421e8e462872087173e41c63adee96f76d207
|
[
"MIT"
] | null | null | null |
matrix/__init__.py
|
exted/matrix
|
365421e8e462872087173e41c63adee96f76d207
|
[
"MIT"
] | null | null | null |
from . import nodes
from . import algos
from . import simulation
| 21.333333
| 24
| 0.78125
| 9
| 64
| 5.555556
| 0.555556
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171875
| 64
| 3
| 24
| 21.333333
| 0.943396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
94ccca7ffc987cbe5df4c550743671afebadd79c
| 507
|
py
|
Python
|
benchmarks/models/__init__.py
|
maudl3116/GPS
|
381a8e209bfec4c06b43ed4b69445bdf5c365409
|
[
"Apache-2.0"
] | null | null | null |
benchmarks/models/__init__.py
|
maudl3116/GPS
|
381a8e209bfec4c06b43ed4b69445bdf5c365409
|
[
"Apache-2.0"
] | null | null | null |
benchmarks/models/__init__.py
|
maudl3116/GPS
|
381a8e209bfec4c06b43ed4b69445bdf5c365409
|
[
"Apache-2.0"
] | 1
|
2021-01-30T12:58:46.000Z
|
2021-01-30T12:58:46.000Z
|
from .train_gpkconv1d import train_gpkconv1d_classifier
from .train_gprnn import train_gprnn_classifier
from .train_gpsig import train_gpsig_classifier
from .train_gpsig_ import train_gpsig_classifier_
from .train_gpsig_regression import train_gpsig_regressor
from .train_gpsig_vosf import train_gpsig_vosf_classifier
from .train_gpsig_vosf_regression import train_gpsig_vosf_regressor
from .train_gpsigrnn import train_gpsigrnn_classifier
from .train_gpsigrnn_vosf import train_gpsigrnn_vosf_classifier
| 39
| 67
| 0.905325
| 71
| 507
| 5.943662
| 0.15493
| 0.236967
| 0.270142
| 0.227488
| 0.246446
| 0.246446
| 0.246446
| 0.246446
| 0.246446
| 0.246446
| 0
| 0.004274
| 0.076923
| 507
| 12
| 68
| 42.25
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
94dfdb1b54e79583c61a59cf0f6bbffe1fd6e3cf
| 13,086
|
py
|
Python
|
numerics/ODE.py
|
MGokcayK/numerics
|
17d199f475a004911359db568d7b83939aa12481
|
[
"MIT"
] | null | null | null |
numerics/ODE.py
|
MGokcayK/numerics
|
17d199f475a004911359db568d7b83939aa12481
|
[
"MIT"
] | null | null | null |
numerics/ODE.py
|
MGokcayK/numerics
|
17d199f475a004911359db568d7b83939aa12481
|
[
"MIT"
] | null | null | null |
"""
Author : Mehmet Gokcay Kabatas
Mail : mgokcaykdev@gmail.com
Version : 0.1
Date : 04/12/2019
Update : 14/12/2019
Python : 3.6.5
Update Note : Arranging system of ODE methods and descriptions.
This script written by @Author for personal usage.
Prerequest : numpy
"""
import numpy as np
class ODE():
"""
This class written for numerical methods for Ordinary
Differential Equations(ODE).
@Methods :
- Euler
- Heun
- Midpoint
- RK2
- RK3
- RK4
- RK5
- System of ODE's Euler
- System of ODE's RK4
@Usage :
...
solver = ODE()
solver.@Methods
...
"""
def Euler(self, xi, xf, yi, h, dydx):
""" Euler Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
dydx : Target function's derivative function
which argument depend on 'x and y'.
@ Example :
def df(x,y):
return (2 x + y)
...
solver = ODE()
solver.Euler(0,5,2,0.2,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
y_next = yi + dydx(xi,yi) * h
xi += h
yi = y_next
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
    def SystemEuler(self, xi, xf, yi, h, dydx):
        """ Euler Method for System of ODE.

        Integrates a coupled system y_j' = dydx[j](args) from xi to xf
        with fixed Euler steps of size h.

        @Note : yi and dydx should be arrays (one entry per equation).
            Each derivative function takes a single list `args` holding
            [x, y1, y2, ...] — see '@Args'.
            WARNING: `yi` is mutated in place by this method.

        Arguments :
        -------------
        xi = Initial value of x for each function.
        xf = Final value of x for each function.
        yi = Initial values of y, one per function (mutated in place).
        h = Step size.
        dydx : list of derivative callables, each taking the `args` list.
        @Args :
            Order of parameters must be consistent across functions. \n
            If f1(x,y1,y2,...) and f2(x,y1,y2,...) then the arguments are
            packed as args = [x, y1, y2, ...]. \n
        @ Example :
            dy1dx : -0.5x + y1
            dy2dx : 0.2y1 + 0.6y2 - 3x
            def df1(args):
                return (-0.5 * args[0] + args[1])
            def df2(args):
                return (0.2 * args[1] + 0.6 * args[2] - 3 * args[0])
            ...
            solver = ODE()
            solver.SystemEuler(0,5,[2,2],0.2,[df1,df2])
            ...
        Return :
        --------
        x_arr, y_arr : numpy arrays of the x points and y estimates
            (y_arr has one row per step, one column per equation).
        """
        x_arr, y_arr, args = np.array([xi]), np.array([yi]), []
        while (xi + h <= xf):
            # NOTE(review): this clamp can never fire — the loop condition
            # already guarantees xi + h <= xf.
            if (xi + h ) > xf:
                h = xf - xi
            # Snapshot [x, y1, y2, ...] at the *start* of the step, before
            # any yi[j] is overwritten below.
            args.append(xi)
            for g in range(len(dydx)):
                args.append(yi[g])
            # Advance every state variable one Euler step (mutates yi).
            for j in range(len(dydx)):
                yi[j] = yi[j] + dydx[j](args) * h
            xi += h
            # np.append copies, so y_arr rows are not aliased to yi.
            x_arr = np.append(x_arr,[xi],0)
            y_arr = np.append(y_arr,[yi],0)
            args = []
        return x_arr, y_arr
def Heun(self, xi, xf, yi, h, dydx):
""" Heun Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
dydx : Target function's derivative function
which argument depend on 'x and y'.
@ Example :
def df(x,y):
return (2 x + y)
...
solver = ODE()
solver.Heun(0,5,2,0.2,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
y_next_0 = yi + dydx(xi,yi) * h
y_next_1 = dydx(xi + h, y_next_0)
yi = yi + (dydx(xi,yi) + y_next_1) / 2 * h
xi += h
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
def Midpoint(self, xi, xf, yi, h, dydx):
""" Midpoint Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
dydx : Target function's derivative function
which argument depend on 'x and y'.
@ Example :
def df(x,y):
return (2 x + y)
...
solver = ODE()
solver.Midpoint(0,5,2,0.2,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
y_next_hl = yi + dydx(xi,yi) * h / 2
yi = yi + dydx(xi + h/2, y_next_hl) * h
xi += h
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
def RK2(self, xi, xf, yi, h, a1, a2, p1, q11, dydx):
""" Second Order Runge Kutta Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
a1, a2, p1, q11 = Calculation constants.
@Prop:
a1 + a2 = 1
a2 . p1 = 1/2
a2 . q11 = 1/2
dydx : Target function's derivative function
which argument depend on 'x and y'.
@ Example :
def df(x,y):
return (2 x + y)
...
solver = ODE()
solver.RK2(0,5,2,0.2,1/2,1/2,1,1,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
k1 = dydx(xi, yi)
k2 = dydx(xi + p1 * h, yi + q11 * k1 * h)
yi = yi + (a1*k1 + a2*k2)*h
xi += h
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
def RK3(self, xi, xf, yi, h, dydx):
""" Third Order Runge Kutta Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
dydx : Target function's derivative function
which argument depend on 'x and y'.
@ Example :
def df(x,y):
return (2 x + y)
...
solver = ODE()
solver.RK3(0,5,2,0.2,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
k1 = dydx(xi, yi)
k2 = dydx(xi + 1/2 * h, yi + 1/2 * k1 * h)
k3 = dydx(xi + h, yi - k1*h + 2*k2*h)
yi = yi + 1/6 * (k1 + 4*k2 + k3)*h
xi += h
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
def RK4(self, xi, xf, yi, h, dydx):
""" Fourth Order Runge Kutta Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
dydx : Target function's derivative function
which argument depend on 'x and y'.
@ Example :
def df(x,y):
return (2 x + y)
...
solver = ODE()
solver.RK4(0,5,2,0.2,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
k1 = dydx(xi, yi)
k2 = dydx(xi + 1/2 * h, yi + 1/2 * k1 * h)
k3 = dydx(xi + 1/2 * h, yi + 1/2 * k2 * h)
k4 = dydx(xi + h , yi + k3 * h)
yi = yi + 1/6 * (k1 + 2*k2 + 2*k3 + k4)*h
xi += h
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
def SystemRK4(self, xi, xf, yi, h, dydx):
""" Forth Order Runge Kutta Method for System of ODE.
@Note : yi and dydx should be array.
`Derivative functions parameter should be written
w.r.t args. Description in '@Args'.`
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
dydx : Target function's derivative function
which argument depend on 'args'.
@Args :
Order of parameter of function should be same. \n
If f1(x,y1,y2,...) and f2(x,y1,y2,...) then function's arguments should be in array args = [x,y1,y2,...]. \n
@ Example :
dy1dx : -0.5x + y1
dy2dx : 0.2y1 + 0.6y2 - 3x
: First function x parameter (x) in args[0] and y
parameter (y1) in args[1]. \n
: Second function y
parameter (y2) in args[2].
def df1(args):
return (-0.5 args[0] + args[1])
def df2(args):
return (0.2 args[1] + 0.6 args[2] - 3 args[0])
...
solver = ODE()
solver.SystemRK4(0,5,[2,2],0.2,[df1,df2])
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr, args = np.array([xi]), np.array([yi]), []
k_arr = np.empty((4,len(dydx)))
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
args.append(xi)
for g in range(len(dydx)):
args.append(yi[g])
for i in range(len(dydx)):
k_arr[0][i] = dydx[i](args)
args[0] = xi + 1/2 * h
for i in range(len(dydx)):
args[i+1] = yi[i] + 1/2 * k_arr[0][i] * h
k_arr[1][i] = dydx[i](args)
args[0] = xi + 1/2 * h
for i in range(len(dydx)):
args[i+1] = yi[i] + 1/2 * k_arr[1][i] * h
k_arr[2][i] = dydx[i](args)
args[0] = xi + h
for i in range(len(dydx)):
args[i+1] = yi[i] + k_arr[2][i] * h
k_arr[3][i] = dydx[i](args)
yi[i] = yi[i] + 1/6 * (k_arr[0][i] + 2*k_arr[1][i] + 2*k_arr[2][i] + k_arr[3][i])*h
xi += h
x_arr = np.append(x_arr,[xi],0)
y_arr = np.append(y_arr,[yi],0)
args = []
return x_arr, y_arr
def RK5(self, xi, xf, yi, h, dydx):
""" Fifth Order Runge Kutta Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
dydx : Target function's derivative function
which argument depend on 'x and y'.
@ Example :
def df(x,y):
return (2 x + y)
...
solver = ODE()
solver.RK5(0,5,2,0.2,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
k1 = dydx(xi, yi)
k2 = dydx(xi + 1/4 * h, yi + 1/4 * k1 * h)
k3 = dydx(xi + 1/4 * h, yi + 1/8 * k1 * h + 1/8 * k2 * h)
k4 = dydx(xi + 1/2 * h, yi - 1/2 * k2 * h + k3 * h)
k5 = dydx(xi + 3/4 * h, yi + 3/16 * k1 * h + 9/16 * k4 * h)
k6 = dydx(xi + h , yi - 3/7 * k1 * h + 2/7 * k2 * h + 12/7 * k3 * h - 12/7 * k4 * h + 8/7 * k5 * h)
yi = yi + 1/90 * (7 * k1 + 32*k3 + 12*k4 + 32*k5 + 7*k6)*h
xi += h
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
| 25.558594
| 120
| 0.404707
| 1,763
| 13,086
| 2.946115
| 0.087351
| 0.029265
| 0.025992
| 0.041586
| 0.815556
| 0.79072
| 0.75876
| 0.752214
| 0.752214
| 0.744513
| 0
| 0.050864
| 0.460645
| 13,086
| 511
| 121
| 25.608611
| 0.685038
| 0.446661
| 0
| 0.681481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.007407
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bf5f3141b7f3a28885d055ded4aacbbcd9bfd9e3
| 9,432
|
py
|
Python
|
apis_highlighter/highlighter.py
|
acdh-oeaw/apis_highlighter
|
2d368ee7fb9c6096a557ec2a24a1864cd776f5f1
|
[
"MIT"
] | 1
|
2019-07-13T10:52:32.000Z
|
2019-07-13T10:52:32.000Z
|
apis_highlighter/highlighter.py
|
acdh-oeaw/apis_highlighter
|
2d368ee7fb9c6096a557ec2a24a1864cd776f5f1
|
[
"MIT"
] | 3
|
2018-08-01T13:44:55.000Z
|
2020-01-22T09:28:36.000Z
|
apis_highlighter/highlighter.py
|
acdh-oeaw/apis_highlighter
|
2d368ee7fb9c6096a557ec2a24a1864cd776f5f1
|
[
"MIT"
] | null | null | null |
import re
from django.contrib.contenttypes.models import ContentType
from apis_core.apis_metainfo.models import Text
from apis_highlighter.models import Annotation
def highlight_text_new(*args, **kwargs):
    """Render a Text object's content as HTML with <mark> annotation spans.

    Keyword arguments (popped from **kwargs):
        set_ann_proj: annotation-project pk used to filter annotations.
        types: iterable of ContentType pk strings; annotations whose linked
            entity's content type is not listed are skipped.
        users_show: iterable of user ids; only their annotations are used.
        inline_annotations: when False the returned HTML is the plain text
            (with line breaks converted) and no <mark> tags.

    The last positional argument is the Text instance (or its pk as str).
    Returns a tuple (html, annotations); annotations is None when no
    matching annotations exist, otherwise a list of the second values
    returned by Annotation.get_html_markup(include_object=True).
    """
    ann_proj_pk = kwargs.pop("set_ann_proj", False)
    types = kwargs.pop("types", False)
    users_show = kwargs.pop("users_show", False)
    inline_annotations = kwargs.pop("inline_annotations", True)
    # Running character span of the current overlap group.
    t_start = 0
    t_end = 0
    obj = args[-1]
    if isinstance(obj, str):
        obj = Text.objects.get(pk=obj)
    lst_annot = []
    queries = dict()
    if users_show:
        queries["user_added_id__in"] = users_show
    if ann_proj_pk:
        queries["annotation_project__pk"] = ann_proj_pk
    queries["text"] = obj
    anns1 = Annotation.objects.filter(**queries).order_by("start")
    anns_fin = []
    for ann in anns1:
        # Line-break offset correction is disabled here (the older
        # highlight_text still applies it) — annotations pass through as-is.
        # for lb in re.finditer(r"[\r\n]", obj.text):
        # if lb.start() < ann.start + (lb.end() - lb.start()):
        # ann.start += lb.end() - lb.start()
        # ann.end += lb.end() - lb.start()
        anns_fin.append(ann)
    for an in anns_fin:
        if types:
            m = an.entity_link
            if m is not None:
                t = ContentType.objects.get_for_model(m)
                if not (str(t.pk) in types):
                    continue
        # c_start = re.findall(r"[\r\n]+", obj.text[: an.start])
        # if len(c_start) > 0:
        # an.start += len("".join(c_start))
        # c_end = re.findall(r"[\r\n]+", obj.text[: an.end])
        # if len(c_end) > 0:
        # an.end += len("".join(c_end))
        # Group annotations that start inside the previous group's span so
        # they render inside a single <mark> element.
        if an.start >= t_start and an.start <= t_end and len(lst_annot) > 0:
            lst_annot[-1].append(an)
        else:
            lst_annot.append(
                [
                    an,
                ]
            )
        t_start = an.start
        t_end = an.end
    if len(lst_annot) == 0:
        # No annotations: return the plain text with HTML line breaks.
        html_return = obj.text
        html_return, nmbs = re.subn(r"\r\n", "<br/>", html_return)
        html_return, nmbs = re.subn(r"\r", "<br/>", html_return)
        html_return, nmbs = re.subn(r"\n", "<br/>", html_return)
        return html_return, None
    html_return = obj.text[: lst_annot[0][0].start]
    end = ""
    lst_end = None
    res_annotations = []
    for an in lst_annot:
        # The group's span is the union of its members' spans.
        start = min([x.start for x in an])
        end = max([x.end for x in an])
        if len(an) > 1:
            # Overlapping annotations share one "complex" mark element.
            start_span = """<mark class="highlight hl_text_complex" data-hl-type="complex" data-hl-start="{}" data-hl-end="{}" data-hl-text-id="{}">""".format(
                start, end, obj.pk
            )
            for an2 in an:
                _, res_ann = an2.get_html_markup(include_object=True)
                res_annotations.append(res_ann)
        else:
            start_span, res_ann = an[0].get_html_markup(include_object=True)
            res_annotations.append(res_ann)
        if lst_end:
            # Append the unannotated gap since the previous group first.
            html_return += (
                obj.text[lst_end:start] + start_span + obj.text[start:end] + "</mark>"
            )
        else:
            html_return += start_span + obj.text[start:end] + "</mark>"
        lst_end = end
    html_return += obj.text[end:]
    # NOTE(review): assumes obj.text is non-empty at this point (annotations
    # exist, so it presumably is) — confirm upstream.
    if obj.text[0] == "\n":
        html_return = "-" + html_return[1:]
    if not inline_annotations:
        # Caller asked for text only: discard the markup built above.
        html_return = obj.text
    html_return, nmbs = re.subn(r"\r\n", "<br/>", html_return)
    html_return, nmbs = re.subn(r"\r", "<br/>", html_return)
    html_return, nmbs = re.subn(r"\n", "<br/>", html_return)
    return html_return, res_annotations
def highlight_text(*args, **kwargs):
    """Render a Text object's content as HTML with annotation <mark> spans.

    Keyword arguments (popped from **kwargs):
        set_ann_proj: annotation-project pk used to filter annotations.
        types: iterable of ContentType pk strings; annotations whose linked
            entity's content type is not listed are skipped.
        users_show: iterable of user ids; only their annotations are used.

    The last positional argument is the Text instance (or its pk as str).
    Returns the HTML string with line breaks converted to <br/>.

    Fix: the overlap-grouping condition now guards on a non-empty group
    list, matching highlight_text_new.  Previously an annotation starting
    at offset 0 satisfied `an.start >= t_start and an.start <= t_end` on
    the first iteration and `lst_annot[-1]` raised IndexError.
    """
    ann_proj_pk = kwargs.pop("set_ann_proj", False)
    types = kwargs.pop("types", False)
    users_show = kwargs.pop("users_show", False)
    # Running character span of the current overlap group.
    t_start = 0
    t_end = 0
    obj = args[-1]
    if isinstance(obj, str):
        obj = Text.objects.get(pk=obj)
    # Without both filters there is nothing to highlight.
    if not types or not users_show:
        return obj.text
    lst_annot = []
    queries = dict()
    if users_show:
        queries["user_added_id__in"] = users_show
    if ann_proj_pk:
        queries["annotation_project__pk"] = ann_proj_pk
    queries["text"] = obj
    anns1 = Annotation.objects.filter(**queries).order_by("start")
    anns_fin = []
    for ann in anns1:
        # Shift annotation offsets for line breaks that precede them
        # (the breaks are replaced with <br/> tags below).
        for lb in re.finditer(r"[\r\n]", obj.text):
            if lb.start() < ann.start + (lb.end() - lb.start()):
                ann.start += lb.end() - lb.start()
                ann.end += lb.end() - lb.start()
        anns_fin.append(ann)
    for an in anns_fin:
        if types:
            m = an.entity_link
            if m is not None:
                t = ContentType.objects.get_for_model(m)
                if not (str(t.pk) in types):
                    continue
        # Group annotations that start inside the previous group's span so
        # they render inside a single <mark>.  The len() guard prevents
        # indexing an empty list when the first annotation starts at 0.
        if an.start >= t_start and an.start <= t_end and len(lst_annot) > 0:
            lst_annot[-1].append(an)
        else:
            lst_annot.append(
                [
                    an,
                ]
            )
        t_start = an.start
        t_end = an.end
    if len(lst_annot) == 0:
        # No annotations: return the plain text with HTML line breaks.
        html_return = obj.text
        html_return, nmbs = re.subn(r"\r\n", "<br/>", html_return)
        html_return, nmbs = re.subn(r"\r", "<br/>", html_return)
        html_return, nmbs = re.subn(r"\n", "<br/>", html_return)
        return html_return
    html_return = obj.text[: lst_annot[0][0].start]
    end = ""
    lst_end = None
    for an in lst_annot:
        # The group's span is the union of its members' spans.
        start = min([x.start for x in an])
        end = max([x.end for x in an])
        if len(an) > 1:
            # Overlapping annotations share one "complex" mark element.
            start_span = """<mark class="highlight hl_text_complex" data-hl-type="complex" data-hl-start="{}" data-hl-end="{}" data-hl-text-id="{}">""".format(
                start, end, obj.pk
            )
        else:
            start_span = an[0].get_html_markup()
        if lst_end:
            # Append the unannotated gap since the previous group first.
            html_return += (
                obj.text[lst_end:start] + start_span + obj.text[start:end] + "</mark>"
            )
        else:
            html_return += start_span + obj.text[start:end] + "</mark>"
        lst_end = end
    html_return += obj.text[end:]
    # NOTE(review): assumes obj.text is non-empty here — confirm upstream.
    if obj.text[0] == "\n":
        html_return = "-" + html_return[1:]
    html_return, nmbs = re.subn(r"\r\n", "<br/>", html_return)
    html_return, nmbs = re.subn(r"\r", "<br/>", html_return)
    html_return, nmbs = re.subn(r"\n", "<br/>", html_return)
    return html_return
def highlight_textTEI(*args, **kwargs):
    """Render a Text object's content as TEI-style markup with <name> tags.

    The last positional argument is the Text instance (or its pk as str).
    Returns the text with each annotation group wrapped in a <name>
    element; simple (non-overlapping) annotations additionally get an
    <index><term>...</term></index> carrying the entity's URI.

    NOTE(review): `user_pk`, `ann_proj_pk` and `lst_classes` are computed
    but never used — presumably leftovers; confirm before removing.
    """
    user_pk = kwargs.pop("user", False)
    ann_proj_pk = kwargs.pop("ann_proj", False)
    obj = args[-1]
    # Running character span of the current overlap group.
    t_start = 0
    t_end = 0
    if isinstance(obj, str):
        obj = Text.objects.get(pk=obj)
    lst_annot = []
    # Group annotations that start inside the previous group's span so
    # they render inside a single <name> element.
    for an in Annotation.objects.filter(text=obj).order_by("start"):
        if an.start >= t_start and an.start <= t_end:
            lst_annot[-1].append(an)
        else:
            lst_annot.append(
                [
                    an,
                ]
            )
        t_start = an.start
        t_end = an.end
    # print(lst_annot)
    if len(lst_annot) == 0:
        return obj.text
    html_return = obj.text[: lst_annot[0][0].start]
    end = ""
    lst_end = None
    for an in lst_annot:
        # The group's span is the union of its members' spans.
        start = min([x.start for x in an])
        end = max([x.end for x in an])
        # Fall back through relation_type.pk, then kind.pk, then "".
        # (Result is currently unused — see note in the docstring.)
        try:
            lst_classes = str(an[0].entity_link.relation_type.pk)
        except:
            try:
                lst_classes = str(an[0].entity_link.kind.pk)
            except:
                lst_classes = ""
        if len(an) > 1:
            # Overlapping annotations share one "complex" name element.
            start_span = '<name type="complex" hl-start="{}" hl-end="{}" hl-text-id="{}">'.format(
                start, end, obj.pk
            )
        else:
            # Entity metadata is best-effort: missing links yield "".
            try:
                entity_type = type(an[0].entity_link).__name__
                entity_pk = an[0].entity_link.pk
            except:
                entity_type = ""
                entity_pk = ""
            ent_lst_pk = []
            # Prefer the entity's first URI; fall back to its internal pk.
            try:
                entity_uri = an[0].entity_link.uri_set.values_list('uri', flat=True)[0]
            except:
                entity_uri = 'internal db id: {}'.format(an[0].entity_link.pk)
            start_span = '<name hl-type="simple" hl-start="{}" hl-end="{}" hl-text-id="{}" hl-ann-id="{}" type="{}" entity-pk="{}" related-entity-pk="{}">'.format(
                start,
                end,
                obj.pk,
                an[0].pk,
                entity_type,
                entity_pk,
                ",".join(ent_lst_pk),
            )
            #'<span class="highlight hl_text_{}" data-hl-type="simple" data-hl-start="{}" data-hl-end="{}" data-hl-text-id="{}" data-hl-ann-id="{}" data-entity-class="{}" data-entity-pk="{}" data-related-entity-pk="{}">'.format(start, end, obj.pk, an[0].pk, entity_type, entity_pk, ','.join(ent_lst_pk))
        if lst_end:
            # Append the unannotated gap since the previous group first.
            if len(an) > 1:
                html_return += (
                    obj.text[lst_end:start]
                    + start_span
                    + obj.text[start:end]
                    + "</name>"
                )
            else:
                html_return += (
                    obj.text[lst_end:start]
                    + start_span
                    + obj.text[start:end]
                    + "<index><term>"
                    + entity_uri
                    + "</term></index></name>"
                )
        else:
            html_return += start_span + obj.text[start:end] + "</name>"
        lst_end = end
    html_return += obj.text[end:]
    return html_return
| 36.416988
| 303
| 0.513147
| 1,255
| 9,432
| 3.662948
| 0.094821
| 0.106591
| 0.055688
| 0.048075
| 0.785947
| 0.762671
| 0.756363
| 0.747662
| 0.7196
| 0.70372
| 0
| 0.007844
| 0.33768
| 9,432
| 259
| 304
| 36.416988
| 0.728029
| 0.07517
| 0
| 0.722689
| 0
| 0.012605
| 0.094019
| 0.02009
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012605
| false
| 0
| 0.016807
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bf821f1fba117acb6e32d7d42fd26be0ab419540
| 197
|
py
|
Python
|
game_generic/__init__.py
|
Holt59/modorganizer-python_plugins
|
f3404b1c3d9b8f5a6aa2133b47f7fc0218c18dc9
|
[
"MIT"
] | null | null | null |
game_generic/__init__.py
|
Holt59/modorganizer-python_plugins
|
f3404b1c3d9b8f5a6aa2133b47f7fc0218c18dc9
|
[
"MIT"
] | null | null | null |
game_generic/__init__.py
|
Holt59/modorganizer-python_plugins
|
f3404b1c3d9b8f5a6aa2133b47f7fc0218c18dc9
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
# Force load of resources so that Qt can see them:
from .resources import * # noqa
from .generic_game import GenericGame
def createPlugin():
    """Factory returning a new GenericGame instance.

    NOTE(review): presumably the entry point the host application calls to
    obtain the plugin object — confirm against the plugin loader.
    """
    plugin = GenericGame()
    return plugin
| 17.909091
| 50
| 0.700508
| 26
| 197
| 5.269231
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006329
| 0.19797
| 197
| 10
| 51
| 19.7
| 0.860759
| 0.390863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
bfa068d8a934be3d7b1205fc6ff72b2f92ee4aa5
| 636
|
py
|
Python
|
covertutils/covertutils/shells/subshells/__init__.py
|
aidden-laoch/sabre
|
0940aa51dfc5074291df9d29db827ddb4010566d
|
[
"MIT"
] | 2
|
2020-11-23T23:54:32.000Z
|
2021-05-25T12:28:05.000Z
|
covertutils/covertutils/shells/subshells/__init__.py
|
aidden-laoch/sabre
|
0940aa51dfc5074291df9d29db827ddb4010566d
|
[
"MIT"
] | 1
|
2021-03-20T05:43:02.000Z
|
2021-03-20T05:43:02.000Z
|
covertutils/covertutils/shells/subshells/__init__.py
|
aidden-laoch/sabre
|
0940aa51dfc5074291df9d29db827ddb4010566d
|
[
"MIT"
] | null | null | null |
#
from covertutils.shells.subshells.simplesubshell import SimpleSubShell
#
from covertutils.shells.subshells.shellcodesubshell import ShellcodeSubShell
#
from covertutils.shells.subshells.pythonapisubshell import PythonAPISubShell
#
from covertutils.shells.subshells.controlsubshell import ControlSubShell
#
from covertutils.shells.subshells.filesubshell import FileSubShell
##
from covertutils.shells.subshells.examplesubshell import ExampleSubShell
from covertutils.shells.subshells.meterpretersubshell import MeterpreterSubShell
from covertutils.shells.subshells.stagesubshell import StageSubShell # Causing circular dependencies
| 37.411765
| 101
| 0.878931
| 59
| 636
| 9.474576
| 0.271186
| 0.214669
| 0.300537
| 0.429338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070755
| 636
| 16
| 102
| 39.75
| 0.945854
| 0.045597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bfd93482b38e9761ce0b5a5309864c90fa7acac9
| 7,298
|
py
|
Python
|
tests/filesystems/test_s3.py
|
altescy/minato
|
bcb198a3f65192367df71a6c3a3324a6b2285785
|
[
"MIT"
] | 8
|
2021-05-16T00:50:01.000Z
|
2022-02-09T11:15:56.000Z
|
tests/filesystems/test_s3.py
|
altescy/minato
|
bcb198a3f65192367df71a6c3a3324a6b2285785
|
[
"MIT"
] | null | null | null |
tests/filesystems/test_s3.py
|
altescy/minato
|
bcb198a3f65192367df71a6c3a3324a6b2285785
|
[
"MIT"
] | 1
|
2021-06-20T07:22:53.000Z
|
2021-06-20T07:22:53.000Z
|
import tempfile
from pathlib import Path
import boto3
from moto import mock_s3
from minato.filesystems import S3FileSystem
@mock_s3
def test_open_file() -> None:
    """Round-trip a string through open_file on a mocked S3 key."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    fs = S3FileSystem("s3://my_bucket/path/to/file")
    with fs.open_file("w") as handle:
        handle.write("Hello, world!")
    with fs.open_file("r") as handle:
        assert handle.read() == "Hello, world!"
@mock_s3
def test_download_file() -> None:
    """Downloading a single key places the file directly in the target dir."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    url = "s3://my_bucket/path/to/file"
    with S3FileSystem(url).open_file("w") as handle:
        handle.write("file")
    with tempfile.TemporaryDirectory() as tmp:
        dest = Path(tmp)
        S3FileSystem(url).download(dest)
        assert (dest / "file").is_file()
@mock_s3
def test_download_dir_with_trailing_slash() -> None:
    """A trailing slash downloads the directory *contents* into the target."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    with S3FileSystem("s3://my_bucket/path/to/dir/foo.txt").open_file("w") as handle:
        handle.write("foo")
    with S3FileSystem("s3://my_bucket/path/to/dir/bar/bar.txt").open_file("w") as handle:
        handle.write("bar")
    with tempfile.TemporaryDirectory() as tmp:
        dest = Path(tmp)
        S3FileSystem("s3://my_bucket/path/to/dir/").download(dest)
        assert (dest / "foo.txt").is_file()
        assert (dest / "bar").is_dir()
        assert (dest / "bar" / "bar.txt").is_file()
@mock_s3
def test_download_dir_without_trailing_slash() -> None:
    """Without a trailing slash the directory itself lands in the target."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    with S3FileSystem("s3://my_bucket/path/to/dir/foo.txt").open_file("w") as handle:
        handle.write("foo")
    with S3FileSystem("s3://my_bucket/path/to/dir/bar/bar.txt").open_file("w") as handle:
        handle.write("bar")
    with tempfile.TemporaryDirectory() as tmp:
        dest = Path(tmp)
        S3FileSystem("s3://my_bucket/path/to/dir").download(dest)
        assert (dest / "dir" / "foo.txt").is_file()
        assert (dest / "dir" / "bar").is_dir()
        assert (dest / "dir" / "bar" / "bar.txt").is_file()
@mock_s3
def test_exists() -> None:
    """exists() is true for a stored key and its prefix, false otherwise."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    with S3FileSystem("s3://my_bucket/path/to/dir/foo.txt").open_file("w") as handle:
        handle.write("foo")
    for url, present in (
        ("s3://my_bucket/path/to/dir", True),
        ("s3://my_bucket/path/to/dir/foo.txt", True),
        ("s3://my_bucket/path/to/dir/bar.txt", False),
    ):
        assert bool(S3FileSystem(url).exists()) == present
@mock_s3
def test_delete() -> None:
    """delete() removes everything under the filesystem's prefix."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    with S3FileSystem("s3://my_bucket/path/to/dir/foo.txt").open_file("w") as handle:
        handle.write("foo")
    target = S3FileSystem("s3://my_bucket/path/to/dir")
    assert target.exists()
    target.delete()
    assert not target.exists()
@mock_s3
def test_get_version() -> None:
    """get_version() on a prefix yields a two-part dotted identifier."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    for name, body in (("foo.txt", "foo"), ("bar.txt", "bar")):
        with S3FileSystem(f"s3://my_bucket//dir/{name}").open_file("w") as handle:
            handle.write(body)
    version = S3FileSystem("s3://my_bucket//dir").get_version()
    assert version is not None
    assert len(version.split(".")) == 2
@mock_s3
def test_update_version() -> None:
    """Rewriting a key changes its version; re-reading does not."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    fs = S3FileSystem("s3://my_bucket//dir/foo.txt")
    with fs.open_file("w") as handle:
        handle.write("hello")
    first = fs.get_version()
    assert first is not None
    # Version is stable when nothing has been written in between.
    second = fs.get_version()
    assert second is not None
    assert second == first
    with fs.open_file("w") as handle:
        handle.write("world")
    third = fs.get_version()
    assert third is not None
    assert third != first
@mock_s3
def test_upload_file(tmpdir: Path) -> None:
    """upload() of one local file to a key makes it readable remotely."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    local = tmpdir / "foo.txt"
    with open(local, "w") as handle:
        handle.write("this is foo!")
    remote = S3FileSystem("s3://my_bucket/dir/foo.txt")
    assert not remote.exists()
    remote.upload(local)
    assert remote.exists()
    with remote.open_file("r") as handle:
        assert handle.read() == "this is foo!"
@mock_s3
def test_upload_file_to_dir(tmpdir: Path) -> None:
    """upload() of a file to a dir URL stores it under the file's own name."""
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    local = tmpdir / "foo.txt"
    with open(local, "w") as handle:
        handle.write("this is foo!")
    remote_dir = S3FileSystem("s3://my_bucket/dir/")
    assert not remote_dir.exists()
    remote_dir.upload(local)
    uploaded = S3FileSystem("s3://my_bucket/dir/foo.txt")
    assert uploaded.exists()
    with uploaded.open_file("r") as handle:
        assert handle.read() == "this is foo!"
@mock_s3
def test_upload_dir(tmpdir: Path) -> None:
    """upload() of a dir to a bare prefix copies the dir *contents* there."""
    workdir = tmpdir / "work"
    workdir.mkdir()
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    for name in ("foo", "bar"):
        with open(workdir / f"{name}.txt", "w") as handle:
            handle.write(f"this is {name}!")
    remote_dir = S3FileSystem("s3://my_bucket/dir")
    assert not remote_dir.exists()
    remote_dir.upload(workdir)
    for name in ("foo", "bar"):
        uploaded = S3FileSystem(f"s3://my_bucket/dir/{name}.txt")
        assert uploaded.exists()
        with uploaded.open_file("r") as handle:
            assert handle.read() == f"this is {name}!"
@mock_s3
def test_upload_dir_to_dir(tmpdir: Path) -> None:
    """upload() of a dir to a dir URL nests the dir under its own name."""
    workdir = tmpdir / "work"
    workdir.mkdir()
    boto3.resource("s3", region_name="us-east-1").create_bucket(Bucket="my_bucket")
    for name in ("foo", "bar"):
        with open(workdir / f"{name}.txt", "w") as handle:
            handle.write(f"this is {name}!")
    remote_dir = S3FileSystem("s3://my_bucket/dir/")
    assert not remote_dir.exists()
    remote_dir.upload(workdir)
    for name in ("foo", "bar"):
        uploaded = S3FileSystem(f"s3://my_bucket/dir/work/{name}.txt")
        assert uploaded.exists()
        with uploaded.open_file("r") as handle:
            assert handle.read() == f"this is {name}!"
| 27.231343
| 85
| 0.643327
| 1,034
| 7,298
| 4.390716
| 0.075435
| 0.070485
| 0.061674
| 0.1163
| 0.897577
| 0.837225
| 0.801322
| 0.789427
| 0.726652
| 0.700441
| 0
| 0.018449
| 0.197862
| 7,298
| 267
| 86
| 27.333333
| 0.757089
| 0
| 0
| 0.639344
| 0
| 0
| 0.187038
| 0.097698
| 0
| 0
| 0
| 0
| 0.196721
| 1
| 0.065574
| false
| 0
| 0.027322
| 0
| 0.092896
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
449bbabb756ac92734377e46afd4f7a309af927c
| 12,269
|
py
|
Python
|
app/school/tests/test_moderation_api.py
|
bondeveloper/maischool
|
16bf2afe99d26caa067b7912e88839639cf2191e
|
[
"MIT"
] | null | null | null |
app/school/tests/test_moderation_api.py
|
bondeveloper/maischool
|
16bf2afe99d26caa067b7912e88839639cf2191e
|
[
"MIT"
] | null | null | null |
app/school/tests/test_moderation_api.py
|
bondeveloper/maischool
|
16bf2afe99d26caa067b7912e88839639cf2191e
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Category, School, Session, Subject, Level, Lesson, \
Moderation
from school.serializers import SessionSerializer, ModerationSerializer, \
UserSerializer
# Registration endpoint used by setUp to create and authenticate the test
# user (presumably a dj-rest-auth style endpoint — confirm against urls.py).
reg_url = '/api/v1/accounts/auth/registration/'
class TestPrivateModerationApi(TestCase):
def setUp(self):
self.client = APIClient()
self.client = APIClient()
payload = {
"email": "testuser@bondeveloper.com",
"password": "Qwerty!@#",
"password1": "Qwerty!@#",
"password2": "Qwerty!@#",
"username": "testuser01"
}
auth_user = self.client.post(reg_url, payload, format='json')
access_token = auth_user.data.get('access_token')
self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + access_token)
def test_authentication_required(self):
self.client = APIClient()
res = self.client.get(reverse('school:moderation-list'))
self.assertEquals(res.status_code, status.HTTP_401_UNAUTHORIZED)
def test_moderation_create_successful(self):
learner = get_user_model().objects.create(
username="learner01",
email="learner@bondeveloper.coom",
password="Qwerty!@#",
)
learner2 = get_user_model().objects.create(
username="learner02",
email="learner02@bondeveloper.coom",
password="Qwerty!@#",
)
instructor = get_user_model().objects.create(
username="instructor",
email="instructor@bondeveloper.coom",
password="Qwerty!@#",
)
cat = Category.objects.create(
basename="sample1",
name="Sample Category"
)
sch = School.objects.create(
basename="gruut-high",
name="Gruut High",
category=cat,
)
sub = Subject.objects.create(
basename="spanish-fal",
name="Spanish FAL",
school=sch
)
level = Level.objects.create(
basename="grade-9",
name="Grade 9",
school=sch
)
les = Lesson.objects.create(
subject=sub,
level=level,
instructor=instructor,
name="Python 101"
)
les.learners.add(learner, learner2)
ses = Session.objects.create(
start_time=timezone.now(),
type="TCN",
end_time=timezone.now(),
lesson=les
)
payload = {
"session": SessionSerializer(ses).data.get('id'),
"learner": UserSerializer(learner).data.get('id'),
"learner_score": 45,
"max_score": 100,
"score_type": "unit",
}
res = self.client.post(reverse("school:moderation-create"),
payload, format='json'
)
self.assertEquals(res.status_code, status.HTTP_201_CREATED)
self.assertEquals(res.data.get('learner_score'), 45)
def test_moderation_update_api(self):
learner = get_user_model().objects.create(
username="learner01",
email="learner@bondeveloper.coom",
password="Qwerty!@#",
)
learner2 = get_user_model().objects.create(
username="learner02",
email="learner02@bondeveloper.coom",
password="Qwerty!@#",
)
instructor = get_user_model().objects.create(
username="instructor",
email="instructor@bondeveloper.coom",
password="Qwerty!@#",
)
cat = Category.objects.create(
basename="sample1",
name="Sample Category"
)
sch = School.objects.create(
basename="gruut-high",
name="Gruut High",
category=cat,
)
sub = Subject.objects.create(
basename="spanish-fal",
name="Spanish FAL",
school=sch
)
level = Level.objects.create(
basename="grade-9",
name="Grade 9",
school=sch
)
les = Lesson.objects.create(
subject=sub,
level=level,
instructor=instructor,
name="Python 101"
)
les.learners.add(learner, learner2)
ses = Session.objects.create(
start_time=timezone.now(),
type="TCN",
end_time=timezone.now(),
lesson=les
)
mod = Moderation.objects.create(
session=ses,
learner=learner,
learner_score=20,
max_score=100,
score_type="unit"
)
res = self.client.patch(reverse(
"school:moderation-update",
args=[mod.id]
),
ModerationSerializer(mod).data,
format='json'
)
self.assertEquals(res.status_code, status.HTTP_200_OK)
self.assertEquals(res.data.get('score_type'), 'unit')
def test_moderation_list_successful(self):
learner = get_user_model().objects.create(
username="learner01",
email="learner@bondeveloper.coom",
password="Qwerty!@#",
)
learner2 = get_user_model().objects.create(
username="learner02",
email="learner02@bondeveloper.coom",
password="Qwerty!@#",
)
instructor = get_user_model().objects.create(
username="instructor",
email="instructor@bondeveloper.coom",
password="Qwerty!@#",
)
cat = Category.objects.create(
basename="sample1",
name="Sample Category"
)
sch = School.objects.create(
basename="gruut-high",
name="Gruut High",
category=cat,
)
sub = Subject.objects.create(
basename="spanish-fal",
name="Spanish FAL",
school=sch
)
level = Level.objects.create(
basename="grade-9",
name="Grade 9",
school=sch
)
les = Lesson.objects.create(
subject=sub,
level=level,
instructor=instructor,
name="Python 101"
)
les.learners.add(learner, learner2)
ses = Session.objects.create(
start_time=timezone.now(),
type="TCN",
end_time=timezone.now(),
lesson=les
)
Moderation.objects.create(
session=ses,
learner=learner,
learner_score=20,
max_score=100,
score_type="unit"
)
Moderation.objects.create(
session=ses,
learner=learner,
learner_score=40,
max_score=100,
score_type="percentage"
)
res = self.client.get(reverse('school:moderation-list'))
self.assertEquals(res.status_code, status.HTTP_200_OK)
self.assertEquals(len(res.data), 2)
def test_moderation_delete_successful(self):
learner = get_user_model().objects.create(
username="learner01",
email="learner@bondeveloper.coom",
password="Qwerty!@#",
)
learner2 = get_user_model().objects.create(
username="learner02",
email="learner02@bondeveloper.coom",
password="Qwerty!@#",
)
instructor = get_user_model().objects.create(
username="instructor",
email="instructor@bondeveloper.coom",
password="Qwerty!@#",
)
cat = Category.objects.create(
basename="sample1",
name="Sample Category"
)
sch = School.objects.create(
basename="gruut-high",
name="Gruut High",
category=cat,
)
sub = Subject.objects.create(
basename="spanish-fal",
name="Spanish FAL",
school=sch
)
level = Level.objects.create(
basename="grade-9",
name="Grade 9",
school=sch
)
les = Lesson.objects.create(
subject=sub,
level=level,
instructor=instructor,
name="Python 101"
)
les.learners.add(learner, learner2)
ses = Session.objects.create(
start_time=timezone.now(),
type="TCN",
end_time=timezone.now(),
lesson=les
)
mod1 = Moderation.objects.create(
session=ses,
learner=learner,
learner_score=20,
max_score=100,
score_type="unit"
)
Moderation.objects.create(
session=ses,
learner=learner,
learner_score=40,
max_score=100,
score_type="percentage"
)
mod = Moderation.objects.all()
ser = ModerationSerializer(mod, many=True)
self.assertEquals(len(ser.data), 2)
res = self.client.delete(reverse(
'school:moderation-delete',
args=[mod1.id]
),
ser.data, format='json'
)
self.assertEquals(res.status_code, status.HTTP_204_NO_CONTENT)
mod = Moderation.objects.all()
ser = ModerationSerializer(mod, many=True)
self.assertEquals(len(ser.data), 1)
def test_moderation_retrieve_successful(self):
    """GET on school:moderation-view by id returns that single moderation.

    Creates two moderations with different score_type values and checks
    the endpoint returns the one whose id was requested ('unit').
    """
    # NOTE(review): create() stores the password unhashed; acceptable here
    # since these users are only FK targets and never authenticate — confirm.
    learner = get_user_model().objects.create(
        username="learner01",
        email="learner@bondeveloper.coom",
        password="Qwerty!@#",
    )
    learner2 = get_user_model().objects.create(
        username="learner02",
        email="learner02@bondeveloper.coom",
        password="Qwerty!@#",
    )
    instructor = get_user_model().objects.create(
        username="instructor",
        email="instructor@bondeveloper.coom",
        password="Qwerty!@#",
    )
    cat = Category.objects.create(
        basename="sample1",
        name="Sample Category"
    )
    sch = School.objects.create(
        basename="gruut-high",
        name="Gruut High",
        category=cat,
    )
    sub = Subject.objects.create(
        basename="spanish-fal",
        name="Spanish FAL",
        school=sch
    )
    level = Level.objects.create(
        basename="grade-9",
        name="Grade 9",
        school=sch
    )
    les = Lesson.objects.create(
        subject=sub,
        level=level,
        instructor=instructor,
        name="Python 101"
    )
    les.learners.add(learner, learner2)
    ses = Session.objects.create(
        start_time=timezone.now(),
        type="TCN",
        end_time=timezone.now(),
        lesson=les
    )
    # The moderation we will fetch back by id.
    mod1 = Moderation.objects.create(
        session=ses,
        learner=learner,
        learner_score=20,
        max_score=100,
        score_type="unit"
    )
    # A second record that must NOT be returned by the detail view.
    Moderation.objects.create(
        session=ses,
        learner=learner,
        learner_score=40,
        max_score=100,
        score_type="percentage"
    )
    res = self.client.get(reverse("school:moderation-view",
                                  args=[mod1.id]
                                  )
                          )
    self.assertEquals(res.status_code, status.HTTP_200_OK)
    # score_type identifies which of the two records came back.
    self.assertEquals(res.data.get('score_type'), 'unit')
| 28.532558
| 76
| 0.5108
| 1,074
| 12,269
| 5.728119
| 0.127561
| 0.109883
| 0.06827
| 0.046326
| 0.804454
| 0.800228
| 0.800228
| 0.800228
| 0.800228
| 0.784135
| 0
| 0.018677
| 0.380308
| 12,269
| 429
| 77
| 28.599068
| 0.790477
| 0
| 0
| 0.695291
| 0
| 0
| 0.12878
| 0.048741
| 0
| 0
| 0
| 0
| 0.033241
| 1
| 0.019391
| false
| 0.049862
| 0.022161
| 0
| 0.044321
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
44c6da2588473857006929d493bd43cadac15056
| 182
|
py
|
Python
|
backend/course_application/admin.py
|
heyImDrew/edupro
|
98b8342dda45071da4871bbf73f2ef002fee938f
|
[
"Apache-2.0"
] | null | null | null |
backend/course_application/admin.py
|
heyImDrew/edupro
|
98b8342dda45071da4871bbf73f2ef002fee938f
|
[
"Apache-2.0"
] | null | null | null |
backend/course_application/admin.py
|
heyImDrew/edupro
|
98b8342dda45071da4871bbf73f2ef002fee938f
|
[
"Apache-2.0"
] | null | null | null |
"""Django admin registrations for the course application's models."""
from django.contrib import admin

# Explicit imports instead of the original `from .models import *`:
# keeps the module namespace predictable and lint-clean. Only the four
# registered models are needed here.
from .models import Course, UserCourse, Partition, PartitionTask

# Expose each model in the admin with the default ModelAdmin options.
admin.site.register(Course)
admin.site.register(UserCourse)
admin.site.register(Partition)
admin.site.register(PartitionTask)
| 22.75
| 34
| 0.824176
| 24
| 182
| 6.25
| 0.5
| 0.24
| 0.453333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 182
| 7
| 35
| 26
| 0.887574
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
783c54535432ca817e5091aa60095976f7a9378c
| 180
|
py
|
Python
|
bitcoinscript/debugger/__init__.py
|
fungibit/bitcoinscript
|
ced6fb37dfa40eac7341826c758842e0ed7e7475
|
[
"MIT"
] | 1
|
2017-10-25T17:11:44.000Z
|
2017-10-25T17:11:44.000Z
|
bitcoinscript/debugger/__init__.py
|
fungibit/bitcoinscript
|
ced6fb37dfa40eac7341826c758842e0ed7e7475
|
[
"MIT"
] | 3
|
2017-03-10T05:27:29.000Z
|
2017-04-07T16:06:28.000Z
|
bitcoinscript/debugger/__init__.py
|
fungibit/bitcoinscript
|
ced6fb37dfa40eac7341826c758842e0ed7e7475
|
[
"MIT"
] | null | null | null |
"""Debugger subpackage: re-exports the public debugging helpers."""
from .dbg import run_in_debugger, start_debugger
from .utils import debug_sample

# __all__ both documents the public API and marks these imports as
# intentional re-exports — replacing the original bare name references
# that existed only to silence pyflakes' "imported but unused" warning.
__all__ = ["run_in_debugger", "start_debugger", "debug_sample"]
| 22.5
| 51
| 0.838889
| 26
| 180
| 5.5
| 0.615385
| 0.06993
| 0.181818
| 0.251748
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116667
| 180
| 7
| 52
| 25.714286
| 0.899371
| 0.272222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
7850cdbf5aa5b5cdc836f3b7b8695fc3dc13997b
| 124
|
py
|
Python
|
mainapp/StaffView.py
|
Facele55/E-Commerce
|
20103ee09a3c349eb3a0fffeb16b7fc86c5cabbc
|
[
"Apache-2.0"
] | null | null | null |
mainapp/StaffView.py
|
Facele55/E-Commerce
|
20103ee09a3c349eb3a0fffeb16b7fc86c5cabbc
|
[
"Apache-2.0"
] | null | null | null |
mainapp/StaffView.py
|
Facele55/E-Commerce
|
20103ee09a3c349eb3a0fffeb16b7fc86c5cabbc
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
def staff_home(request):
    """Render the staff landing page.

    :param request: incoming HttpRequest.
    :returns: HttpResponse rendering the staff home template with no
        extra template context.
    """
    return render(request, 'staff_template/staff_home.html')
| 20.666667
| 60
| 0.790323
| 17
| 124
| 5.588235
| 0.705882
| 0.189474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120968
| 124
| 5
| 61
| 24.8
| 0.87156
| 0
| 0
| 0
| 0
| 0
| 0.241935
| 0.241935
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
786031bca4e4c25f1b895df6dc07e93096ae9dfe
| 148
|
py
|
Python
|
Python/read/read.py
|
BackEndTea/Learning
|
ccbf8fdbb8fb23643898d73e64fe7442ab46b791
|
[
"MIT"
] | 1
|
2018-10-31T10:36:34.000Z
|
2018-10-31T10:36:34.000Z
|
Python/read/read.py
|
BackEndTea/Learning
|
ccbf8fdbb8fb23643898d73e64fe7442ab46b791
|
[
"MIT"
] | null | null | null |
Python/read/read.py
|
BackEndTea/Learning
|
ccbf8fdbb8fb23643898d73e64fe7442ab46b791
|
[
"MIT"
] | 2
|
2021-05-06T11:18:25.000Z
|
2021-12-04T07:56:29.000Z
|
# Read 'file.csv' line by line; each element keeps its trailing newline.
with open('file.csv', 'r') as f:
    a = [i for i in f]
print(a)
# Re-read, trimming each line: drops the first character and the last two
# (presumably a surrounding quote plus quote+newline — confirm against the
# actual file format).
with open('file.csv', 'r') as f:
    a = [i[1: -2] for i in f]
print(a)
| 14.8
| 32
| 0.472973
| 32
| 148
| 2.1875
| 0.4375
| 0.228571
| 0.342857
| 0.428571
| 0.971429
| 0.971429
| 0.6
| 0.6
| 0.6
| 0
| 0
| 0.019608
| 0.310811
| 148
| 9
| 33
| 16.444444
| 0.666667
| 0
| 0
| 0.666667
| 0
| 0
| 0.122449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
78a63407fdf170dd04e91df753c26e7db2382f0f
| 6,405
|
py
|
Python
|
python-example/test_tittle_collor_and_font.py
|
eugene-petrash/selenium-webdriver-full-tutorial
|
a83f77982b28c2228b48e531f34ae6ff24808039
|
[
"Apache-2.0"
] | null | null | null |
python-example/test_tittle_collor_and_font.py
|
eugene-petrash/selenium-webdriver-full-tutorial
|
a83f77982b28c2228b48e531f34ae6ff24808039
|
[
"Apache-2.0"
] | null | null | null |
python-example/test_tittle_collor_and_font.py
|
eugene-petrash/selenium-webdriver-full-tutorial
|
a83f77982b28c2228b48e531f34ae6ff24808039
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
import regex # pip install regex
@pytest.fixture
def driver(request):
    """Yield a Chrome WebDriver with a 2s implicit wait; quit at teardown."""
    wd = webdriver.Chrome()
    wd.implicitly_wait(2)
    # addfinalizer guarantees the browser closes even if the test fails.
    request.addfinalizer(wd.quit)
    return wd
def test_tittle_collor_and_font(driver):
    r"""Verify campaign-product price styling on the main and item pages.

    Asserts that the regular price is gray (R == G == B) and struck
    through, the campaign price is red (G == B == 0), bold and in a larger
    font, and that the item name and both price texts match between pages.

    Fix over the original: all patterns are raw strings. The originals
    contained invalid escape sequences (\(, \d, \,, \s, \K) in plain string
    literals, which raise SyntaxWarning on modern Python; the pattern bytes
    are unchanged. \K requires the third-party `regex` module (stdlib `re`
    does not support it).
    """
    driver.get("http://localhost/litecart/")
    driver.find_element(By.XPATH, "//a[contains(., 'Campaign Products')]").click()
    tested_item_main_page = driver.find_element(By.CSS_SELECTOR, '#campaign-products .col-xs-halfs.col-sm-thirds.col-md-fourths.col-lg-fifths:first-child')
    # On the main page.
    item_name_on_the_main_page = tested_item_main_page.find_element(By.CSS_SELECTOR, '.info .name').text
    regular_price_on_the_main_page = tested_item_main_page.find_element(By.CSS_SELECTOR, '.regular-price')
    regular_price_text_on_the_main_page = regular_price_on_the_main_page.text
    regular_price_color_on_the_main_page = regular_price_on_the_main_page.value_of_css_property('color')
    # Extract R/G/B components from an "rgb(r, g, b)"-style CSS color string.
    regular_price_R_color_on_the_main_page = regex.search(r'(?<=\()\d+', regular_price_color_on_the_main_page).group()
    regular_price_G_color_on_the_main_page = regex.search(r'\(\d+\,\s\K\d+', regular_price_color_on_the_main_page).group()
    regular_price_B_color_on_the_main_page = regex.search(r'\(\d+\,\s\d+\,\s\K\d+', regular_price_color_on_the_main_page).group()
    assert regular_price_R_color_on_the_main_page == regular_price_G_color_on_the_main_page == regular_price_B_color_on_the_main_page  # Color is gray
    regular_price_decoration_on_the_main_page = regex.search(r'(blink|line-through|overline|underline|none|inherit)',
                                                             regular_price_on_the_main_page.value_of_css_property('text-decoration')).group()
    assert regular_price_decoration_on_the_main_page == 'line-through'  # text-decoration:line-through
    regular_price_size_on_the_main_page = regular_price_on_the_main_page.value_of_css_property('font-size')
    campaign_price_on_the_main_page = tested_item_main_page.find_element(By.CSS_SELECTOR, '.campaign-price')
    campaign_price_text_on_the_main_page = campaign_price_on_the_main_page.text
    campaign_price_color_on_the_main_page = campaign_price_on_the_main_page.value_of_css_property('color')
    campaign_price_G_color_on_the_main_page = regex.search(r'\(\d+\,\s\K\d+', campaign_price_color_on_the_main_page).group()
    campaign_price_B_color_on_the_main_page = regex.search(r'\(\d+\,\s\d+\,\s\K\d+', campaign_price_color_on_the_main_page).group()
    assert int(campaign_price_G_color_on_the_main_page) == int(campaign_price_B_color_on_the_main_page) == 0  # Color is red
    campaign_price_decoration_on_the_main_page = campaign_price_on_the_main_page.value_of_css_property('font-weight')
    # Bold weight is reported as the keyword by Chrome and '700' by Firefox;
    # a membership test replaces the original nested try/assert-False chain.
    assert campaign_price_decoration_on_the_main_page in ('bold', '700')
    campaign_price_size_on_the_main_page = campaign_price_on_the_main_page.value_of_css_property('font-size')
    assert regular_price_size_on_the_main_page < campaign_price_size_on_the_main_page  # The font size of the campaign price is larger than the regular price
    # On the item's page
    tested_item_main_page.click()
    item_name_on_the_item_page = driver.find_element(By.CSS_SELECTOR, 'h1').text
    assert item_name_on_the_main_page == item_name_on_the_item_page  # The same item name on the main and on the item pages
    regular_price_on_the_item_page = driver.find_element(By.CSS_SELECTOR, '.regular-price')
    assert regular_price_text_on_the_main_page == regular_price_on_the_item_page.text  # The same regular item's price on the main and on the item pages
    regular_price_color_on_the_item_page = regular_price_on_the_item_page.value_of_css_property('color')
    regular_price_R_color_on_the_item_page = regex.search(r'(?<=\()\d+', regular_price_color_on_the_item_page).group()
    regular_price_G_color_on_the_item_page = regex.search(r'\(\d+\,\s\K\d+', regular_price_color_on_the_item_page).group()
    regular_price_B_color_on_the_item_page = regex.search(r'\(\d+\,\s\d+\,\s\K\d+', regular_price_color_on_the_item_page).group()
    assert regular_price_R_color_on_the_item_page == regular_price_G_color_on_the_item_page == regular_price_B_color_on_the_item_page  # Color is gray
    regular_price_decoration_on_the_item_page = regex.search(r'(blink|line-through|overline|underline|none|inherit)',
                                                             regular_price_on_the_item_page.value_of_css_property('text-decoration')).group()
    assert regular_price_decoration_on_the_item_page == 'line-through'  # text-decoration:line-through
    regular_price_size_on_the_item_page = regular_price_on_the_item_page.value_of_css_property('font-size')
    campaign_price_on_the_item_page = driver.find_element(By.CSS_SELECTOR, '.campaign-price')
    assert campaign_price_text_on_the_main_page == campaign_price_on_the_item_page.text  # The same campaign item's price on the main and on the item pages
    campaign_price_color_on_the_item_page = campaign_price_on_the_item_page.value_of_css_property('color')
    campaign_price_G_color_on_the_item_page = regex.search(r'\(\d+\,\s\K\d+', campaign_price_color_on_the_item_page).group()
    campaign_price_B_color_on_the_item_page = regex.search(r'\(\d+\,\s\d+\,\s\K\d+', campaign_price_color_on_the_item_page).group()
    assert int(campaign_price_G_color_on_the_item_page) == int(campaign_price_B_color_on_the_item_page) == 0  # Color is red
    campaign_price_decoration_on_the_item_page = campaign_price_on_the_item_page.value_of_css_property('font-weight')
    assert campaign_price_decoration_on_the_item_page in ('bold', '700')  # bold: Chrome keyword / FF numeric
    campaign_price_size_on_the_item_page = campaign_price_on_the_item_page.value_of_css_property('font-size')
    assert regular_price_size_on_the_item_page < campaign_price_size_on_the_item_page  # The font size of the campaign price is larger than the regular price
| 62.794118
| 156
| 0.779859
| 1,023
| 6,405
| 4.347996
| 0.100684
| 0.098921
| 0.093076
| 0.125674
| 0.898606
| 0.889164
| 0.880396
| 0.848471
| 0.75607
| 0.713804
| 0
| 0.002868
| 0.129118
| 6,405
| 101
| 157
| 63.415842
| 0.79455
| 0.090554
| 0
| 0.138889
| 0
| 0.013889
| 0.108737
| 0.044115
| 0
| 0
| 0
| 0
| 0.291667
| 1
| 0.027778
| false
| 0
| 0.055556
| 0
| 0.097222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
153e2a6848b0234f1f714c484bd4989e7dfd3df7
| 117
|
py
|
Python
|
mmdet/ops/__init__.py
|
GuoBo98/ShipDet
|
2979c39c5a56be3b99ba77833cfe556a8a0fc97e
|
[
"Apache-2.0"
] | null | null | null |
mmdet/ops/__init__.py
|
GuoBo98/ShipDet
|
2979c39c5a56be3b99ba77833cfe556a8a0fc97e
|
[
"Apache-2.0"
] | null | null | null |
mmdet/ops/__init__.py
|
GuoBo98/ShipDet
|
2979c39c5a56be3b99ba77833cfe556a8a0fc97e
|
[
"Apache-2.0"
] | null | null | null |
# Re-export the rotated RoI-Align op (class and functional form) and
# declare them as this package's public API.
from .roi_align_rotated import RoIAlignRotated, roi_align_rotated

__all__ = ['RoIAlignRotated', 'roi_align_rotated']
| 39
| 65
| 0.837607
| 14
| 117
| 6.285714
| 0.5
| 0.272727
| 0.511364
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 117
| 3
| 66
| 39
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0.271186
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
15682165775caa290117504af2ff7fc3111f75eb
| 1,423
|
py
|
Python
|
.c9/metadata/environment/Noda_SF_Project/Noda_SF_Project/settings.py
|
bopopescu/nodasf
|
32718c9ba606a7373b20c77710fd3706fc583396
|
[
"MIT"
] | null | null | null |
.c9/metadata/environment/Noda_SF_Project/Noda_SF_Project/settings.py
|
bopopescu/nodasf
|
32718c9ba606a7373b20c77710fd3706fc583396
|
[
"MIT"
] | 9
|
2019-12-05T20:37:07.000Z
|
2022-02-10T12:34:48.000Z
|
.c9/metadata/environment/Noda_SF_Project/Noda_SF_Project/settings.py
|
bopopescu/nodasf
|
32718c9ba606a7373b20c77710fd3706fc583396
|
[
"MIT"
] | 1
|
2020-07-25T23:37:21.000Z
|
2020-07-25T23:37:21.000Z
|
{"filter":false,"title":"settings.py","tooltip":"/Noda_SF_Project/Noda_SF_Project/settings.py","undoManager":{"mark":3,"position":3,"stack":[[{"start":{"row":38,"column":33},"end":{"row":39,"column":0},"action":"insert","lines":["",""],"id":2},{"start":{"row":39,"column":0},"end":{"row":39,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":39,"column":4},"end":{"row":39,"column":6},"action":"insert","lines":["''"],"id":3}],[{"start":{"row":39,"column":5},"end":{"row":39,"column":6},"action":"insert","lines":["n"],"id":4},{"start":{"row":39,"column":6},"end":{"row":39,"column":7},"action":"insert","lines":["o"]},{"start":{"row":39,"column":7},"end":{"row":39,"column":8},"action":"insert","lines":["d"]},{"start":{"row":39,"column":8},"end":{"row":39,"column":9},"action":"insert","lines":["a"]},{"start":{"row":39,"column":9},"end":{"row":39,"column":10},"action":"insert","lines":["s"]},{"start":{"row":39,"column":10},"end":{"row":39,"column":11},"action":"insert","lines":["f"]}],[{"start":{"row":39,"column":12},"end":{"row":39,"column":13},"action":"insert","lines":[","],"id":5}]]},"ace":{"folds":[],"scrolltop":1233,"scrollleft":0,"selection":{"start":{"row":120,"column":13},"end":{"row":120,"column":13},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"hash":"b94b6875263f69d9119932a08b7ab4cf189c05b3","timestamp":1562792805975}
| 1,423
| 1,423
| 0.593113
| 196
| 1,423
| 4.285714
| 0.311224
| 0.113095
| 0.24881
| 0.166667
| 0.07619
| 0.07619
| 0.07619
| 0.07619
| 0
| 0
| 0
| 0.090909
| 0.002811
| 1,423
| 1
| 1,423
| 1,423
| 0.501057
| 0
| 0
| 0
| 0
| 0
| 0.514045
| 0.058989
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
15846f7192fcdc591856fb8955166b95b74770b7
| 154
|
py
|
Python
|
order.py
|
alexsandrox/designpattern-strategy-python
|
69b4cdb1cf88a340f2bde38e387a3c9c6560806a
|
[
"MIT"
] | null | null | null |
order.py
|
alexsandrox/designpattern-strategy-python
|
69b4cdb1cf88a340f2bde38e387a3c9c6560806a
|
[
"MIT"
] | null | null | null |
order.py
|
alexsandrox/designpattern-strategy-python
|
69b4cdb1cf88a340f2bde38e387a3c9c6560806a
|
[
"MIT"
] | null | null | null |
class Order(object):
    """An order wrapping a single value exposed through a read-only property."""

    def __init__(self, value):
        """Store *value* on a name-mangled attribute private to this class."""
        self.__value = value

    @property
    def value(self):
        """Return the value this order was constructed with (no setter)."""
        return self.__value
| 17.111111
| 30
| 0.577922
| 17
| 154
| 4.764706
| 0.529412
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.331169
| 154
| 9
| 31
| 17.111111
| 0.786408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
159eb6f3bd3db35874296877ff6439ebc3a807ba
| 137
|
py
|
Python
|
pycwr/io/BaseDataProtocol/__init__.py
|
aliny2003/pycwr
|
3186125bdd2f551b9c59639dca567a1c06b50e96
|
[
"MIT"
] | 144
|
2019-11-27T14:36:41.000Z
|
2022-02-23T08:21:17.000Z
|
pycwr/io/BaseDataProtocol/__init__.py
|
zhaopingsun/pycwr
|
7459371588e6d0d6d0737e249afa3921fe073151
|
[
"MIT"
] | 32
|
2019-11-29T10:11:53.000Z
|
2022-03-14T07:46:44.000Z
|
pycwr/io/BaseDataProtocol/__init__.py
|
zhaopingsun/pycwr
|
7459371588e6d0d6d0737e249afa3921fe073151
|
[
"MIT"
] | 57
|
2019-11-27T12:51:44.000Z
|
2022-01-29T14:50:05.000Z
|
# Radar base-data protocol definitions, re-exported as this package's API.
from . import CCProtocol, SABProtocol, SCProtocol, WSR98DProtocol

__all__ = ["CCProtocol", "SABProtocol", "SCProtocol", "WSR98DProtocol"]
| 68.5
| 71
| 0.781022
| 11
| 137
| 9.363636
| 0.636364
| 0.407767
| 0.601942
| 0.873786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032
| 0.087591
| 137
| 2
| 71
| 68.5
| 0.792
| 0
| 0
| 0
| 0
| 0
| 0.326087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
15ba07a2ed799dc5d1afcacd9adc7e0a124b0fc0
| 2,309
|
py
|
Python
|
tests/relay/test_node_permissions.py
|
Dagurmart/graphene-django-plus
|
c8bc3c1179a02f51ff97bb8ba9dd187737a28b68
|
[
"MIT"
] | null | null | null |
tests/relay/test_node_permissions.py
|
Dagurmart/graphene-django-plus
|
c8bc3c1179a02f51ff97bb8ba9dd187737a28b68
|
[
"MIT"
] | null | null | null |
tests/relay/test_node_permissions.py
|
Dagurmart/graphene-django-plus
|
c8bc3c1179a02f51ff97bb8ba9dd187737a28b68
|
[
"MIT"
] | 1
|
2020-06-11T19:15:51.000Z
|
2020-06-11T19:15:51.000Z
|
import pytest
from graphql_relay import to_global_id
from rest_framework.utils import json
@pytest.mark.django_db()
def test_node_permission_classes_without_authentication(book_factory, graphql_client):
    """Anonymous query on an admin-only node yields a permission error."""
    book = book_factory()
    response = graphql_client.execute(
        """
        query BookAsAdmin($id: ID!) {
            bookAsAdmin(id: $id) {
                id
            }
        }""",
        variables={"id": to_global_id("BookType", book.pk)},
    )
    # GraphQL transports errors in the response body; HTTP status stays 200.
    assert response.status_code == 200
    assert json.loads(response.content) == {
        "data": {"bookAsAdmin": None},
        "errors": [
            {
                "locations": [{"column": 13, "line": 3}],
                "message": "You do not have permission to perform this action.",
                "path": ["bookAsAdmin"],
            }
        ],
    }
@pytest.mark.django_db()
def test_node_permission_classes_without_permission(
    user_factory, book_factory, graphql_client
):
    """Authenticated but non-staff user still gets a permission error."""
    user = user_factory()
    book = book_factory()
    graphql_client.force_authenticate(user)
    response = graphql_client.execute(
        """
        query BookAsAdmin($id: ID!) {
            bookAsAdmin(id: $id) {
                id
            }
        }""",
        variables={"id": to_global_id("BookType", book.pk)},
    )
    # GraphQL transports errors in the response body; HTTP status stays 200.
    assert response.status_code == 200
    assert json.loads(response.content) == {
        "data": {"bookAsAdmin": None},
        "errors": [
            {
                "locations": [{"column": 13, "line": 3}],
                "message": "You do not have permission to perform this action.",
                "path": ["bookAsAdmin"],
            }
        ],
    }
@pytest.mark.django_db()
def test_node_permission_classes_with_permission(
    user_factory, book_factory, graphql_client
):
    """Staff user (is_staff=True) can resolve the admin-only node."""
    user = user_factory(is_staff=True)
    book = book_factory()
    graphql_client.force_authenticate(user)
    response = graphql_client.execute(
        """
        query BookAsAdmin($id: ID!) {
            bookAsAdmin(id: $id) {
                id
            }
        }""",
        variables={"id": to_global_id("BookType", book.pk)},
    )
    assert response.status_code == 200
    # Success path: the node resolves to the book's relay global id.
    assert json.loads(response.content) == {
        "data": {"bookAsAdmin": {"id": to_global_id("BookType", book.pk)}}
    }
| 26.54023
| 86
| 0.563014
| 233
| 2,309
| 5.347639
| 0.266094
| 0.028892
| 0.072231
| 0.096308
| 0.88443
| 0.88443
| 0.88443
| 0.863563
| 0.863563
| 0.863563
| 0
| 0.009323
| 0.303162
| 2,309
| 86
| 87
| 26.848837
| 0.765071
| 0
| 0
| 0.62069
| 0
| 0
| 0.14315
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 1
| 0.051724
| false
| 0
| 0.051724
| 0
| 0.103448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ecce5739e9c5dd5c258cb92aff8a14712e75760c
| 229
|
py
|
Python
|
halalar/legal/views.py
|
jawaidss/halalar-web
|
abb5db6fa83aba7b7a280fcff1b880f36c0b4548
|
[
"MIT"
] | 1
|
2015-11-09T22:09:43.000Z
|
2015-11-09T22:09:43.000Z
|
halalar/legal/views.py
|
jawaidss/halalar-web
|
abb5db6fa83aba7b7a280fcff1b880f36c0b4548
|
[
"MIT"
] | null | null | null |
halalar/legal/views.py
|
jawaidss/halalar-web
|
abb5db6fa83aba7b7a280fcff1b880f36c0b4548
|
[
"MIT"
] | null | null | null |
from django.views.generic.base import TemplateView
class PrivacyPolicyView(TemplateView):
    """Static page rendering the privacy policy template."""
    template_name = 'legal/privacy_policy.html'
class TermsOfServiceView(TemplateView):
    """Static page rendering the terms of service template."""
    template_name = 'legal/terms_of_service.html'
| 32.714286
| 50
| 0.816594
| 26
| 229
| 7
| 0.730769
| 0.21978
| 0.263736
| 0.318681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100437
| 229
| 7
| 51
| 32.714286
| 0.883495
| 0
| 0
| 0
| 0
| 0
| 0.226087
| 0.226087
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
01ae4a445073c96133a2bcafddf9ca20b082d923
| 213
|
py
|
Python
|
model/__init__.py
|
Dece-brove/APAN
|
4703d528c283e26a8d8f31ab6664b5404a2db5b5
|
[
"MIT"
] | null | null | null |
model/__init__.py
|
Dece-brove/APAN
|
4703d528c283e26a8d8f31ab6664b5404a2db5b5
|
[
"MIT"
] | null | null | null |
model/__init__.py
|
Dece-brove/APAN
|
4703d528c283e26a8d8f31ab6664b5404a2db5b5
|
[
"MIT"
] | null | null | null |
from model.msg2mail import Msg2Mail
from model.encoder import Encoder
from model.decoder import Decoder
from model.updater import RNNUpdater, AttnUpdater
from model.aggregator import MLPAggregator, AttnAggregator
| 35.5
| 58
| 0.86385
| 27
| 213
| 6.814815
| 0.444444
| 0.244565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010471
| 0.103286
| 213
| 5
| 59
| 42.6
| 0.95288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
01db48123ace2512ead68fd1601f911ab0f60cef
| 41
|
py
|
Python
|
classes/Optimizer/__init__.py
|
coopersigrist/DnDML
|
782b8908147fc9d90c6fb1dbb25a394ca4022b14
|
[
"MIT"
] | 2
|
2021-05-31T22:44:50.000Z
|
2021-09-12T03:19:21.000Z
|
classes/Optimizer/__init__.py
|
coopersigrist/DnDML
|
782b8908147fc9d90c6fb1dbb25a394ca4022b14
|
[
"MIT"
] | null | null | null |
classes/Optimizer/__init__.py
|
coopersigrist/DnDML
|
782b8908147fc9d90c6fb1dbb25a394ca4022b14
|
[
"MIT"
] | 1
|
2021-07-22T12:54:47.000Z
|
2021-07-22T12:54:47.000Z
|
from .wrapper import create_optim_wrapper
| 41
| 41
| 0.902439
| 6
| 41
| 5.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 41
| 1
| 41
| 41
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bf03e63169f4a69575f56090cf3ef31b53f95eee
| 158
|
py
|
Python
|
owstk/analysis/export.py
|
Tfitzpatrick846/OWSTK
|
dfce0fad56c0093c2c9cf45952cb3a0958e94706
|
[
"CNRI-Python"
] | null | null | null |
owstk/analysis/export.py
|
Tfitzpatrick846/OWSTK
|
dfce0fad56c0093c2c9cf45952cb3a0958e94706
|
[
"CNRI-Python"
] | null | null | null |
owstk/analysis/export.py
|
Tfitzpatrick846/OWSTK
|
dfce0fad56c0093c2c9cf45952cb3a0958e94706
|
[
"CNRI-Python"
] | null | null | null |
"""Export data"""
from scipy.io import savemat


def mat(filename, mdict, **kwargs):
    """Export dictionary *mdict* to a .mat file for MATLAB.

    Generalized (backward-compatibly) over the original: extra keyword
    arguments — e.g. ``do_compression=True`` or ``oned_as='row'`` — are
    forwarded to :func:`scipy.io.savemat`. Existing two-argument calls
    behave exactly as before.

    :param filename: target path (or open file-like object) for the .mat file.
    :param mdict: dict mapping MATLAB variable names to array-like values.
    """
    savemat(filename, mdict, **kwargs)
| 15.8
| 51
| 0.677215
| 21
| 158
| 5.095238
| 0.761905
| 0.242991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189873
| 158
| 9
| 52
| 17.555556
| 0.835938
| 0.335443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bf3400e9e5516570d2ee907dd1eae2f4cc26b378
| 9,145
|
py
|
Python
|
tests/test_wildcard_matching.py
|
stachenov/PyLeetCode
|
cb13700d428854eff46a762542a63d691578d5b6
|
[
"Unlicense"
] | null | null | null |
tests/test_wildcard_matching.py
|
stachenov/PyLeetCode
|
cb13700d428854eff46a762542a63d691578d5b6
|
[
"Unlicense"
] | null | null | null |
tests/test_wildcard_matching.py
|
stachenov/PyLeetCode
|
cb13700d428854eff46a762542a63d691578d5b6
|
[
"Unlicense"
] | null | null | null |
import pytest
from problems.wildcard_matching import Solution
@pytest.mark.parametrize("s, p, expected", [
("", "", True),
("", "*", True),
("", "**", True),
("a", "*", True),
("ab", "*", True),
("ab", "a*", True),
("ab", "*b", True),
("ab", "a*b", True),
("ab", "a*bc", False),
("ab", "a*cb", False),
("ab", "ac*b", False),
("ab", "ca*b", False),
("ab", "a?", True),
("a", "a?", False),
(
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"*aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa*",
False),
(
"ababbbaabaabaabbbaabaabaaaababaaaabbbabaabbbababbababaababbaababaaabaaaabbbbabbaaaabaaaabbaababbabaababbaaaaabaababbbbbabbaaabbabbbaaabaaaaabbabbbaabababbabbbaaabaabaabababaaabababbbbaababaabababaabba",
"**b**a*****abaab*abb**bb*aba***a*a*aab***b*ab*baa*b*b*a**baba****b****bb*abba*bab*****bbab*aab****bab*ba",
True),
])
def test_isMatch(s, p, expected):
assert Solution().isMatch(s, p) == expected
| 295
| 4,114
| 0.951121
| 94
| 9,145
| 92.510638
| 0.414894
| 0.00345
| 0.00345
| 0.00184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023182
| 9,145
| 30
| 4,115
| 304.833333
| 0.973469
| 0
| 0
| 0
| 0
| 0.035714
| 0.936577
| 0.92936
| 0
| 1
| 0
| 0
| 0.035714
| 1
| 0.035714
| false
| 0
| 0.071429
| 0
| 0.107143
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
172edcbd3b6c7bbc0d143811122621972d076286
| 27
|
py
|
Python
|
July21/EssentialPython/helloworld/helloworld.py
|
pythonbykhaja/intesivepython
|
d3074f35bf36a04d4d1d9b4ff4631733d40b5817
|
[
"Apache-2.0"
] | 2
|
2021-05-29T18:21:50.000Z
|
2021-07-24T13:03:30.000Z
|
July21/EssentialPython/helloworld/helloworld.py
|
pythonbykhaja/intesivepython
|
d3074f35bf36a04d4d1d9b4ff4631733d40b5817
|
[
"Apache-2.0"
] | null | null | null |
July21/EssentialPython/helloworld/helloworld.py
|
pythonbykhaja/intesivepython
|
d3074f35bf36a04d4d1d9b4ff4631733d40b5817
|
[
"Apache-2.0"
] | 2
|
2021-05-25T10:19:54.000Z
|
2021-09-21T12:20:48.000Z
|
print('Hello World Python')
| 27
| 27
| 0.777778
| 4
| 27
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 27
| 1
| 27
| 27
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
173fffe9dd2206a27daccda030af3e72d01cef25
| 26
|
py
|
Python
|
behavior/scripts/main.py
|
SettingDust/more-chicken
|
3816b4f37ed3b6c115dfa9001ec6554b1d69af81
|
[
"Apache-2.0"
] | null | null | null |
behavior/scripts/main.py
|
SettingDust/more-chicken
|
3816b4f37ed3b6c115dfa9001ec6554b1d69af81
|
[
"Apache-2.0"
] | null | null | null |
behavior/scripts/main.py
|
SettingDust/more-chicken
|
3816b4f37ed3b6c115dfa9001ec6554b1d69af81
|
[
"Apache-2.0"
] | 1
|
2020-10-19T15:32:23.000Z
|
2020-10-19T15:32:23.000Z
|
from common.mod import Mod
| 26
| 26
| 0.846154
| 5
| 26
| 4.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bd5ce6155a750a874aeaeebfe810ee41acfd3156
| 589
|
py
|
Python
|
udb_py/udb_storage.py
|
akaterra/udb.py
|
3c04fa788e4b2fc8356c1210b9d81004aa932c0b
|
[
"MIT"
] | 2
|
2020-01-04T12:04:58.000Z
|
2020-02-15T16:32:12.000Z
|
udb_py/udb_storage.py
|
akaterra/udb.py
|
3c04fa788e4b2fc8356c1210b9d81004aa932c0b
|
[
"MIT"
] | null | null | null |
udb_py/udb_storage.py
|
akaterra/udb.py
|
3c04fa788e4b2fc8356c1210b9d81004aa932c0b
|
[
"MIT"
] | null | null | null |
class UdbStorage(object):
def is_available(self):
raise NotImplementedError
def is_capture_events(self):
return False
def drop(self):
raise NotImplementedError
def load(self):
raise NotImplementedError
def save(self, indexes, revision, data):
raise NotImplementedError
def save_meta(self, indexes, revision):
raise NotImplementedError
def on_delete(self, rid):
return self
def on_insert(self, rid, record):
return self
def on_update(self, rid, record, values):
return self
| 21.035714
| 45
| 0.650255
| 66
| 589
| 5.69697
| 0.409091
| 0.319149
| 0.359043
| 0.24734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.280136
| 589
| 27
| 46
| 21.814815
| 0.886792
| 0
| 0
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.473684
| false
| 0
| 0
| 0.210526
| 0.736842
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
bd9f9949e13417fd14f6292afebf26ca964e5be3
| 9,439
|
py
|
Python
|
test/test_live_tv_service_api.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_live_tv_service_api.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_live_tv_service_api.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
Emby Server API
Explore the Emby Server API # noqa: E501
OpenAPI spec version: 4.1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import embyapi
from embyapi.api.live_tv_service_api import LiveTvServiceApi # noqa: E501
from embyapi.rest import ApiException
class TestLiveTvServiceApi(unittest.TestCase):
"""LiveTvServiceApi unit test stubs"""
def setUp(self):
self.api = LiveTvServiceApi() # noqa: E501
def tearDown(self):
pass
def test_delete_livetv_channelmappingoptions(self):
"""Test case for delete_livetv_channelmappingoptions
"""
pass
def test_delete_livetv_channelmappings(self):
"""Test case for delete_livetv_channelmappings
"""
pass
def test_delete_livetv_listingproviders(self):
"""Test case for delete_livetv_listingproviders
Deletes a listing provider # noqa: E501
"""
pass
def test_delete_livetv_recordings_by_id(self):
"""Test case for delete_livetv_recordings_by_id
Deletes a live tv recording # noqa: E501
"""
pass
def test_delete_livetv_seriestimers_by_id(self):
"""Test case for delete_livetv_seriestimers_by_id
Cancels a live tv series timer # noqa: E501
"""
pass
def test_delete_livetv_timers_by_id(self):
"""Test case for delete_livetv_timers_by_id
Cancels a live tv timer # noqa: E501
"""
pass
def test_delete_livetv_tunerhosts(self):
"""Test case for delete_livetv_tunerhosts
Deletes a tuner host # noqa: E501
"""
pass
def test_get_livetv_channelmappingoptions(self):
"""Test case for get_livetv_channelmappingoptions
"""
pass
def test_get_livetv_channelmappings(self):
"""Test case for get_livetv_channelmappings
"""
pass
def test_get_livetv_channels(self):
"""Test case for get_livetv_channels
Gets available live tv channels. # noqa: E501
"""
pass
def test_get_livetv_channels_by_id(self):
"""Test case for get_livetv_channels_by_id
Gets a live tv channel # noqa: E501
"""
pass
def test_get_livetv_guideinfo(self):
"""Test case for get_livetv_guideinfo
Gets guide info # noqa: E501
"""
pass
def test_get_livetv_info(self):
"""Test case for get_livetv_info
Gets available live tv services. # noqa: E501
"""
pass
def test_get_livetv_listingproviders(self):
"""Test case for get_livetv_listingproviders
Gets current listing providers # noqa: E501
"""
pass
def test_get_livetv_listingproviders_available(self):
"""Test case for get_livetv_listingproviders_available
Gets listing provider # noqa: E501
"""
pass
def test_get_livetv_listingproviders_default(self):
"""Test case for get_livetv_listingproviders_default
"""
pass
def test_get_livetv_listingproviders_lineups(self):
"""Test case for get_livetv_listingproviders_lineups
Gets available lineups # noqa: E501
"""
pass
def test_get_livetv_listingproviders_schedulesdirect_countries(self):
"""Test case for get_livetv_listingproviders_schedulesdirect_countries
Gets available lineups # noqa: E501
"""
pass
def test_get_livetv_liverecordings_by_id_stream(self):
"""Test case for get_livetv_liverecordings_by_id_stream
Gets a live tv channel # noqa: E501
"""
pass
def test_get_livetv_livestreamfiles_by_id_by_container(self):
"""Test case for get_livetv_livestreamfiles_by_id_by_container
Gets a live tv channel # noqa: E501
"""
pass
def test_get_livetv_programs(self):
"""Test case for get_livetv_programs
Gets available live tv epgs.. # noqa: E501
"""
pass
def test_get_livetv_programs_recommended(self):
"""Test case for get_livetv_programs_recommended
Gets available live tv epgs.. # noqa: E501
"""
pass
def test_get_livetv_recordings(self):
"""Test case for get_livetv_recordings
Gets live tv recordings # noqa: E501
"""
pass
def test_get_livetv_recordings_by_id(self):
"""Test case for get_livetv_recordings_by_id
Gets a live tv recording # noqa: E501
"""
pass
def test_get_livetv_recordings_folders(self):
"""Test case for get_livetv_recordings_folders
Gets recording folders # noqa: E501
"""
pass
def test_get_livetv_recordings_groups(self):
"""Test case for get_livetv_recordings_groups
Gets live tv recording groups # noqa: E501
"""
pass
def test_get_livetv_recordings_groups_by_id(self):
"""Test case for get_livetv_recordings_groups_by_id
Gets a recording group # noqa: E501
"""
pass
def test_get_livetv_recordings_series(self):
"""Test case for get_livetv_recordings_series
Gets live tv recordings # noqa: E501
"""
pass
def test_get_livetv_seriestimers(self):
"""Test case for get_livetv_seriestimers
Gets live tv series timers # noqa: E501
"""
pass
def test_get_livetv_seriestimers_by_id(self):
"""Test case for get_livetv_seriestimers_by_id
Gets a live tv series timer # noqa: E501
"""
pass
def test_get_livetv_timers(self):
"""Test case for get_livetv_timers
Gets live tv timers # noqa: E501
"""
pass
def test_get_livetv_timers_by_id(self):
"""Test case for get_livetv_timers_by_id
Gets a live tv timer # noqa: E501
"""
pass
def test_get_livetv_timers_defaults(self):
"""Test case for get_livetv_timers_defaults
Gets default values for a new timer # noqa: E501
"""
pass
def test_get_livetv_tunerhosts(self):
"""Test case for get_livetv_tunerhosts
Gets tuner hosts # noqa: E501
"""
pass
def test_get_livetv_tunerhosts_types(self):
"""Test case for get_livetv_tunerhosts_types
"""
pass
def test_get_livetv_tuners_discvover(self):
"""Test case for get_livetv_tuners_discvover
"""
pass
def test_head_livetv_channelmappingoptions(self):
"""Test case for head_livetv_channelmappingoptions
"""
pass
def test_head_livetv_channelmappings(self):
"""Test case for head_livetv_channelmappings
"""
pass
def test_options_livetv_channelmappingoptions(self):
"""Test case for options_livetv_channelmappingoptions
"""
pass
def test_options_livetv_channelmappings(self):
"""Test case for options_livetv_channelmappings
"""
pass
def test_patch_livetv_channelmappingoptions(self):
"""Test case for patch_livetv_channelmappingoptions
"""
pass
def test_patch_livetv_channelmappings(self):
"""Test case for patch_livetv_channelmappings
"""
pass
def test_post_livetv_channelmappingoptions(self):
"""Test case for post_livetv_channelmappingoptions
"""
pass
def test_post_livetv_channelmappings(self):
"""Test case for post_livetv_channelmappings
"""
pass
def test_post_livetv_listingproviders(self):
"""Test case for post_livetv_listingproviders
Adds a listing provider # noqa: E501
"""
pass
def test_post_livetv_programs(self):
"""Test case for post_livetv_programs
Gets available live tv epgs.. # noqa: E501
"""
pass
def test_post_livetv_seriestimers(self):
"""Test case for post_livetv_seriestimers
Creates a live tv series timer # noqa: E501
"""
pass
def test_post_livetv_seriestimers_by_id(self):
"""Test case for post_livetv_seriestimers_by_id
Updates a live tv series timer # noqa: E501
"""
pass
def test_post_livetv_timers(self):
"""Test case for post_livetv_timers
Creates a live tv timer # noqa: E501
"""
pass
def test_post_livetv_timers_by_id(self):
"""Test case for post_livetv_timers_by_id
Updates a live tv timer # noqa: E501
"""
pass
def test_post_livetv_tunerhosts(self):
"""Test case for post_livetv_tunerhosts
Adds a tuner host # noqa: E501
"""
pass
def test_post_livetv_tuners_by_id_reset(self):
"""Test case for post_livetv_tuners_by_id_reset
Resets a tv tuner # noqa: E501
"""
pass
def test_put_livetv_channelmappingoptions(self):
"""Test case for put_livetv_channelmappingoptions
"""
pass
def test_put_livetv_channelmappings(self):
"""Test case for put_livetv_channelmappings
"""
pass
if __name__ == '__main__':
unittest.main()
| 23.896203
| 78
| 0.637144
| 1,111
| 9,439
| 5.078308
| 0.10261
| 0.09252
| 0.105282
| 0.143566
| 0.830202
| 0.737859
| 0.521446
| 0.357497
| 0.193194
| 0.133286
| 0
| 0.01882
| 0.296324
| 9,439
| 394
| 79
| 23.956853
| 0.830623
| 0.447505
| 0
| 0.458333
| 1
| 0
| 0.001869
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.466667
| false
| 0.458333
| 0.041667
| 0
| 0.516667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
bdb88a765d8bc6b5e477b1b64ee817472716d342
| 66,946
|
py
|
Python
|
tests/pyoozie/test_client.py
|
Shopify/pyoozie
|
6dcbcb8249820dc40b5a9bbbc04b0370240fd706
|
[
"MIT"
] | 9
|
2017-03-16T15:30:23.000Z
|
2019-08-07T03:35:59.000Z
|
tests/pyoozie/test_client.py
|
Shopify/pyoozie
|
6dcbcb8249820dc40b5a9bbbc04b0370240fd706
|
[
"MIT"
] | 43
|
2017-02-01T22:20:52.000Z
|
2020-08-10T08:48:20.000Z
|
tests/pyoozie/test_client.py
|
Shopify/pyoozie
|
6dcbcb8249820dc40b5a9bbbc04b0370240fd706
|
[
"MIT"
] | 5
|
2017-02-08T17:59:06.000Z
|
2020-11-12T11:29:08.000Z
|
# Copyright (c) 2017 "Shopify inc." All rights reserved.
# Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
from __future__ import unicode_literals
import copy
import mock
import pytest
import requests_mock
import requests
from pyoozie import exceptions
from pyoozie import model
from pyoozie import client
from pyoozie import xml
# TODO: share these with test_model.py?
SAMPLE_COORD_ID = '0123456-123456789012345-oozie-oozi-C'
SAMPLE_COORD_ACTION = '0123456-123456789012345-oozie-oozi-C@12'
SAMPLE_WF_ID = '0123456-123456789012345-oozie-oozi-W'
SAMPLE_WF_ACTION = '0123456-123456789012345-oozie-oozi-W@foo'
@pytest.fixture
def oozie_config():
return {
'url': 'http://localhost:11000/oozie',
'user': 'oozie',
'timeout': 30,
'verbose': False,
'launcher_memory_in_mb': '5000',
'launcher_queue': 'test.ignore',
}
@pytest.fixture
def api(oozie_config):
with mock.patch('pyoozie.client.OozieClient._test_connection'):
yield client.OozieClient(**oozie_config)
@pytest.fixture
def api_with_session(oozie_config):
with mock.patch('pyoozie.client.OozieClient._test_connection'):
session = requests.Session()
session.headers.update({'test-header': 'true'})
yield client.OozieClient(session=session, **oozie_config)
@pytest.fixture
def sample_coordinator_running(api):
info = {
'coordJobId': SAMPLE_COORD_ID,
'status': 'RUNNING'
}
return model.Coordinator(api, info, None)
@pytest.fixture
def sample_coordinator_suspended(api):
info = {
'coordJobId': SAMPLE_COORD_ID,
'status': 'SUSPENDED'
}
return model.Coordinator(api, info, None)
@pytest.fixture
def sample_coordinator_killed(api):
info = {
'coordJobId': SAMPLE_COORD_ID,
'status': 'KILLED'
}
return model.Coordinator(api, info, None)
@pytest.fixture
def sample_coordinator_action_running(api, sample_coordinator_running):
info = {
'id': SAMPLE_COORD_ACTION,
'status': 'RUNNING'
}
action = model.CoordinatorAction(api, info, sample_coordinator_running)
action.parent().actions = {12: action}
return action
@pytest.fixture
def sample_coordinator_action_suspended(api, sample_coordinator_running):
info = {
'id': SAMPLE_COORD_ACTION,
'status': 'SUSPENDED'
}
action = model.CoordinatorAction(api, info, sample_coordinator_running)
action.parent().actions = {12: action}
return action
@pytest.fixture
def sample_coordinator_action_killed(api, sample_coordinator_running):
info = {
'id': SAMPLE_COORD_ACTION,
'status': 'KILLED'
}
action = model.CoordinatorAction(api, info, sample_coordinator_running)
action.parent().actions = {12: action}
return action
@pytest.fixture
def sample_coordinator_action_killed_with_killed_coordinator(api, sample_coordinator_killed):
info = {
'id': SAMPLE_COORD_ACTION,
'status': 'KILLED'
}
action = model.CoordinatorAction(api, info, sample_coordinator_killed)
action.parent().actions = {12: action}
return action
@pytest.fixture
def sample_workflow_running(api):
info = {
'id': SAMPLE_WF_ID,
'status': 'RUNNING'
}
return model.Workflow(api, info, None)
@pytest.fixture
def sample_workflow_suspended(api):
info = {
'id': SAMPLE_WF_ID,
'status': 'SUSPENDED'
}
return model.Workflow(api, info, None)
@pytest.fixture
def sample_workflow_killed(api):
info = {
'id': SAMPLE_WF_ID,
'status': 'KILLED'
}
return model.Workflow(api, info, None)
@pytest.fixture
def sample_workflow_prep(api):
info = {
'id': SAMPLE_WF_ID,
'status': 'PREP'
}
return model.Workflow(api, info, None)
class TestOozieClientCore(object):
@mock.patch('pyoozie.client.OozieClient._test_connection')
def test_construction(self, mock_test_conn, oozie_config):
api = client.OozieClient(**oozie_config)
assert not mock_test_conn.called
assert api._url == 'http://localhost:11000/oozie'
assert api._session
@mock.patch('pyoozie.client.OozieClient._test_connection')
def test_construction_custom_session(self, mock_test_conn, oozie_config):
session = requests.Session()
session.auth = ('user', 'pass')
api = client.OozieClient(session=session, **oozie_config)
assert not mock_test_conn.called
assert api._session.auth == session.auth
def test_test_connection(self, oozie_config):
with requests_mock.mock() as m:
session = requests.Session()
m.get('http://localhost:11000/oozie/versions', text='[0, 1, 2]')
client.OozieClient(**oozie_config)._test_connection()
client.OozieClient(session=session, **oozie_config)._test_connection()
m.get('http://localhost:11000/oozie/versions', text='[0, 1]')
with pytest.raises(exceptions.OozieException) as err:
client.OozieClient(**oozie_config)._test_connection()
assert 'does not support API version 2' in str(err)
m.get('http://localhost:11000/oozie/versions', status_code=404)
with pytest.raises(exceptions.OozieException) as err:
client.OozieClient(**oozie_config)._test_connection()
assert 'Unable to contact Oozie server' in str(err)
m.get('http://localhost:11000/oozie/versions', text='>>> fail <<<')
with pytest.raises(exceptions.OozieException) as err:
client.OozieClient(**oozie_config)._test_connection()
assert 'Invalid response from Oozie server' in str(err)
def test_test_connection_is_called_once(self, oozie_config):
with requests_mock.mock() as m:
m.get('http://localhost:11000/oozie/v2/admin/build-version', text='{}')
with mock.patch('pyoozie.client.OozieClient._test_connection') as m_test:
oozie_client = client.OozieClient(**oozie_config)
oozie_client.admin_build_version()
oozie_client.admin_build_version()
m_test.assert_called_once_with()
def test_request(self, api):
with requests_mock.mock() as m:
m.get('http://localhost:11000/oozie/v2/endpoint', text='{"result": "pass"}')
result = api._request('GET', 'endpoint', None, None)
assert result['result'] == 'pass'
with requests_mock.mock() as m:
m.get('http://localhost:11000/oozie/v2/endpoint')
result = api._request('GET', 'endpoint', None, None)
assert result is None
with requests_mock.mock() as m:
m.get('http://localhost:11000/oozie/v2/endpoint', text='>>> fail <<<')
with pytest.raises(exceptions.OozieException) as err:
api._request('GET', 'endpoint', None, None)
assert 'Invalid response from Oozie server' in str(err)
def test_request_uses_session_params(self, api_with_session):
with requests_mock.mock() as m:
m.get('http://localhost:11000/oozie/v2/endpoint', text='{"result": "pass"}')
result = api_with_session._request('GET', 'endpoint', None, None)
assert result['result'] == 'pass'
assert m.last_request.headers['test-header'] == 'true'
def test_get(self, api):
with requests_mock.mock() as m:
m.get('http://localhost:11000/oozie/v2/endpoint', text='{"result": "pass"}')
result = api._get('endpoint')
assert result['result'] == 'pass'
def test_put(self, api):
with requests_mock.mock() as m:
headers = {'Content-Type': 'application/xml'}
m.put('http://localhost:11000/oozie/v2/endpoint', request_headers=headers)
result = api._put('endpoint')
assert result is None
def test_post(self, api):
with requests_mock.mock() as m:
headers = {'Content-Type': 'application/xml'}
m.post('http://localhost:11000/oozie/v2/endpoint', request_headers=headers, text='{"result": "pass"}')
result = api._post('endpoint', content='<xml/>')
assert result['result'] == 'pass'
def test_headers(self, api):
headers = api._headers()
assert headers == {}
headers = api._headers(content_type='foo/bar')
assert headers == {'Content-Type': 'foo/bar'}
class TestOozieClientAdmin(object):
@pytest.mark.parametrize("function, endpoint", [
('admin_status', 'status'),
('admin_os_env', 'os-env'),
('admin_java_properties', 'java-sys-properties'),
('admin_configuration', 'configuration'),
('admin_instrumentation', 'instrumentation'),
('admin_metrics', 'metrics'),
('admin_build_version', 'build-version'),
('admin_available_timezones', 'available-timezones'),
('admin_queue_dump', 'queue-dump'),
('admin_available_oozie_servers', 'available-oozie-servers'),
])
def test_simple_admin(self, function, endpoint, api):
with mock.patch.object(api, '_get', return_value=True) as mock_get:
assert api.__getattribute__(function)()
mock_get.assert_called_with('admin/' + endpoint)
def test_admin_list_sharelib(self, api):
reply = {
'sharelib': [
{'name': 'oozie'},
{'name': 'hive'},
{'name': 'distcp'},
{'name': 'hcatalog'},
{'name': 'sqoop'},
{'name': 'mapreduce-streaming'},
{'name': 'spark'},
{'name': 'hive2'},
{'name': 'pig'}
]
}
expected = ['oozie', 'hive', 'distcp', 'hcatalog', 'sqoop', 'mapreduce-streaming', 'spark', 'hive2', 'pig']
with mock.patch.object(api, '_get', return_value=reply) as mock_get:
assert api.admin_list_sharelib() == expected
mock_get.assert_called_with('admin/list_sharelib')
def test_admin_list_all_sharelib(self, api):
libs = {
'admin/list_sharelib?lib=oozie': {'sharelib': [{'files': ['oozie1', 'oozie2'], 'name': 'oozie'}]},
'admin/list_sharelib?lib=distcp': {'sharelib': [{'files': ['distcp1', 'distcp2'], 'name': 'distcp'}]},
}
expected = {
'oozie': ['oozie1', 'oozie2'],
'distcp': ['distcp1', 'distcp2'],
}
with mock.patch.object(api, 'admin_list_sharelib', return_value=['oozie', 'distcp']):
with mock.patch.object(api, '_get') as mock_get:
mock_get.side_effect = lambda endpoint: libs[endpoint]
result = api.admin_list_all_sharelib()
assert result == expected
class TestOozieClientJobsQuery(object):
def test_jobs_query_workflow_parameters(self, api):
mock_result = {
'total': 0,
'workflows': []
}
with mock.patch.object(api, '_get') as mock_get:
mock_get.return_value = mock_result
api._jobs_query(model.ArtifactType.Workflow)
mock_get.assert_called_with('jobs?jobtype=wf&offset=1&len=5000')
api._jobs_query(model.ArtifactType.Workflow, user='john_doe')
mock_get.assert_called_with('jobs?jobtype=wf&filter=user=john_doe&offset=1&len=5000')
api._jobs_query(model.ArtifactType.Workflow, name='my_workflow')
mock_get.assert_called_with('jobs?jobtype=wf&filter=name=my_workflow&offset=1&len=5000')
api._jobs_query(model.ArtifactType.Workflow, status=model.WorkflowStatus.RUNNING)
mock_get.assert_called_with('jobs?jobtype=wf&filter=status=RUNNING&offset=1&len=5000')
api._jobs_query(model.ArtifactType.Workflow, status=model.WorkflowStatus.running())
mock_get.assert_called_with('jobs?jobtype=wf&filter=status=RUNNING;status=SUSPENDED&offset=1&len=5000')
api._jobs_query(
model.ArtifactType.Workflow,
user='john_doe',
name='my_workflow',
status=model.WorkflowStatus.running())
mock_get.assert_called_with('jobs?jobtype=wf&filter=user=john_doe;name=my_workflow;status=RUNNING;'
'status=SUSPENDED&offset=1&len=5000')
def test_jobs_query_coordinator_parameters(self, api):
mock_result = {
'total': 0,
'coordinatorjobs': []
}
with mock.patch.object(api, '_get') as mock_get:
mock_get.return_value = mock_result
api._jobs_query(model.ArtifactType.Coordinator)
mock_get.assert_called_with('jobs?jobtype=coordinator&offset=1&len=5000')
api._jobs_query(model.ArtifactType.Coordinator, user='john_doe')
mock_get.assert_called_with('jobs?jobtype=coordinator&filter=user=john_doe&offset=1&len=5000')
api._jobs_query(model.ArtifactType.Coordinator, name='my_coordinator')
mock_get.assert_called_with('jobs?jobtype=coordinator&filter=name=my_coordinator&offset=1&len=5000')
api._jobs_query(model.ArtifactType.Coordinator, status=model.CoordinatorStatus.RUNNING)
mock_get.assert_called_with('jobs?jobtype=coordinator&filter=status=RUNNING&offset=1&len=5000')
api._jobs_query(model.ArtifactType.Coordinator, status=model.CoordinatorStatus.running())
mock_get.assert_called_with('jobs?jobtype=coordinator&filter=status=RUNNING;status=RUNNINGWITHERROR;'
'status=SUSPENDED;status=SUSPENDEDWITHERROR&offset=1&len=5000')
api._jobs_query(
model.ArtifactType.Coordinator,
user='john_doe',
name='my_coordinator',
status=model.CoordinatorStatus.running())
mock_get.assert_called_with('jobs?jobtype=coordinator&filter=user=john_doe;name=my_coordinator;'
'status=RUNNING;status=RUNNINGWITHERROR;status=SUSPENDED;'
'status=SUSPENDEDWITHERROR&offset=1&len=5000')
def test_jobs_query_bad_parameters(self, api):
with pytest.raises(KeyError) as err:
api._jobs_query(model.ArtifactType.CoordinatorAction)
assert 'ArtifactType.CoordinatorAction' in str(err)
with pytest.raises(KeyError) as err:
api._jobs_query(model.ArtifactType.WorkflowAction)
assert 'ArtifactType.WorkflowAction' in str(err)
@mock.patch.object(model.Workflow, 'fill_in_details', side_effect=lambda c: c, autospec=True)
def test_jobs_query_workflow_pagination(self, _, api):
mock_results = iter(
[
{
'total': 5001,
'workflows': [{'id': '1-W'}, {'id': '2-W'}]
},
{
'total': 5001,
'workflows': [{'id': '3-W'}]
}
]
)
with mock.patch.object(api, '_get') as mock_get:
mock_get.side_effect = lambda url: next(mock_results)
result = api._jobs_query(model.ArtifactType.Workflow)
assert len(result) == 3
mock_get.assert_any_call('jobs?jobtype=wf&offset=1&len=5000')
mock_get.assert_any_call('jobs?jobtype=wf&offset=5001&len=5000')
with pytest.raises(StopIteration):
next(mock_results)
@pytest.mark.parametrize('limit, expected_result_count, expected_queries', [
(0, 3, ['jobs?jobtype=coordinator&offset=1&len=5000', 'jobs?jobtype=coordinator&offset=5001&len=5000']),
(2, 2, ['jobs?jobtype=coordinator&offset=1&len=2']),
(6000, 3, ['jobs?jobtype=coordinator&offset=1&len=5000', 'jobs?jobtype=coordinator&offset=5001&len=5000'])
])
@mock.patch.object(model.Coordinator, 'fill_in_details', side_effect=lambda c: c, autospec=True)
def test_jobs_query_coordinator_pagination(self, _, limit, expected_result_count, expected_queries, api):
mock_results = iter(
[
{
'total': 5001,
'coordinatorjobs': [{'coordJobId': '1-C'}, {'coordJobId': '2-C'}]
},
{
'total': 5001,
'coordinatorjobs': [{'coordJobId': '3-C'}]
}
]
)
with mock.patch.object(api, '_get') as mock_get:
mock_get.side_effect = lambda url: next(mock_results)
result = api._jobs_query(model.ArtifactType.Coordinator, limit=limit)
assert len(result) == expected_result_count
mock_get.assert_has_calls(mock.call(query) for query in expected_queries)
@mock.patch.object(model.Workflow, 'fill_in_details', side_effect=lambda c: c, autospec=True)
def test_jobs_query_workflow_details(self, fill_in_details, api):
mock_result = {
'total': 1,
'workflows': [{'id': '1-W'}]
}
with mock.patch.object(api, '_get') as mock_get:
mock_get.return_value = mock_result
api._jobs_query(model.ArtifactType.Workflow, details=False)
mock_get.assert_called_with('jobs?jobtype=wf&offset=1&len=5000')
assert not fill_in_details.called
api._jobs_query(model.ArtifactType.Workflow, details=True)
mock_get.assert_called_with('jobs?jobtype=wf&offset=1&len=5000')
assert fill_in_details.called
@mock.patch.object(model.Coordinator, 'fill_in_details', side_effect=lambda c: c, autospec=True)
def test_jobs_query_coordinator_details(self, fill_in_details, api):
mock_result = {
'total': 1,
'coordinatorjobs': [{'coordJobId': '1-C'}]
}
with mock.patch.object(api, '_get') as mock_get:
mock_get.return_value = mock_result
api._jobs_query(model.ArtifactType.Coordinator, details=False)
mock_get.assert_called_with('jobs?jobtype=coordinator&offset=1&len=5000')
assert not fill_in_details.called
api._jobs_query(model.ArtifactType.Coordinator, details=True)
mock_get.assert_called_with('jobs?jobtype=coordinator&offset=1&len=5000')
assert fill_in_details.called
def test_jobs_all_workflows(self, api, sample_workflow_running):
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = [sample_workflow_running]
api.jobs_all_workflows()
mock_query.assert_called_with(model.ArtifactType.Workflow, name=None, user=None, limit=0)
api.jobs_all_workflows(name='my_workflow')
mock_query.assert_called_with(model.ArtifactType.Workflow, name='my_workflow', user=None, limit=0)
api.jobs_all_workflows(user='john_doe')
mock_query.assert_called_with(model.ArtifactType.Workflow, name=None, user='john_doe', limit=0)
api.jobs_all_workflows(name='my_workflow', user='john_doe')
mock_query.assert_called_with(model.ArtifactType.Workflow, name='my_workflow', user='john_doe', limit=0)
api.jobs_all_workflows(name='my_workflow', limit=10)
mock_query.assert_called_with(model.ArtifactType.Workflow, name='my_workflow', user=None, limit=10)
def test_jobs_all_active_workflows(self, api, sample_workflow_running):
expected_statuses = model.WorkflowStatus.active()
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = [sample_workflow_running]
api.jobs_all_active_workflows()
mock_query.assert_called_with(
model.ArtifactType.Workflow, details=True, user=None, status=expected_statuses
)
api.jobs_all_active_workflows(user='john_doe')
mock_query.assert_called_with(
model.ArtifactType.Workflow, details=True, user='john_doe', status=expected_statuses
)
def test_jobs_all_running_workflows(self, api, sample_workflow_running):
expected_statuses = model.WorkflowStatus.running()
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = [sample_workflow_running]
api.jobs_all_running_workflows()
mock_query.assert_called_with(
model.ArtifactType.Workflow, details=True, user=None, status=expected_statuses
)
api.jobs_all_running_workflows(user='john_doe')
mock_query.assert_called_with(
model.ArtifactType.Workflow, details=True, user='john_doe', status=expected_statuses
)
def test_jobs_running_workflows(self, api, sample_workflow_running):
expected_statuses = model.WorkflowStatus.running()
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = [sample_workflow_running]
api.jobs_running_workflows('my_workflow')
mock_query.assert_called_with(
model.ArtifactType.Workflow,
details=True,
name='my_workflow',
user=None,
status=expected_statuses)
api.jobs_running_workflows('my_workflow', user='john_doe')
mock_query.assert_called_with(
model.ArtifactType.Workflow,
details=True,
name='my_workflow',
user='john_doe',
status=expected_statuses)
def test_jobs_last_workflow_parameters(self, api, sample_workflow_running):
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = [sample_workflow_running]
api.jobs_last_workflow('my_workflow')
mock_query.assert_called_with(model.ArtifactType.Workflow, name='my_workflow', user=None, limit=1)
api.jobs_last_workflow('my_workflow', user='john_doe')
mock_query.assert_called_with(model.ArtifactType.Workflow, name='my_workflow', user='john_doe', limit=1)
def test_jobs_workflow_names_parameters(self, api):
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = []
api.jobs_workflow_names()
mock_query.assert_called_with(model.ArtifactType.Workflow, user=None, details=False, limit=0)
api.jobs_workflow_names(user='john_doe')
mock_query.assert_called_with(model.ArtifactType.Workflow, user='john_doe', details=False, limit=0)
def test_jobs_all_coordinators(self, api, sample_coordinator_running):
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = [sample_coordinator_running]
api.jobs_all_coordinators()
mock_query.assert_called_with(
model.ArtifactType.Coordinator, details=True, name=None, user=None, limit=0
)
api.jobs_all_coordinators(name='my_coordinator')
mock_query.assert_called_with(
model.ArtifactType.Coordinator, details=True, name='my_coordinator', user=None, limit=0
)
api.jobs_all_coordinators(user='john_doe')
mock_query.assert_called_with(
model.ArtifactType.Coordinator, details=True, name=None, user='john_doe', limit=0
)
api.jobs_all_coordinators(name='my_coordinator', user='john_doe')
mock_query.assert_called_with(
model.ArtifactType.Coordinator,
details=True,
name='my_coordinator',
user='john_doe',
limit=0)
api.jobs_all_coordinators(name='my_coordinator', limit=1)
mock_query.assert_called_with(
model.ArtifactType.Coordinator,
details=True,
name='my_coordinator',
user=None,
limit=1)
def test_jobs_all_active_coordinators(self, api, sample_coordinator_running):
expected_statuses = model.CoordinatorStatus.active()
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = [sample_coordinator_running]
api.jobs_all_active_coordinators()
mock_query.assert_called_with(
model.ArtifactType.Coordinator, details=True, user=None, status=expected_statuses
)
api.jobs_all_active_coordinators(user='john_doe')
mock_query.assert_called_with(
model.ArtifactType.Coordinator, details=True, user='john_doe', status=expected_statuses
)
def test_jobs_all_running_coordinators(self, api, sample_coordinator_running):
expected_statuses = model.CoordinatorStatus.running()
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = [sample_coordinator_running]
api.jobs_all_running_coordinators()
mock_query.assert_called_with(
model.ArtifactType.Coordinator, details=True, user=None, status=expected_statuses
)
api.jobs_all_running_coordinators(user='john_doe')
mock_query.assert_called_with(
model.ArtifactType.Coordinator, details=True, user='john_doe', status=expected_statuses
)
def test_jobs_all_suspended_coordinators(self, api, sample_coordinator_suspended):
expected_statuses = model.CoordinatorStatus.suspended()
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = [sample_coordinator_suspended]
api.jobs_all_suspended_coordinators()
mock_query.assert_called_with(model.ArtifactType.Coordinator, user=None, status=expected_statuses)
api.jobs_all_suspended_coordinators(user='john_doe')
mock_query.assert_called_with(model.ArtifactType.Coordinator, user='john_doe', status=expected_statuses)
def test_jobs_running_coordinators(self, api, sample_coordinator_running):
expected_statuses = model.CoordinatorStatus.running()
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = [sample_coordinator_running]
api.jobs_running_coordinators('my_coordinator')
mock_query.assert_called_with(
model.ArtifactType.Coordinator,
name='my_coordinator',
user=None,
status=expected_statuses)
api.jobs_running_coordinators('my_coordinator', user='john_doe')
mock_query.assert_called_with(
model.ArtifactType.Coordinator,
name='my_coordinator',
user='john_doe',
status=expected_statuses)
def test_jobs_last_coordinator_parameters(self, api, sample_coordinator_running):
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = [sample_coordinator_running]
api.jobs_last_coordinator('my_coordinator')
mock_query.assert_called_with(model.ArtifactType.Coordinator, name='my_coordinator', user=None, limit=1)
api.jobs_last_coordinator('my_coordinator', user='john_doe')
mock_query.assert_called_with(
model.ArtifactType.Coordinator,
name='my_coordinator',
user='john_doe',
limit=1)
def test_jobs_coordinator_names_parameters(self, api):
with mock.patch.object(api, '_jobs_query') as mock_query:
mock_query.return_value = []
api.jobs_coordinator_names()
mock_query.assert_called_with(model.ArtifactType.Coordinator, user=None, details=False)
api.jobs_coordinator_names(user='john_doe')
mock_query.assert_called_with(model.ArtifactType.Coordinator, user='john_doe', details=False)
class TestOozieClientJobCoordinatorQuery(object):
    """Tests for the client's coordinator query helpers:
    _coordinator_query, _coordinator_action_query, _decode_coord_id and the
    public job_coordinator_* wrappers built on them."""
    def test_coordinator_query_parameters(self, api):
        """Job-ID parsing and URL construction for _coordinator_query."""
        mock_coord = {
            'total': 0,
            'coordJobId': SAMPLE_COORD_ID,
            'actions': []
        }
        mock_action = {
            'id': SAMPLE_COORD_ACTION,
        }
        with mock.patch.object(api, '_get') as mock_get:
            # Serve coordinator JSON for 'job/<id>?...' URLs and action JSON
            # for 'job/<id>@...' URLs; any other URL fails the test.
            def dummy_get(url):
                if url.startswith('job/' + SAMPLE_COORD_ID + '?'):
                    return mock_coord
                elif url.startswith('job/' + SAMPLE_COORD_ID + '@'):
                    return mock_action
                assert False, 'Unexpected URL'
            mock_get.side_effect = dummy_get
            # A malformed job ID is rejected before any HTTP round trip.
            with pytest.raises(ValueError) as err:
                api._coordinator_query('foo')
            assert 'Unrecognized job ID' in str(err)
            assert not mock_get.called
            api._coordinator_query(SAMPLE_COORD_ID)
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ID + '?offset=1&len=1')
            mock_get.reset_mock()
            # A malformed action suffix is likewise rejected up front.
            with pytest.raises(ValueError) as err:
                api._coordinator_query(SAMPLE_COORD_ID + '@foo')
            assert 'Unrecognized job ID' in str(err)
            assert not mock_get.called
            # A coordinator action ID fetches both the owning coordinator
            # (offset taken from the action number) and the action itself.
            api._coordinator_query(SAMPLE_COORD_ACTION)
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ID + '?offset=12&len=1')
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ACTION)
            mock_get.reset_mock()
            # Single status and status-set filters are encoded in the query string.
            api._coordinator_query(SAMPLE_COORD_ID, status=model.CoordinatorActionStatus.RUNNING)
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ID + '?offset=1&len=1&filter=status=RUNNING')
            mock_get.reset_mock()
            api._coordinator_query(SAMPLE_COORD_ID, status=model.CoordinatorActionStatus.running())
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ID +
                                     '?offset=1&len=1&filter=status=RUNNING;status=SUSPENDED')
            mock_get.reset_mock()
            # A status filter is incompatible with an action ID.
            with pytest.raises(ValueError) as err:
                api._coordinator_query(SAMPLE_COORD_ACTION, status=model.CoordinatorActionStatus.RUNNING)
            assert 'Cannot supply both coordinator action ID and status' in str(err)
            assert not mock_get.called
    def test_coordinator_query_limits(self, api):
        """offset/len arithmetic for start and limit against a coordinator
        reporting 100 total actions."""
        mock_result = {
            'total': 100,
            'coordJobId': SAMPLE_COORD_ID,
            'actions': []
        }
        with mock.patch.object(api, '_get') as mock_get:
            mock_get.return_value = mock_result
            # start / limit are incompatible with an action ID.
            with pytest.raises(ValueError) as err:
                api._coordinator_query(SAMPLE_COORD_ACTION, start=1)
            assert 'Cannot supply both coordinator action ID and start / limit' in str(err)
            with pytest.raises(ValueError) as err:
                api._coordinator_query(SAMPLE_COORD_ACTION, limit=10)
            assert 'Cannot supply both coordinator action ID and start / limit' in str(err)
            # No start/limit: probe with len=1, then fetch all 100 actions.
            api._coordinator_query(SAMPLE_COORD_ID)
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ID + '?offset=1&len=1')
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ID + '?offset=1&len=100')
            # start only: fetch the remaining (total - start + 1) actions.
            api._coordinator_query(SAMPLE_COORD_ID, start=10)
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ID + '?offset=10&len=1')
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ID + '?offset=10&len=91')
            # limit only: newest actions first via order=desc.
            api._coordinator_query(SAMPLE_COORD_ID, limit=10)
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ID + '?order=desc&offset=1&len=10')
            api._coordinator_query(SAMPLE_COORD_ID, start=10, limit=10)
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ID + '?offset=10&len=10')
            api._coordinator_query(SAMPLE_COORD_ID, start=99, limit=10)
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ID + '?offset=99&len=10')
            api._coordinator_query(SAMPLE_COORD_ID, status=model.CoordinatorActionStatus.RUNNING, start=10, limit=10)
            mock_get.assert_any_call('job/' + SAMPLE_COORD_ID + '?offset=10&len=10&filter=status=RUNNING')
    def test_coordinator_query_exception(self, api):
        """Communication errors are wrapped as "Coordinator ... not found"
        with the original error chained via caused_by."""
        with mock.patch.object(api, '_get') as mock_get:
            mock_get.side_effect = exceptions.OozieException.communication_error('A bad thing')
            with pytest.raises(exceptions.OozieException) as err:
                api._coordinator_query(SAMPLE_COORD_ID)
            assert "Coordinator '" + SAMPLE_COORD_ID + "' not found" in str(err)
            assert 'A bad thing' in str(err.value.caused_by)
    def test_coordinator_action_query(self, api):
        """_coordinator_action_query GETs 'job/<coord>@<n>' and links the
        returned action to the supplied coordinator object."""
        mock_result = {
            'id': SAMPLE_COORD_ACTION,
        }
        with mock.patch.object(api, '_get') as mock_get:
            mock_get.return_value = mock_result
            mock_coord = mock.Mock()
            mock_coord.actions = {}
            action = api._coordinator_action_query(SAMPLE_COORD_ID, 12, coordinator=mock_coord)
            mock_get.assert_called_with('job/' + SAMPLE_COORD_ACTION)
            assert action._parent == mock_coord
    def test_coordinator_action_query_exception(self, api):
        """Communication errors surface as "Coordinator action ... not found"."""
        with mock.patch.object(api, '_get') as mock_get:
            mock_get.side_effect = exceptions.OozieException.communication_error('A bad thing')
            with pytest.raises(exceptions.OozieException) as err:
                api._coordinator_action_query(SAMPLE_COORD_ID, 12)
            assert "Coordinator action '" + SAMPLE_COORD_ID + "@12' not found" in str(err)
            assert 'A bad thing' in str(err.value.caused_by)
    def test_decode_coord_id(self, api, sample_coordinator_running):
        """Validation matrix for _decode_coord_id: exactly one of
        coordinator_id / name / coordinator may be supplied; user is only
        valid alongside name."""
        with mock.patch.object(api, 'jobs_last_coordinator') as mock_last:
            mock_last.return_value = mock.Mock(coordJobId=SAMPLE_COORD_ID)
            with pytest.raises(ValueError) as err:
                api._decode_coord_id()
            assert 'Supply exactly one of coordinator_id or name' in str(err)
            with pytest.raises(ValueError) as err:
                api._decode_coord_id(coordinator_id=SAMPLE_COORD_ID, name='my_coordinator')
            assert 'Supply exactly one of coordinator_id or name' in str(err)
            with pytest.raises(ValueError) as err:
                api._decode_coord_id(coordinator_id=SAMPLE_COORD_ID, user='john_doe')
            assert 'User parameter not supported with coordinator_id' in str(err)
            # An explicit ID is returned unchanged.
            result = api._decode_coord_id(coordinator_id=SAMPLE_COORD_ID)
            assert result == SAMPLE_COORD_ID
            # A name is resolved through jobs_last_coordinator.
            result = api._decode_coord_id(name='my_coordinator')
            assert result == SAMPLE_COORD_ID
            mock_last.assert_called_with(name='my_coordinator', user=None)
            result = api._decode_coord_id(name='my_coordinator', user='john_doe')
            assert result == SAMPLE_COORD_ID
            mock_last.assert_called_with(name='my_coordinator', user='john_doe')
            # Name lookup failure becomes a "not found" OozieException.
            mock_last.return_value = None
            with pytest.raises(exceptions.OozieException) as err:
                api._decode_coord_id(name='my_coordinator')
            assert "Coordinator 'my_coordinator' not found" in str(err)
            # A coordinator object supplies its own ID and excludes the rest.
            result = api._decode_coord_id(coordinator=sample_coordinator_running)
            assert result == SAMPLE_COORD_ID
            with pytest.raises(ValueError) as err:
                api._decode_coord_id(coordinator_id=SAMPLE_COORD_ID, coordinator=sample_coordinator_running)
            assert 'Supply either a coordinator object or one of coordinator_id or name' in str(err)
            with pytest.raises(ValueError) as err:
                api._decode_coord_id(name='my_coordinator', coordinator=sample_coordinator_running)
            assert 'Supply either a coordinator object or one of coordinator_id or name' in str(err)
            with pytest.raises(ValueError) as err:
                api._decode_coord_id(coordinator=sample_coordinator_running, user='john_doe')
            assert 'User parameter not supported with coordinator object' in str(err)
    def test_job_coordinator_info(self, api):
        """job_coordinator_info decodes its arguments then queries with the
        requested limit (default 0 = all actions)."""
        with mock.patch.object(api, '_coordinator_query') as mock_query:
            with mock.patch.object(api, '_decode_coord_id') as mock_decode:
                mock_decode.return_value = SAMPLE_COORD_ID
                api.job_coordinator_info(coordinator_id=SAMPLE_COORD_ID)
                mock_decode.assert_called_with(SAMPLE_COORD_ID, None, None)
                mock_query.assert_called_with(SAMPLE_COORD_ID, limit=0)
                api.job_coordinator_info(name='my_coordinator')
                mock_decode.assert_called_with(None, 'my_coordinator', None)
                mock_query.assert_called_with(SAMPLE_COORD_ID, limit=0)
                api.job_coordinator_info(name='my_coordinator', user='john_doe')
                mock_decode.assert_called_with(None, 'my_coordinator', 'john_doe')
                mock_query.assert_called_with(SAMPLE_COORD_ID, limit=0)
                api.job_coordinator_info(coordinator_id=SAMPLE_COORD_ID, limit=10)
                mock_decode.assert_called_with(SAMPLE_COORD_ID, None, None)
                mock_query.assert_called_with(SAMPLE_COORD_ID, limit=10)
    def test_job_last_coordinator_info(self, api):
        """job_last_coordinator_info always queries with limit=1."""
        with mock.patch.object(api, '_coordinator_query') as mock_query:
            with mock.patch.object(api, '_decode_coord_id') as mock_decode:
                mock_decode.return_value = SAMPLE_COORD_ID
                api.job_last_coordinator_info(coordinator_id=SAMPLE_COORD_ID)
                mock_decode.assert_called_with(SAMPLE_COORD_ID, None, None)
                mock_query.assert_called_with(SAMPLE_COORD_ID, limit=1)
                api.job_last_coordinator_info(name='my_coordinator')
                mock_decode.assert_called_with(None, 'my_coordinator', None)
                mock_query.assert_called_with(SAMPLE_COORD_ID, limit=1)
                api.job_last_coordinator_info(name='my_coordinator', user='john_doe')
                mock_decode.assert_called_with(None, 'my_coordinator', 'john_doe')
                mock_query.assert_called_with(SAMPLE_COORD_ID, limit=1)
    def test_job_coordinator_action(self, api):
        """job_coordinator_action resolves an action either from a full action
        ID or from a coordinator (by id or name) plus an action_number."""
        with mock.patch.object(api, '_coordinator_action_query') as mock_query:
            with mock.patch.object(api, '_decode_coord_id') as mock_decode:
                mock_decode.return_value = SAMPLE_COORD_ID
                api.job_coordinator_action(SAMPLE_COORD_ACTION)
                mock_decode.assert_called_with(SAMPLE_COORD_ACTION, None, None, None)
                mock_query.assert_called_with(SAMPLE_COORD_ID, 12, coordinator=None)
                api.job_coordinator_action(SAMPLE_COORD_ID, action_number=12)
                mock_decode.assert_called_with(SAMPLE_COORD_ID, None, None, None)
                mock_query.assert_called_with(SAMPLE_COORD_ID, 12, coordinator=None)
                api.job_coordinator_action(name='my_coordinator', action_number=12)
                mock_decode.assert_called_with(None, 'my_coordinator', None, None)
                mock_query.assert_called_with(SAMPLE_COORD_ID, 12, coordinator=None)
                api.job_coordinator_action(name='my_coordinator', user='john_doe', action_number=12)
                mock_decode.assert_called_with(None, 'my_coordinator', 'john_doe', None)
                mock_query.assert_called_with(SAMPLE_COORD_ID, 12, coordinator=None)
                # Supplying both an action ID and an action_number is ambiguous.
                with pytest.raises(ValueError) as err:
                    api.job_coordinator_action(SAMPLE_COORD_ACTION, action_number=12)
                assert 'Supply exactly one of coordinator_id or action_number' in str(err)
                with pytest.raises(ValueError) as err:
                    api.job_coordinator_action(name='my_coordinator')
                assert 'No action_number supplied' in str(err)
    def test_job_coordinator_all_active_actions(self, api, sample_coordinator_running,
                                                sample_coordinator_action_running):
        """Queries actions in active statuses and, when given a coordinator
        object, back-fills its actions dict from the query result."""
        with mock.patch.object(api, '_coordinator_query') as mock_query:
            mock_query.return_value = sample_coordinator_action_running.parent()
            with mock.patch.object(api, '_decode_coord_id') as mock_decode:
                mock_decode.return_value = SAMPLE_COORD_ID
                api.job_coordinator_all_active_actions(coordinator_id=SAMPLE_COORD_ID)
                mock_decode.assert_called_with(SAMPLE_COORD_ID, None, None, None)
                mock_query.assert_called_with(SAMPLE_COORD_ID, status=model.CoordinatorActionStatus.active())
                api.job_coordinator_all_active_actions(name='my_coordinator')
                mock_decode.assert_called_with(None, 'my_coordinator', None, None)
                mock_query.assert_called_with(SAMPLE_COORD_ID, status=model.CoordinatorActionStatus.active())
                api.job_coordinator_all_active_actions(name='my_coordinator', user='john_doe')
                mock_decode.assert_called_with(None, 'my_coordinator', 'john_doe', None)
                mock_query.assert_called_with(SAMPLE_COORD_ID, status=model.CoordinatorActionStatus.active())
                # Pass a coordinator whose actions are unset; the call should
                # populate them from the fresh query.
                sample_coordinator = copy.copy(sample_coordinator_running)
                sample_coordinator.actions = None
                api.job_coordinator_all_active_actions(coordinator=sample_coordinator)
                mock_decode.assert_called_with(None, None, None, sample_coordinator)
                mock_query.assert_called_with(SAMPLE_COORD_ID, status=model.CoordinatorActionStatus.active())
                assert sample_coordinator.actions
                assert sample_coordinator.actions[12] == sample_coordinator_action_running
class TestOozieClientJobWorkflowQuery(object):
    """Tests for the client's workflow query helpers: _workflow_query,
    _decode_wf_id and job_workflow_info."""
    def test_workflow_query_parameters(self, api):
        """Both workflow and workflow-action IDs fetch the parent workflow."""
        mock_result = {
            'total': 0,
            'id': SAMPLE_WF_ID,
            'actions': []
        }
        with mock.patch.object(api, '_get') as mock_get:
            mock_get.return_value = mock_result
            # Malformed job IDs are rejected before any HTTP round trip.
            with pytest.raises(ValueError) as err:
                api._workflow_query('foo')
            assert 'Unrecognized job ID' in str(err)
            api._workflow_query(SAMPLE_WF_ID)
            mock_get.assert_called_with('job/' + SAMPLE_WF_ID)
            # An action ID resolves to its owning workflow's URL.
            api._workflow_query(SAMPLE_WF_ACTION)
            mock_get.assert_called_with('job/' + SAMPLE_WF_ID)
    def test_workflow_query_exception(self, api):
        """Communication errors are wrapped as "Workflow ... not found"
        with the original error chained via caused_by."""
        with mock.patch.object(api, '_get') as mock_get:
            mock_get.side_effect = exceptions.OozieException.communication_error('A bad thing')
            with pytest.raises(exceptions.OozieException) as err:
                api._workflow_query(SAMPLE_WF_ID)
            assert "Workflow '" + SAMPLE_WF_ID + "' not found" in str(err)
            assert 'A bad thing' in str(err.value.caused_by)
    def test_decode_wf_id(self, api):
        """Validation matrix for _decode_wf_id: exactly one of workflow_id or
        name; user only valid alongside name."""
        with mock.patch.object(api, 'jobs_last_workflow') as mock_last:
            mock_last.return_value = mock.Mock(id=SAMPLE_WF_ID)
            with pytest.raises(ValueError) as err:
                api._decode_wf_id()
            assert 'Supply exactly one of workflow_id or name' in str(err)
            with pytest.raises(ValueError) as err:
                api._decode_wf_id(workflow_id=SAMPLE_WF_ID, name='my_workflow')
            assert 'Supply exactly one of workflow_id or name' in str(err)
            with pytest.raises(ValueError) as err:
                api._decode_wf_id(workflow_id=SAMPLE_WF_ID, user='john_doe')
            assert 'User parameter not supported with workflow_id' in str(err)
            # An explicit ID is returned unchanged.
            result = api._decode_wf_id(workflow_id=SAMPLE_WF_ID)
            assert result == SAMPLE_WF_ID
            # A name is resolved through jobs_last_workflow.
            result = api._decode_wf_id(name='my_workflow')
            assert result == SAMPLE_WF_ID
            mock_last.assert_called_with(name='my_workflow', user=None)
            result = api._decode_wf_id(name='my_workflow', user='john_doe')
            assert result == SAMPLE_WF_ID
            mock_last.assert_called_with(name='my_workflow', user='john_doe')
            # Name lookup failure becomes a "not found" OozieException.
            mock_last.return_value = None
            with pytest.raises(exceptions.OozieException) as err:
                api._decode_wf_id(name='my_workflow')
            assert "Workflow 'my_workflow' not found" in str(err)
    def test_job_workflow_info(self, api):
        """job_workflow_info decodes its arguments then runs a workflow query."""
        with mock.patch.object(api, '_workflow_query') as mock_query:
            with mock.patch.object(api, '_decode_wf_id') as mock_decode:
                mock_decode.return_value = SAMPLE_WF_ID
                api.job_workflow_info(workflow_id=SAMPLE_WF_ID)
                mock_decode.assert_called_with(SAMPLE_WF_ID, None, None)
                mock_query.assert_called_with(SAMPLE_WF_ID)
                api.job_workflow_info(name='my_workflow')
                mock_decode.assert_called_with(None, 'my_workflow', None)
                mock_query.assert_called_with(SAMPLE_WF_ID)
                api.job_workflow_info(name='my_workflow', user='john_doe')
                mock_decode.assert_called_with(None, 'my_workflow', 'john_doe')
                mock_query.assert_called_with(SAMPLE_WF_ID)
class TestOozieClientJobQuery(object):
    """Tests for job_info / job_action_info dispatch by job-ID shape."""

    def test_job_info(self, api):
        """job_info routes coordinator-style IDs to job_coordinator_info and
        workflow-style IDs to job_workflow_info; anything else raises."""
        with mock.patch.object(api, 'job_coordinator_info') as mock_coord_info:
            with mock.patch.object(api, 'job_workflow_info') as mock_workflow_info:
                api.job_info(SAMPLE_COORD_ID)
                mock_coord_info.assert_called_with(coordinator_id=SAMPLE_COORD_ID)
                assert not mock_workflow_info.called
                mock_coord_info.reset_mock()
                api.job_info(SAMPLE_COORD_ACTION)
                mock_coord_info.assert_called_with(coordinator_id=SAMPLE_COORD_ACTION)
                assert not mock_workflow_info.called
                mock_coord_info.reset_mock()
                api.job_info(SAMPLE_WF_ID)
                mock_workflow_info.assert_called_with(workflow_id=SAMPLE_WF_ID)
                assert not mock_coord_info.called
                mock_workflow_info.reset_mock()
                api.job_info(SAMPLE_WF_ACTION)
                mock_workflow_info.assert_called_with(workflow_id=SAMPLE_WF_ACTION)
                assert not mock_coord_info.called
                mock_workflow_info.reset_mock()
                # Unrecognized ID: neither lookup is attempted.
                with pytest.raises(exceptions.OozieException) as err:
                    api.job_info("wat?")
                assert "'wat?' does not match any known job" in str(err)
                assert not mock_coord_info.called
                assert not mock_workflow_info.called

    def test_job_action_info(self, api):
        """job_action_info additionally resolves the action object when the ID
        carries an action suffix.

        Fix: the "action not called" checks previously inspected
        ``mock_x.action`` — an auto-created attribute mock that nothing ever
        invokes, so those assertions were vacuous. The client calls
        ``.action(...)`` on the *returned* info object, i.e. on
        ``mock_x.return_value.action`` (as the positive assertions via
        ``mock_x().action`` confirm), so that is what we check here.
        """
        with mock.patch.object(api, 'job_coordinator_info') as mock_coord_info:
            with mock.patch.object(api, 'job_workflow_info') as mock_workflow_info:
                api.job_action_info(SAMPLE_COORD_ID)
                mock_coord_info.assert_called_with(coordinator_id=SAMPLE_COORD_ID)
                # A bare coordinator ID must not trigger an action lookup.
                assert not mock_coord_info.return_value.action.called
                assert not mock_workflow_info.called
                mock_coord_info.reset_mock()
                api.job_action_info(SAMPLE_COORD_ACTION)
                mock_coord_info.assert_called_with(coordinator_id=SAMPLE_COORD_ACTION)
                mock_coord_info().action.assert_called_with(12)
                assert not mock_workflow_info.called
                mock_coord_info.reset_mock()
                api.job_action_info(SAMPLE_WF_ID)
                mock_workflow_info.assert_called_with(workflow_id=SAMPLE_WF_ID)
                # A bare workflow ID must not trigger an action lookup.
                assert not mock_workflow_info.return_value.action.called
                assert not mock_coord_info.called
                mock_workflow_info.reset_mock()
                api.job_action_info(SAMPLE_WF_ACTION)
                mock_workflow_info.assert_called_with(workflow_id=SAMPLE_WF_ACTION)
                mock_workflow_info().action.assert_called_with('foo')
                assert not mock_coord_info.called
                mock_workflow_info.reset_mock()
                with pytest.raises(exceptions.OozieException) as err:
                    api.job_action_info("wat?")
                assert "'wat?' does not match any known job" in str(err)
                assert not mock_coord_info.called
                assert not mock_workflow_info.called
class TestOozieClientJobCoordinatorManage(object):
    """Tests for coordinator management: suspend / resume / kill / rerun /
    update. Operations are no-ops (return falsy, no PUT) when the target is
    already in the requested state."""
    def test_fetch_coordinator_or_action(self, api, sample_coordinator_running, sample_coordinator_action_running):
        """_fetch_coordinator_or_action returns the coordinator for a plain ID
        and the matching action object for an action ID."""
        with mock.patch.object(api, '_decode_coord_id') as mock_decode:
            with mock.patch.object(api, 'job_coordinator_info') as mock_info:
                mock_decode.return_value = SAMPLE_COORD_ID
                mock_info.return_value = sample_coordinator_running
                result = api._fetch_coordinator_or_action(SAMPLE_COORD_ID)
                assert result == sample_coordinator_running
                assert mock_decode.called
                assert mock_info.called
        with mock.patch.object(api, '_decode_coord_id') as mock_decode:
            with mock.patch.object(api, 'job_coordinator_info') as mock_info:
                mock_decode.return_value = SAMPLE_COORD_ACTION
                mock_info.return_value = sample_coordinator_action_running.coordinator()
                result = api._fetch_coordinator_or_action(SAMPLE_COORD_ACTION)
                assert result == sample_coordinator_action_running
                assert mock_decode.called
                assert mock_info.called
    def test_job_coordinator_suspend_coordinator(self, api, sample_coordinator_running, sample_coordinator_suspended):
        """Suspend PUTs action=suspend; no-op if already suspended."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_action_info') as mock_info:
                mock_info.return_value = sample_coordinator_running
                assert api.job_coordinator_suspend(SAMPLE_COORD_ID)
                mock_put.assert_called_with('job/' + SAMPLE_COORD_ID + '?action=suspend')
                mock_put.reset_mock()
                mock_info.return_value = sample_coordinator_suspended
                assert not api.job_coordinator_suspend(SAMPLE_COORD_ID)
                assert not mock_put.called
                mock_put.reset_mock()
    def test_job_coordinator_suspend_coordinator_action(self, api, sample_coordinator_action_running,
                                                        sample_coordinator_action_suspended):
        """Suspending an action adds type=action&scope=<n> to the PUT."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_action_info') as mock_info:
                mock_info.return_value = sample_coordinator_action_running
                assert api.job_coordinator_suspend(SAMPLE_COORD_ACTION)
                mock_put.assert_called_with('job/' + SAMPLE_COORD_ID + '?action=suspend&type=action&scope=12')
                mock_put.reset_mock()
                mock_info.return_value = sample_coordinator_action_suspended
                assert not api.job_coordinator_suspend(SAMPLE_COORD_ACTION)
                assert not mock_put.called
                mock_put.reset_mock()
    def test_job_coordinator_resume_coordinator(self, api, sample_coordinator_running, sample_coordinator_suspended):
        """Resume PUTs action=resume; no-op if already running."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_action_info') as mock_info:
                mock_info.return_value = sample_coordinator_suspended
                assert api.job_coordinator_resume(SAMPLE_COORD_ID)
                mock_put.assert_called_with('job/' + SAMPLE_COORD_ID + '?action=resume')
                mock_put.reset_mock()
                mock_info.return_value = sample_coordinator_running
                assert not api.job_coordinator_resume(SAMPLE_COORD_ID)
                assert not mock_put.called
                mock_put.reset_mock()
    def test_job_coordinator_resume_coordinator_action(self, api, sample_coordinator_action_running,
                                                       sample_coordinator_action_suspended):
        """Resuming an action adds type=action&scope=<n> to the PUT."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_action_info') as mock_info:
                mock_info.return_value = sample_coordinator_action_suspended
                assert api.job_coordinator_resume(SAMPLE_COORD_ACTION)
                mock_put.assert_called_with('job/' + SAMPLE_COORD_ID + '?action=resume&type=action&scope=12')
                mock_put.reset_mock()
                mock_info.return_value = sample_coordinator_action_running
                assert not api.job_coordinator_resume(SAMPLE_COORD_ACTION)
                assert not mock_put.called
                mock_put.reset_mock()
    def test_job_coordinator_kill_coordinator(self, api, sample_coordinator_running, sample_coordinator_killed):
        """Kill PUTs action=kill; no-op if already killed."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_action_info') as mock_info:
                mock_info.return_value = sample_coordinator_running
                assert api.job_coordinator_kill(SAMPLE_COORD_ID)
                mock_put.assert_called_with('job/' + SAMPLE_COORD_ID + '?action=kill')
                mock_put.reset_mock()
                mock_info.return_value = sample_coordinator_killed
                assert not api.job_coordinator_kill(SAMPLE_COORD_ID)
                assert not mock_put.called
                mock_put.reset_mock()
    def test_job_coordinator_kill_coordinator_action(self, api, sample_coordinator_action_running,
                                                     sample_coordinator_action_killed):
        """Killing an action adds type=action&scope=<n> to the PUT."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_action_info') as mock_info:
                mock_info.return_value = sample_coordinator_action_running
                assert api.job_coordinator_kill(SAMPLE_COORD_ACTION)
                mock_put.assert_called_with('job/' + SAMPLE_COORD_ID + '?action=kill&type=action&scope=12')
                mock_put.reset_mock()
                mock_info.return_value = sample_coordinator_action_killed
                assert not api.job_coordinator_kill(SAMPLE_COORD_ACTION)
                assert not mock_put.called
                mock_put.reset_mock()
    def test_job_coordinator_rerun(self, api, sample_coordinator_action_running,
                                   sample_coordinator_action_killed,
                                   sample_coordinator_action_killed_with_killed_coordinator):
        """Rerun PUTs coord-rerun for a killed action; refused (no PUT) when
        the owning coordinator is killed or the action is still running."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_action_info') as mock_info:
                mock_info.return_value = sample_coordinator_action_killed
                assert api.job_coordinator_rerun(SAMPLE_COORD_ACTION)
                mock_put.assert_called_with('job/' + SAMPLE_COORD_ID +
                                            '?action=coord-rerun&type=action&scope=12&refresh=true')
                mock_put.reset_mock()
                mock_info.return_value = sample_coordinator_action_killed_with_killed_coordinator
                assert not api.job_coordinator_rerun(SAMPLE_COORD_ACTION)
                assert not mock_put.called
                mock_put.reset_mock()
                mock_info.return_value = sample_coordinator_action_running
                assert not api.job_coordinator_rerun(SAMPLE_COORD_ACTION)
                assert not mock_put.called
                mock_put.reset_mock()
    def test_job_coordinator_rerun_only_supports_actions(self, api, sample_coordinator_running):
        """Rerunning a whole coordinator (non-action ID) is a ValueError."""
        with mock.patch.object(api, 'job_action_info') as mock_info:
            mock_info.return_value = sample_coordinator_running
            with pytest.raises(ValueError) as value_error:
                api.job_coordinator_rerun(SAMPLE_COORD_ID)
            assert str(value_error.value) == 'Rerun only supports coordinator action IDs'
    def test_job_coordinator_update(self, api, sample_coordinator_running, sample_coordinator_killed):
        """Update PUTs fresh submission XML; requires an active coordinator
        and an 'update' section in the server reply."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_coordinator_info') as mock_info:
                mock_info.return_value = sample_coordinator_running
                mock_put.return_value = {'update': {'diff': "****Empty Diff****"}}
                coord = api.job_coordinator_update(SAMPLE_COORD_ID, '/dummy/coord-path-minimal')
                # The PUT payload is the same XML the submission helper builds.
                conf = xml._coordinator_submission_xml('oozie', '/dummy/coord-path-minimal')
                mock_put.assert_called_with('job/' + SAMPLE_COORD_ID + "?action=update", conf)
                mock_info.assert_called_with(coordinator_id=SAMPLE_COORD_ID)
                assert coord is sample_coordinator_running
                mock_put.reset_mock()
                mock_info.reset_mock()
                mock_info.return_value = sample_coordinator_running
                mock_put.return_value = {'update': {'diff': "*****Diffs*****"}}
                coord = api.job_coordinator_update(SAMPLE_COORD_ID, '/dummy/coord-path-full')
                conf = xml._coordinator_submission_xml('oozie', '/dummy/coord-path-full')
                mock_put.assert_called_with('job/' + SAMPLE_COORD_ID + "?action=update", conf)
                mock_info.assert_called_with(coordinator_id=SAMPLE_COORD_ID)
                assert coord is sample_coordinator_running
                mock_put.reset_mock()
                mock_info.reset_mock()
                # A killed coordinator cannot be updated.
                mock_info.return_value = sample_coordinator_killed
                with pytest.raises(exceptions.OozieException) as err:
                    api.job_coordinator_update(SAMPLE_COORD_ID, '/dummy/coord-path-full')
                assert 'coordinator status must be active in order to update' in str(err)
                # A reply without an 'update' section is an error.
                mock_info.return_value = sample_coordinator_running
                mock_put.return_value = {}
                with pytest.raises(exceptions.OozieException) as err:
                    api.job_coordinator_update(SAMPLE_COORD_ID, '/dummy/coord-path-full')
                assert 'update coordinator' in str(err)
class TestOozieClientJobWorkflowManage(object):
    """Tests for workflow suspend / resume / start. Each operation is a no-op
    (returns falsy, no PUT) when the workflow is already in the target state.
    Workflow-action IDs are accepted and operate on the parent workflow."""
    def test_job_workflow_suspend_workflow(self, api, sample_workflow_running, sample_workflow_suspended):
        """Suspend PUTs action=suspend only for a not-yet-suspended workflow."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_workflow_info') as mock_info:
                mock_info.return_value = sample_workflow_running
                assert api.job_workflow_suspend(SAMPLE_WF_ID)
                mock_put.assert_called_with('job/' + SAMPLE_WF_ID + '?action=suspend')
                mock_put.reset_mock()
                mock_info.return_value = sample_workflow_suspended
                assert not api.job_workflow_suspend(SAMPLE_WF_ID)
                assert not mock_put.called
                mock_put.reset_mock()
    def test_job_workflow_suspend_workflow_action(self, api, sample_workflow_running, sample_workflow_suspended):
        """A workflow-action ID suspends the parent workflow."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_workflow_info') as mock_info:
                mock_info.return_value = sample_workflow_running
                assert api.job_workflow_suspend(SAMPLE_WF_ACTION)
                mock_put.assert_called_with('job/' + SAMPLE_WF_ID + '?action=suspend')
                mock_put.reset_mock()
                mock_info.return_value = sample_workflow_suspended
                assert not api.job_workflow_suspend(SAMPLE_WF_ACTION)
                assert not mock_put.called
                mock_put.reset_mock()
    def test_job_workflow_resume_workflow(self, api, sample_workflow_running, sample_workflow_suspended):
        """Resume PUTs action=resume only for a suspended workflow."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_workflow_info') as mock_info:
                mock_info.return_value = sample_workflow_suspended
                assert api.job_workflow_resume(SAMPLE_WF_ID)
                mock_put.assert_called_with('job/' + SAMPLE_WF_ID + '?action=resume')
                mock_put.reset_mock()
                mock_info.return_value = sample_workflow_running
                assert not api.job_workflow_resume(SAMPLE_WF_ID)
                assert not mock_put.called
                mock_put.reset_mock()
    def test_job_workflow_resume_workflow_action(self, api, sample_workflow_running, sample_workflow_suspended):
        """A workflow-action ID resumes the parent workflow."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_workflow_info') as mock_info:
                mock_info.return_value = sample_workflow_suspended
                assert api.job_workflow_resume(SAMPLE_WF_ACTION)
                mock_put.assert_called_with('job/' + SAMPLE_WF_ID + '?action=resume')
                mock_put.reset_mock()
                mock_info.return_value = sample_workflow_running
                assert not api.job_workflow_resume(SAMPLE_WF_ACTION)
                assert not mock_put.called
                mock_put.reset_mock()
    def test_job_workflow_start_workflow(self, api, sample_workflow_running, sample_workflow_prep):
        """Start PUTs action=start only for a workflow still in PREP."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_workflow_info') as mock_info:
                mock_info.return_value = sample_workflow_prep
                assert api.job_workflow_start(SAMPLE_WF_ID)
                mock_put.assert_called_with('job/' + SAMPLE_WF_ID + '?action=start')
                mock_put.reset_mock()
                mock_info.return_value = sample_workflow_running
                assert not api.job_workflow_start(SAMPLE_WF_ID)
                assert not mock_put.called
                mock_put.reset_mock()
    def test_job_workflow_start_workflow_action(self, api, sample_workflow_running, sample_workflow_prep):
        """A workflow-action ID starts the parent workflow."""
        with mock.patch.object(api, '_put') as mock_put:
            with mock.patch.object(api, 'job_workflow_info') as mock_info:
                mock_info.return_value = sample_workflow_prep
                assert api.job_workflow_start(SAMPLE_WF_ACTION)
                mock_put.assert_called_with('job/' + SAMPLE_WF_ID + '?action=start')
                mock_put.reset_mock()
                mock_info.return_value = sample_workflow_running
                assert not api.job_workflow_start(SAMPLE_WF_ACTION)
                assert not mock_put.called
                mock_put.reset_mock()
class TestOozieClientJobSubmit(object):
    def test_jobs_submit_coordinator(self, api, sample_coordinator_running):
        """jobs_submit_coordinator POSTs to 'jobs' and resolves the returned
        ID via job_coordinator_info; a reply without an ID raises."""
        with mock.patch.object(api, '_post') as mock_post:
            with mock.patch.object(api, 'job_coordinator_info') as mock_info:
                mock_info.return_value = sample_coordinator_running
                # Server returned no job ID: submission fails loudly.
                mock_post.return_value = None
                with pytest.raises(exceptions.OozieException) as err:
                    api.jobs_submit_coordinator('/dummy/coord-path')
                assert 'Operation failed: submit coordinator' in str(err)
                mock_post.assert_called_with('jobs', mock.ANY)
                mock_post.reset_mock()
                # Successful submission fetches the new coordinator's info.
                mock_post.return_value = {'id': SAMPLE_COORD_ID}
                coord = api.jobs_submit_coordinator('/dummy/coord-path')
                mock_post.assert_called_with('jobs', mock.ANY)
                mock_info.assert_called_with(coordinator_id=SAMPLE_COORD_ID)
                assert coord is sample_coordinator_running
                mock_post.reset_mock()
def test_jobs_submit_coordinator_config(self, api, sample_coordinator_running):
with mock.patch.object(api, '_post') as mock_post:
with mock.patch.object(api, 'job_coordinator_info') as mock_info:
mock_info.return_value = sample_coordinator_running
mock_post.return_value = {'id': SAMPLE_COORD_ID}
api.jobs_submit_coordinator('/dummy/coord-path')
conf = mock_post.call_args[0][1].decode('utf-8')
assert '<name>oozie.coord.application.path</name><value>/dummy/coord-path</value>' in conf
assert '<name>user.name</name><value>oozie</value>' in conf
mock_post.reset_mock()
api.jobs_submit_coordinator('/dummy/coord-path', configuration={'test.prop': 'this is a test'})
conf = mock_post.call_args[0][1].decode('utf-8')
assert '<name>test.prop</name><value>this is a test</value>' in conf
mock_post.reset_mock()
def test_jobs_submit_workflow(self, api, sample_workflow_running):
with mock.patch.object(api, '_post') as mock_post:
with mock.patch.object(api, 'job_workflow_info') as mock_info:
mock_info.return_value = sample_workflow_running
mock_post.return_value = None
with pytest.raises(exceptions.OozieException) as err:
api.jobs_submit_workflow('/dummy/wf-path')
assert 'Operation failed: submit workflow' in str(err)
mock_post.assert_called_with('jobs', mock.ANY)
mock_post.reset_mock()
mock_post.return_value = {'id': SAMPLE_WF_ID}
wf = api.jobs_submit_workflow('/dummy/wf-path', start=True)
mock_post.assert_called_with('jobs?action=start', mock.ANY)
assert wf is sample_workflow_running
mock_post.reset_mock()
mock_post.return_value = {'id': SAMPLE_WF_ID}
wf = api.jobs_submit_workflow('/dummy/wf-path')
mock_post.assert_called_with('jobs', mock.ANY)
mock_info.assert_called_with(workflow_id=SAMPLE_WF_ID)
assert wf is sample_workflow_running
mock_post.reset_mock()
def test_jobs_submit_workflow_config(self, api, sample_workflow_running):
with mock.patch.object(api, '_post') as mock_post:
with mock.patch.object(api, 'job_workflow_info') as mock_info:
mock_info.return_value = sample_workflow_running
mock_post.return_value = {'id': SAMPLE_WF_ID}
api.jobs_submit_workflow('/dummy/wf-path')
conf = mock_post.call_args[0][1].decode('utf-8')
assert '<name>oozie.wf.application.path</name><value>/dummy/wf-path</value>' in conf
assert '<name>user.name</name><value>oozie</value>' in conf
mock_post.reset_mock()
api.jobs_submit_workflow('/dummy/wf-path', configuration={'test.prop': 'this is a test'})
conf = mock_post.call_args[0][1].decode('utf-8')
assert '<name>test.prop</name><value>this is a test</value>' in conf
mock_post.reset_mock()
| 47.045678
| 118
| 0.647776
| 7,990
| 66,946
| 5.087484
| 0.039049
| 0.036532
| 0.049989
| 0.040665
| 0.883564
| 0.850943
| 0.823292
| 0.793599
| 0.759428
| 0.713769
| 0
| 0.010078
| 0.257401
| 66,946
| 1,422
| 119
| 47.078762
| 0.807579
| 0.002838
| 0
| 0.522847
| 0
| 0.002636
| 0.136311
| 0.041527
| 0
| 0
| 0
| 0.000703
| 0.255712
| 1
| 0.073814
| false
| 0.007909
| 0.008787
| 0.000879
| 0.102812
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
da1cd997565c598625d3fbc3be2100124fc27c2c
| 21,915
|
py
|
Python
|
multipy/flux.py
|
kamilazdybal/multipy
|
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
|
[
"MIT"
] | null | null | null |
multipy/flux.py
|
kamilazdybal/multipy
|
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
|
[
"MIT"
] | null | null | null |
multipy/flux.py
|
kamilazdybal/multipy
|
ebdcddb63bfb1cd647ca99bbf9002b04a9b50ed9
|
[
"MIT"
] | null | null | null |
"""multipy: Python library for multicomponent mass transfer"""
__author__ = "James C. Sutherland, Kamila Zdybal"
__copyright__ = "Copyright (c) 2022, James C. Sutherland, Kamila Zdybal"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = ["Kamila Zdybal"]
__email__ = ["kamilazdybal@gmail.com"]
__status__ = "Production"
import numpy as np
import pandas as pd
import random
import copy
import scipy
import multipy
import warnings
gas_constant = 8.31446261815324
################################################################################
################################################################################
####
#### Class: Flux
####
################################################################################
################################################################################
class Flux:
"""
Supports computing and storing fluxes. This class assumes that the species velocities, :math:`\\mathbf{u}_i`, are known.
Diffusive fluxes:
- mass diffusive flux relative to a mass-averaged velocity, :math:`\mathbf{j}_i`
- mass diffusive flux relative to a molar-averaged velocity, :math:`\mathbf{j}_i^u`
- molar diffusive flux relative to a mass-averaged velocity, :math:`\mathbf{J}_i^v`
- molar diffusive flux relative to a molar-averaged velocity, :math:`\mathbf{J}_i`
:param species_velocities:
vector ``numpy.ndarray`` specifying the species velocities :math:`\mathbf{u}_i` in :math:`[m/s]`. It should be of size ``(n_species,n_observations)``.
**Getters:**
- **get_species_velocities**
- **get_diffusive_molar_molar** (is set to ``None`` at class init)
- **get_diffusive_molar_mass** (is set to ``None`` at class init)
- **get_diffusive_mass_molar** (is set to ``None`` at class init)
- **get_diffusive_mass_mass** (is set to ``None`` at class init)
**Setters:**
- **set_species_velocities**
- **set_diffusive_molar_molar** (is set to ``None`` at class init)
- **set_diffusive_molar_mass** (is set to ``None`` at class init)
- **set_diffusive_mass_molar** (is set to ``None`` at class init)
- **set_diffusive_mass_mass** (is set to ``None`` at class init)
"""
# --------------------------------------------------------------------------
def __init__(self, species_velocities):
if not isinstance(species_velocities, np.ndarray):
raise ValueError("Parameter `species_velocities` has to be of type `numpy.ndarray`.")
try:
(n_species, n_observations) = np.shape(species_velocities)
except:
raise ValueError("Parameter `species_velocities` has to be a matrix.")
if n_species < 2:
raise ValueError("Parameter `species_velocities` has to have at least two species.")
self.__species_velocities = species_velocities
self.__velocity = multipy.Velocity(self.get_species_velocities)
self.__diffusive_molar_molar = None
self.__diffusive_molar_mass = None
self.__diffusive_mass_molar = None
self.__diffusive_mass_mass = None
@property
def get_species_velocities(self):
return self.__species_velocities
@property
def get_diffusive_molar_molar(self):
return self.__diffusive_molar_molar
@property
def get_diffusive_molar_mass(self):
return self.__diffusive_molar_mass
@property
def get_diffusive_mass_molar(self):
return self.__diffusive_mass_molar
@property
def get_diffusive_mass_mass(self):
return self.__diffusive_mass_mass
@get_species_velocities.setter
def set_species_velocities(self, new_species_velocities):
if new_species_velocities is not None:
if not isinstance(new_species_velocities, np.ndarray):
raise ValueError("Parameter `species_velocities` has to be of type `numpy.ndarray`.")
try:
(n_species, n_observations) = np.shape(new_species_velocities)
except:
raise ValueError("Parameter `species_velocities` has to be a matrix.")
self.__species_velocities = new_species_velocities
@get_diffusive_molar_molar.setter
def set_diffusive_molar_molar(self, new_diffusive_molar_molar):
if new_diffusive_molar_molar is not None:
if not isinstance(new_diffusive_molar_molar, np.ndarray):
raise ValueError("Parameter `diffusive_molar_molar` has to be of type `numpy.ndarray`.")
try:
(n_species, n_observations) = np.shape(new_diffusive_molar_molar)
except:
raise ValueError("Parameter `diffusive_molar_molar` has to be a matrix.")
self.__diffusive_molar_molar = new_diffusive_molar_molar
@get_diffusive_molar_mass.setter
def set_diffusive_molar_mass(self, new_diffusive_molar_mass):
if new_diffusive_molar_mass is not None:
if not isinstance(new_diffusive_molar_mass, np.ndarray):
raise ValueError("Parameter `diffusive_molar_mass` has to be of type `numpy.ndarray`.")
try:
(n_species, n_observations) = np.shape(new_diffusive_molar_mass)
except:
raise ValueError("Parameter `diffusive_molar_mass` has to be a matrix.")
self.__diffusive_molar_mass = new_diffusive_molar_mass
@get_diffusive_mass_molar.setter
def set_diffusive_mass_molar(self, new_diffusive_mass_molar):
if new_diffusive_mass_molar is not None:
if not isinstance(new_diffusive_mass_molar, np.ndarray):
raise ValueError("Parameter `diffusive_mass_molar` has to be of type `numpy.ndarray`.")
try:
(n_species, n_observations) = np.shape(new_diffusive_mass_molar)
except:
raise ValueError("Parameter `diffusive_mass_molar` has to be a matrix.")
self.__diffusive_mass_molar = new_diffusive_mass_molar
@get_diffusive_mass_mass.setter
def set_diffusive_mass_mass(self, new_diffusive_mass_mass):
if new_diffusive_mass_mass is not None:
if not isinstance(new_diffusive_mass_mass, np.ndarray):
raise ValueError("Parameter `diffusive_mass_mass` has to be of type `numpy.ndarray`.")
try:
(n_species, n_observations) = np.shape(new_diffusive_mass_mass)
except:
raise ValueError("Parameter `diffusive_mass_mass` has to be a matrix.")
self.__diffusive_mass_mass = new_diffusive_mass_mass
# --------------------------------------------------------------------------
def plot_diffusive_flux(self, species_names=None, colors=None, figsize=(10,5), filename=None):
"""
Plots the computed diffusive fluxes.
**Example:**
.. image:: ../images/stefan-tube-diffusive-flux-molar-diff-molar-avg.svg
:width: 400
:param species_names: (optional)
``list`` of ``str`` specifying the species names.
:param colors: (optional)
``list`` of ``str`` specifying the plotting colors for each species. Example: ``colors=['#C7254E', '#BBBBBB', '#008CBA']``.
:param figsize: (optional)
``tuple`` specifying the figure size.
:param filename: (optional)
``str`` specifying the filename. If set to ``None``, plot will not be saved to a file.
"""
if filename is not None:
path = False
if filename[0:2] == '..':
__filename = filename[2::]
path = True
else:
__filename = filename
__base = __filename.split('.')[0]
__extension = __filename.split('.')[1]
if path:
__filename = '..' + __base
else:
__filename = __base
if self.get_diffusive_molar_molar is not None:
if filename is not None:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_molar_molar, flux='molar', velocity='molar', species_names=species_names, colors=colors, figsize=figsize, filename=__filename + '-molar-diff-molar-avg.' + __extension)
else:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_molar_molar, flux='molar', velocity='molar', species_names=species_names, colors=colors, figsize=figsize, filename=None)
if self.get_diffusive_molar_mass is not None:
if filename is not None:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_molar_mass, flux='molar', velocity='mass', species_names=species_names, colors=colors, figsize=figsize, filename=__filename + '-molar-diff-mass-avg.' + __extension)
else:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_molar_mass, flux='molar', velocity='mass', species_names=species_names, colors=colors, figsize=figsize, filename=None)
if self.get_diffusive_mass_molar is not None:
if filename is not None:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_mass_molar, flux='mass', velocity='molar', species_names=species_names, colors=colors, figsize=figsize, filename=__filename + '-mass-diff-molar-avg.' + __extension)
else:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_mass_molar, flux='mass', velocity='molar', species_names=species_names, colors=colors, figsize=figsize, filename=None)
if self.get_diffusive_mass_mass is not None:
if filename is not None:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_mass_mass, flux='mass', velocity='mass', species_names=species_names, colors=colors, figsize=figsize, filename=__filename + '-mass-diff-mass-avg.' + __extension)
else:
plt = multipy.plot.plot_1d_diffusive_flux(self.get_diffusive_mass_mass, flux='mass', velocity='mass', species_names=species_names, colors=colors, figsize=figsize, filename=None)
# --------------------------------------------------------------------------
def diffusive_molar_molar(self, species_mole_fractions, species_molar_densities):
"""
Computes the molar diffusive flux relative to a molar-averaged velocity:
.. math::
\mathbf{J}_i = c_i \mathbf{u}_i + c_i \mathbf{u}
:param species_mole_fractions:
scalar ``numpy.ndarray`` specifying the species mole fractions, :math:`X_i`, in :math:`[-]`. It should be of size ``(n_species,n_observations)``.
:param species_molar_densities:
scalar ``numpy.ndarray`` specifying the molar densities of species, :math:`c_i`, in :math:`[mole/m^3]`. It should be of size ``(n_species,n_observations)``.
:return:
- **diffusive_flux** - vector ``numpy.ndarray`` of molar diffusive fluxes relative to a molar-averaged velocity :math:`\mathbf{J}_i` in :math:`[mole/(m^2s)]`. It has size ``(n_species,n_observations)``.
"""
if not isinstance(species_mole_fractions, np.ndarray):
raise ValueError("Parameter `species_mole_fractions` has to be of type `numpy.ndarray`.")
try:
(n_species_1, n_observations_1) = np.shape(species_mole_fractions)
except:
raise ValueError("Parameter `species_mole_fractions` has to be a matrix.")
if not isinstance(species_molar_densities, np.ndarray):
raise ValueError("Parameter `species_molar_densities` has to be of type `numpy.ndarray`.")
try:
(n_species_2, n_observations_2) = np.shape(species_molar_densities)
except:
raise ValueError("Parameter `species_molar_densities` has to be a matrix.")
if n_observations_1 != n_observations_2:
raise ValueError("Parameters `species_mole_fractions` and `species_molar_densities` have different number of observations `n_observations`.")
if n_species_1 != n_species_2:
raise ValueError("Parameters `species_mole_fractions` and `species_molar_densities` have different number of species `n_species`.")
(n_species, n_observations) = np.shape(self.get_species_velocities)
if n_observations != n_observations_1:
raise ValueError("Parameters `species_mole_fractions`, `species_molar_densities` and `species_velocities` have different number of observations `n_observations`.")
if n_species != n_species_1:
raise ValueError("Parameters `species_mole_fractions`, `species_molar_densities` and `species_velocities` have different number of species `n_species`.")
molar_averaged_velocity = self.__velocity.molar_averaged(species_mole_fractions)
diffusive_flux = np.multiply(species_molar_densities, self.get_species_velocities) - np.multiply(species_molar_densities, molar_averaged_velocity)
self.__diffusive_molar_molar = diffusive_flux
return diffusive_flux
# --------------------------------------------------------------------------
def diffusive_molar_mass(self, species_mass_fractions, species_molar_densities):
"""
Computes the molar diffusive flux relative to a mass-averaged velocity:
.. math::
\mathbf{J}_i^v = c_i \mathbf{u}_i + c_i \mathbf{v}
:param species_mass_fractions:
scalar ``numpy.ndarray`` specifying the species mass fractions, :math:`Y_i`, in :math:`[-]`. It should be of size ``(n_species,n_observations)``.
:param species_molar_densities:
scalar ``numpy.ndarray`` specifying the species molar densities :math:`c_i` in :math:`[mole/m^3]`. It should be of size ``(n_species,n_observations)``.
:return:
- **diffusive_flux** - vector ``numpy.ndarray`` of molar diffusive fluxes relative to a mass-averaged velocity :math:`\mathbf{J}_i^v` in :math:`[mole/(m^2s)]`. It has size ``(n_species,n_observations)``.
"""
if not isinstance(species_mass_fractions, np.ndarray):
raise ValueError("Parameter `species_mass_fractions` has to be of type `numpy.ndarray`.")
try:
(n_species_1, n_observations_1) = np.shape(species_mass_fractions)
except:
raise ValueError("Parameter `species_mass_fractions` has to be a matrix.")
if not isinstance(species_molar_densities, np.ndarray):
raise ValueError("Parameter `species_molar_densities` has to be of type `numpy.ndarray`.")
try:
(n_species_2, n_observations_2) = np.shape(species_molar_densities)
except:
raise ValueError("Parameter `species_molar_densities` has to be a matrix.")
if n_observations_1 != n_observations_2:
raise ValueError("Parameters `species_mass_fractions` and `species_molar_densities` have different number of observations `n_observations`.")
if n_species_1 != n_species_2:
raise ValueError("Parameters `species_mass_fractions` and `species_molar_densities` have different number of species `n_species`.")
(n_species, n_observations) = np.shape(self.get_species_velocities)
if n_observations != n_observations_1:
raise ValueError("Parameters `species_mass_fractions`, `species_molar_densities` and `species_velocities` have different number of observations `n_observations`.")
if n_species != n_species_1:
raise ValueError("Parameters `species_mass_fractions`, `species_molar_densities` and `species_velocities` have different number of species `n_species`.")
mass_averaged_velocity = self.__velocity.mass_averaged(species_mass_fractions)
diffusive_flux = np.multiply(species_molar_densities, self.get_species_velocities) - np.multiply(species_molar_densities, mass_averaged_velocity)
self.__diffusive_molar_mass = diffusive_flux
return diffusive_flux
# --------------------------------------------------------------------------
def diffusive_mass_molar(self, species_mole_fractions, species_mass_densities):
"""
Computes the mass diffusive flux relative to a molar-averaged velocity:
.. math::
\mathbf{j}_i^u = \\rho_i \mathbf{u}_i + \\rho_i \mathbf{u}
:param species_mole_fractions:
scalar ``numpy.ndarray`` specifying the species mole fractions :math:`X_i` in :math:`[-]`. It should be of size ``(n_species,n_observations)``.
:param species_mass_densities:
scalar ``numpy.ndarray`` specifying the species mass densities :math:`\mathbf{\\rho}_i` in :math:`[kg/m^3]`. It should be of size ``(n_species,n_observations)``.
:return:
- **diffusive_flux** - vector ``numpy.ndarray`` of mass diffusive fluxes relative to a molar-averaged velocity :math:`\mathbf{j}_i^u` in :math:`[kg/(m^2s)]`. It has size ``(n_species,n_observations)``.
"""
if not isinstance(species_mole_fractions, np.ndarray):
raise ValueError("Parameter `species_mole_fractions` has to be of type `numpy.ndarray`.")
try:
(n_species_1, n_observations_1) = np.shape(species_mole_fractions)
except:
raise ValueError("Parameter `species_mole_fractions` has to be a matrix.")
if not isinstance(species_mass_densities, np.ndarray):
raise ValueError("Parameter `species_mass_densities` has to be of type `numpy.ndarray`.")
try:
(n_species_2, n_observations_2) = np.shape(species_mass_densities)
except:
raise ValueError("Parameter `species_mass_densities` has to be a matrix.")
if n_observations_1 != n_observations_2:
raise ValueError("Parameters `species_mole_fractions` and `species_mass_densities` have different number of observations `n_observations`.")
if n_species_1 != n_species_2:
raise ValueError("Parameters `species_mole_fractions` and `species_mass_densities` have different number of species `n_species`.")
(n_species, n_observations) = np.shape(self.get_species_velocities)
if n_observations != n_observations_1:
raise ValueError("Parameters `species_mole_fractions`, `species_mass_densities` and `species_velocities` have different number of observations `n_observations`.")
if n_species != n_species_1:
raise ValueError("Parameters `species_mole_fractions`, `species_mass_densities` and `species_velocities` have different number of species `n_species`.")
molar_averaged_velocity = self.__velocity.molar_averaged(species_mole_fractions)
diffusive_flux = np.multiply(species_mass_densities, self.get_species_velocities) - np.multiply(species_mass_densities, molar_averaged_velocity)
self.__diffusive_mass_molar = diffusive_flux
return diffusive_flux
# --------------------------------------------------------------------------
def diffusive_mass_mass(self, species_mass_fractions, species_mass_densities):
"""
Computes the mass diffusive flux relative to a mass-averaged velocity:
.. math::
\mathbf{j}_i = \\rho_i \mathbf{u}_i + \\rho_i \mathbf{v}
:param species_mass_fractions:
scalar ``numpy.ndarray`` specifying the species mass fractions :math:`Y_i` in :math:`[-]`. It should be of size ``(n_species, n_observations)``.
:param species_mass_densities:
scalar ``numpy.ndarray`` specifying the species mass densities :math:`\mathbf{\\rho}_i` in :math:`[kg/m^3]`. It should be of size ``(n_species, n_observations)``.
:return:
- **diffusive_flux** - vector ``numpy.ndarray`` of mass diffusive fluxes relative to a mass-averaged velocity :math:`\mathbf{j}_i` in :math:`[kg/(m^2s)]`. It has size ``(n_species, n_observations)``.
"""
if not isinstance(species_mass_fractions, np.ndarray):
raise ValueError("Parameter `species_mass_fractions` has to be of type `numpy.ndarray`.")
try:
(n_species_1, n_observations_1) = np.shape(species_mass_fractions)
except:
raise ValueError("Parameter `species_mass_fractions` has to be a matrix.")
if not isinstance(species_mass_densities, np.ndarray):
raise ValueError("Parameter `species_mass_densities` has to be of type `numpy.ndarray`.")
try:
(n_species_2, n_observations_2) = np.shape(species_mass_densities)
except:
raise ValueError("Parameter `species_mass_densities` has to be a matrix.")
if n_observations_1 != n_observations_2:
raise ValueError("Parameters `species_mass_fractions` and `species_mass_densities` have different number of observations `n_observations`.")
if n_species_1 != n_species_2:
raise ValueError("Parameters `species_mass_fractions` and `species_mass_densities` have different number of species `n_species`.")
(n_species, n_observations) = np.shape(self.get_species_velocities)
if n_observations != n_observations_1:
raise ValueError("Parameters `species_mass_fractions`, `species_mass_densities` and `species_velocities` have different number of observations `n_observations`.")
if n_species != n_species_1:
raise ValueError("Parameters `species_mass_fractions`, `species_mass_densities` and `species_velocities` have different number of species `n_species`.")
mass_averaged_velocity = self.__velocity.mass_averaged(species_mass_fractions)
diffusive_flux = np.multiply(species_mass_densities, self.get_species_velocities) - np.multiply(species_mass_densities, mass_averaged_velocity)
self.__diffusive_mass_mass = diffusive_flux
return diffusive_flux
# --------------------------------------------------------------------------
| 47.745098
| 244
| 0.657906
| 2,632
| 21,915
| 5.159954
| 0.06421
| 0.032987
| 0.020543
| 0.035564
| 0.869008
| 0.83256
| 0.801856
| 0.78168
| 0.759591
| 0.718136
| 0
| 0.005602
| 0.209902
| 21,915
| 458
| 245
| 47.849345
| 0.778747
| 0.244809
| 0
| 0.461538
| 0
| 0
| 0.261039
| 0.091635
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068376
| false
| 0
| 0.029915
| 0.021368
| 0.141026
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e531c9c1ac9eaf5cf3f53b6dbc300b0ff9a2f799
| 238
|
py
|
Python
|
app/modules/dht_sensor/temp_controller.py
|
bytecode-tech/my-tank
|
e37dc844fd2801b26710b461f64a6f938a5371db
|
[
"MIT"
] | 1
|
2020-05-21T04:56:51.000Z
|
2020-05-21T04:56:51.000Z
|
app/modules/dht_sensor/temp_controller.py
|
kandiki/my-tank
|
e37dc844fd2801b26710b461f64a6f938a5371db
|
[
"MIT"
] | null | null | null |
app/modules/dht_sensor/temp_controller.py
|
kandiki/my-tank
|
e37dc844fd2801b26710b461f64a6f938a5371db
|
[
"MIT"
] | 1
|
2020-04-21T20:24:36.000Z
|
2020-04-21T20:24:36.000Z
|
from flask import (Blueprint, request)
from . import temp

# Blueprint exposing the tank temperature sensor over a small REST API.
temp_controller = Blueprint('temp-controller', __name__, url_prefix='/api/temp')


@temp_controller.route('/', methods=["GET"])
def api_temp_control():
    """Return the current temperature reading from the sensor module."""
    return temp.read_temp()
| 26.444444
| 80
| 0.739496
| 31
| 238
| 5.354839
| 0.580645
| 0.253012
| 0.216867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109244
| 238
| 9
| 81
| 26.444444
| 0.783019
| 0
| 0
| 0
| 0
| 0
| 0.117155
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.666667
| 0.333333
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
e5b26b56fc6da7a7d52f30abc550181b0a2029e2
| 112
|
py
|
Python
|
src/nwb_datajoint/data_import/__init__.py
|
jihyunbak/spyglass
|
780fe2c101db60d42a1b73ad8fd729db42620ba6
|
[
"MIT"
] | 14
|
2020-02-04T20:05:02.000Z
|
2022-03-13T18:13:20.000Z
|
src/nwb_datajoint/data_import/__init__.py
|
jihyunbak/spyglass
|
780fe2c101db60d42a1b73ad8fd729db42620ba6
|
[
"MIT"
] | 118
|
2020-06-15T16:40:48.000Z
|
2022-03-21T17:25:47.000Z
|
src/nwb_datajoint/data_import/__init__.py
|
jihyunbak/spyglass
|
780fe2c101db60d42a1b73ad8fd729db42620ba6
|
[
"MIT"
] | 16
|
2020-02-04T19:04:07.000Z
|
2022-03-18T21:15:32.000Z
|
from .insert_sessions import insert_sessions
from .storage_dirs import base_dir, check_env, kachery_storage_dir
| 37.333333
| 66
| 0.875
| 17
| 112
| 5.352941
| 0.647059
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 112
| 2
| 67
| 56
| 0.892157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e5c1039cda0312feb3e593ae896f87fa3acd4dcc
| 53
|
py
|
Python
|
src/HABApp/rule/scheduler/__init__.py
|
DerOetzi/HABApp
|
a123fbfa9928ebb3cda9a84f6984dcba593c8236
|
[
"Apache-2.0"
] | 44
|
2018-12-13T08:46:44.000Z
|
2022-03-07T03:23:21.000Z
|
src/HABApp/rule/scheduler/__init__.py
|
DerOetzi/HABApp
|
a123fbfa9928ebb3cda9a84f6984dcba593c8236
|
[
"Apache-2.0"
] | 156
|
2019-03-02T20:53:31.000Z
|
2022-03-23T13:13:58.000Z
|
src/HABApp/rule/scheduler/__init__.py
|
DerOetzi/HABApp
|
a123fbfa9928ebb3cda9a84f6984dcba593c8236
|
[
"Apache-2.0"
] | 18
|
2019-03-08T07:13:21.000Z
|
2022-03-22T19:52:31.000Z
|
from .habappschedulerview import HABAppSchedulerView
| 26.5
| 52
| 0.90566
| 4
| 53
| 12
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075472
| 53
| 1
| 53
| 53
| 0.979592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f90424eb70e977fa6f417a29fc944a6fba32e65d
| 45
|
py
|
Python
|
sickbeard/lib/pynma/__init__.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
sickbeard/lib/pynma/__init__.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
sickbeard/lib/pynma/__init__.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from pynma import PyNMA
| 9
| 24
| 0.711111
| 7
| 45
| 4.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177778
| 45
| 4
| 25
| 11.25
| 0.864865
| 0.355556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f9201532aaedc6e2918aab248d632b916f61d1ad
| 34
|
py
|
Python
|
tests/__init__.py
|
soundappraisal/wavinfo
|
c966097e7d611366bc4b9b623ed72a1b0b38fe34
|
[
"MIT"
] | 15
|
2019-05-16T14:07:17.000Z
|
2022-03-31T17:43:03.000Z
|
tests/__init__.py
|
soundappraisal/wavinfo
|
c966097e7d611366bc4b9b623ed72a1b0b38fe34
|
[
"MIT"
] | 9
|
2019-02-13T16:04:21.000Z
|
2022-01-31T02:54:07.000Z
|
tests/__init__.py
|
soundappraisal/wavinfo
|
c966097e7d611366bc4b9b623ed72a1b0b38fe34
|
[
"MIT"
] | 9
|
2019-05-03T20:39:06.000Z
|
2022-03-22T20:43:22.000Z
|
from . import test_wave_parsing
| 8.5
| 31
| 0.794118
| 5
| 34
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 34
| 3
| 32
| 11.333333
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00bbfd85cea4d78f9b99f65415abd3a203fb0784
| 49
|
py
|
Python
|
img_url_replace/__init__.py
|
ggqshr/replace_img_md
|
bdb5ace24eb243454785aac924b9dd51827065a3
|
[
"BSD-3-Clause"
] | 1
|
2020-12-30T09:31:08.000Z
|
2020-12-30T09:31:08.000Z
|
img_url_replace/__init__.py
|
ggqshr/replace_img_md
|
bdb5ace24eb243454785aac924b9dd51827065a3
|
[
"BSD-3-Clause"
] | null | null | null |
img_url_replace/__init__.py
|
ggqshr/replace_img_md
|
bdb5ace24eb243454785aac924b9dd51827065a3
|
[
"BSD-3-Clause"
] | null | null | null |
from .img_upload import *
from .parse_md import *
| 24.5
| 25
| 0.77551
| 8
| 49
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 49
| 2
| 26
| 24.5
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00c6e13170e64460aa3e7594fd35d9a9f26fb186
| 11,195
|
py
|
Python
|
tests/library/test_ceph_ec_profile.py
|
u-kosmonaft-u/ceph-ansible
|
14c472707c165f77def05826b22885480af3e8f9
|
[
"Apache-2.0"
] | 1,570
|
2015-01-03T08:38:22.000Z
|
2022-03-31T09:24:37.000Z
|
tests/library/test_ceph_ec_profile.py
|
u-kosmonaft-u/ceph-ansible
|
14c472707c165f77def05826b22885480af3e8f9
|
[
"Apache-2.0"
] | 4,964
|
2015-01-05T10:41:44.000Z
|
2022-03-31T07:59:49.000Z
|
tests/library/test_ceph_ec_profile.py
|
u-kosmonaft-u/ceph-ansible
|
14c472707c165f77def05826b22885480af3e8f9
|
[
"Apache-2.0"
] | 1,231
|
2015-01-04T11:48:16.000Z
|
2022-03-31T12:15:28.000Z
|
from mock.mock import MagicMock, patch
import ca_test_common
import ceph_ec_profile
import pytest
class TestCephEcProfile(object):
def setup_method(self):
    """Builds fresh fake module state before each test method."""
    self.fake_params = []
    self.fake_binary = 'ceph'
    self.fake_cluster = 'ceph'
    self.fake_name = 'foo'
    self.fake_k = 2
    self.fake_m = 4
    # The fake AnsibleModule exposes the (empty) params dict above.
    self.fake_module = MagicMock()
    self.fake_module.params = self.fake_params
def test_get_profile(self):
    """The 'get' sub-command carries keyring, cluster, profile name and JSON format."""
    cmd = [
        self.fake_binary,
        '-n', 'client.admin',
        '-k', '/etc/ceph/ceph.client.admin.keyring',
        '--cluster', self.fake_cluster,
        'osd', 'erasure-code-profile',
        'get', self.fake_name,
        '--format=json'
    ]
    result = ceph_ec_profile.get_profile(self.fake_module, self.fake_name)
    assert result == cmd
@pytest.mark.parametrize("stripe_unit,force", [(False, False),
                                               (32, True),
                                               (False, True),
                                               (32, False)])
def test_create_profile(self, stripe_unit, force):
    """The 'set' sub-command carries k/m plus optional stripe_unit and --force."""
    cmd = [
        self.fake_binary,
        '-n', 'client.admin',
        '-k', '/etc/ceph/ceph.client.admin.keyring',
        '--cluster', self.fake_cluster,
        'osd', 'erasure-code-profile',
        'set', self.fake_name,
        'k={}'.format(self.fake_k), 'm={}'.format(self.fake_m),
    ]
    # Optional arguments are appended in the order the module builds them.
    if stripe_unit:
        cmd.append('stripe_unit={}'.format(stripe_unit))
    if force:
        cmd.append('--force')
    result = ceph_ec_profile.create_profile(self.fake_module,
                                            self.fake_name,
                                            self.fake_k,
                                            self.fake_m,
                                            stripe_unit,
                                            self.fake_cluster,
                                            force)
    assert result == cmd
def test_delete_profile(self):
    """The 'rm' sub-command targets the named profile on the given cluster."""
    cmd = [
        self.fake_binary,
        '-n', 'client.admin',
        '-k', '/etc/ceph/ceph.client.admin.keyring',
        '--cluster', self.fake_cluster,
        'osd', 'erasure-code-profile',
        'rm', self.fake_name
    ]
    result = ceph_ec_profile.delete_profile(self.fake_module,
                                            self.fake_name,
                                            self.fake_cluster)
    assert result == cmd
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ceph_ec_profile.exec_command')
def test_state_present_nothing_to_update(self, m_exec_command, m_exit_json, m_fail_json):
    """state=present with an already-matching profile must report changed=False."""
    ca_test_common.set_module_args({"state": "present",
                                    "name": "foo",
                                    "k": 2,
                                    "m": 4,
                                    "stripe_unit": 32,
                                    })
    m_exit_json.side_effect = ca_test_common.exit_json
    m_fail_json.side_effect = ca_test_common.fail_json
    # The existing profile already has k=2/m=4/stripe_unit=32, so no 'set'
    # command should be issued after the 'get'.
    m_exec_command.return_value = (0,
                                   ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'],
                                   '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}', # noqa: E501
                                   '')
    with pytest.raises(ca_test_common.AnsibleExitJson) as r:
        ceph_ec_profile.run_module()
    result = r.value.args[0]
    assert not result['changed']
    assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json']
    assert result['stdout'] == '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}' # noqa: E501
    assert not result['stderr']
    assert result['rc'] == 0
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ceph_ec_profile.exec_command')
def test_state_present_profile_to_update(self, m_exec_command, m_exit_json, m_fail_json):
ca_test_common.set_module_args({"state": "present",
"name": "foo",
"k": 2,
"m": 6,
"stripe_unit": 32
})
m_exit_json.side_effect = ca_test_common.exit_json
m_fail_json.side_effect = ca_test_common.fail_json
m_exec_command.side_effect = [
(0,
['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'],
'{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}', # noqa: E501
''),
(0,
['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=6', 'stripe_unit=32', '--force'],
'',
''
)
]
with pytest.raises(ca_test_common.AnsibleExitJson) as r:
ceph_ec_profile.run_module()
result = r.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=6', 'stripe_unit=32', '--force']
assert not result['stdout']
assert not result['stderr']
assert result['rc'] == 0
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ceph_ec_profile.exec_command')
def test_state_present_profile_doesnt_exist(self, m_exec_command, m_exit_json, m_fail_json):
ca_test_common.set_module_args({"state": "present",
"name": "foo",
"k": 2,
"m": 4,
"stripe_unit": 32
})
m_exit_json.side_effect = ca_test_common.exit_json
m_fail_json.side_effect = ca_test_common.fail_json
m_exec_command.side_effect = [
(2,
['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'],
'',
"Error ENOENT: unknown erasure code profile 'foo'"),
(0,
['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=4', 'stripe_unit=32', '--force'],
'',
''
)
]
with pytest.raises(ca_test_common.AnsibleExitJson) as r:
ceph_ec_profile.run_module()
result = r.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=4', 'stripe_unit=32', '--force']
assert not result['stdout']
assert not result['stderr']
assert result['rc'] == 0
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ceph_ec_profile.exec_command')
def test_state_absent_on_existing_profile(self, m_exec_command, m_exit_json, m_fail_json):
ca_test_common.set_module_args({"state": "absent",
"name": "foo"
})
m_exit_json.side_effect = ca_test_common.exit_json
m_fail_json.side_effect = ca_test_common.fail_json
m_exec_command.return_value = (0,
['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'],
'',
'')
with pytest.raises(ca_test_common.AnsibleExitJson) as r:
ceph_ec_profile.run_module()
result = r.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo']
assert result['stdout'] == 'Profile foo removed.'
assert not result['stderr']
assert result['rc'] == 0
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ceph_ec_profile.exec_command')
def test_state_absent_on_nonexisting_profile(self, m_exec_command, m_exit_json, m_fail_json):
ca_test_common.set_module_args({"state": "absent",
"name": "foo"
})
m_exit_json.side_effect = ca_test_common.exit_json
m_fail_json.side_effect = ca_test_common.fail_json
m_exec_command.return_value = (0,
['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'],
'',
'erasure-code-profile foo does not exist')
with pytest.raises(ca_test_common.AnsibleExitJson) as r:
ceph_ec_profile.run_module()
result = r.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo']
assert result['stdout'] == "Skipping, the profile foo doesn't exist"
assert result['stderr'] == 'erasure-code-profile foo does not exist'
assert result['rc'] == 0
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
def test_check_mode(self, m_exit_json):
ca_test_common.set_module_args({
'name': 'foo',
'k': 2,
'm': 4,
'_ansible_check_mode': True
})
m_exit_json.side_effect = ca_test_common.exit_json
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_ec_profile.run_module()
result = result.value.args[0]
assert not result['changed']
assert result['rc'] == 0
assert not result['stdout']
assert not result['stderr']
| 48.04721
| 264
| 0.496472
| 1,165
| 11,195
| 4.501288
| 0.109013
| 0.045767
| 0.05492
| 0.060069
| 0.82151
| 0.819985
| 0.812548
| 0.788139
| 0.769832
| 0.734935
| 0
| 0.011234
| 0.371862
| 11,195
| 232
| 265
| 48.25431
| 0.734499
| 0.002858
| 0
| 0.640777
| 0
| 0.014563
| 0.239448
| 0.126893
| 0
| 0
| 0
| 0
| 0.15534
| 1
| 0.048544
| false
| 0
| 0.019417
| 0
| 0.072816
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
00d9574f47a9a5235f879d1a411b52f1046891ac
| 1,399
|
py
|
Python
|
lexical/process_lexical.py
|
xmeng17/Malicious-URL-Detection
|
f286aeb50570455486b470cbc2db9aa0fae99b8f
|
[
"MIT"
] | null | null | null |
lexical/process_lexical.py
|
xmeng17/Malicious-URL-Detection
|
f286aeb50570455486b470cbc2db9aa0fae99b8f
|
[
"MIT"
] | null | null | null |
lexical/process_lexical.py
|
xmeng17/Malicious-URL-Detection
|
f286aeb50570455486b470cbc2db9aa0fae99b8f
|
[
"MIT"
] | null | null | null |
import lexical as lx
import codecs
from random import shuffle
# Header row shared by both output CSVs.
HEADER = 'tld,dot_num,avg_host,max_host,avg_path,max_path,class\n'

l = lx.lexical()


def _featurize(in_path, out_path, label):
    """Compute lexical features for each parsed-URL row and write a labelled CSV.

    Reads *in_path* (CSV with a header row: hostname, tld, ..., path),
    runs l.lexical() on the hostname/path of every row, and writes
    *out_path* with one feature row per URL tagged with *label*.

    The original script duplicated this whole loop for the 'good' and
    'bad' inputs; the two call sites below replace that copy-paste.
    """
    with codecs.open(in_path, encoding='utf-8') as f:
        rows = f.read().split('\n')
    # Drop the header row and the empty string left by the trailing newline.
    del rows[0]
    del rows[-1]
    out_rows = []
    for line in rows:
        comp = line.split(',')
        hostname = comp[0]
        tld = comp[1]
        path = comp[3]
        dot_num, avg_host, max_host, avg_path, max_path = l.lexical(hostname, path)
        out_rows.append(','.join((tld, str(dot_num), str(avg_host), str(max_host),
                                  str(avg_path), str(max_path), label)))
    with codecs.open(out_path, mode='w', encoding='utf-8') as f:
        f.write(HEADER + '\n'.join(out_rows))


_featurize('../parse/good.csv', 'lexical_good.csv', 'good')
_featurize('../parse/bad.csv', 'lexical_bad.csv', 'bad')
| 30.413043
| 124
| 0.68263
| 249
| 1,399
| 3.662651
| 0.200803
| 0.046053
| 0.049342
| 0.071272
| 0.873904
| 0.788377
| 0.788377
| 0.788377
| 0.788377
| 0.788377
| 0
| 0.015911
| 0.101501
| 1,399
| 45
| 125
| 31.088889
| 0.709626
| 0
| 0
| 0.641026
| 0
| 0
| 0.200143
| 0.117941
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dad77c6e4ccbfc91bf868ef8fb393b299a2ec072
| 30
|
py
|
Python
|
main.py
|
jsjhcccn/qqbot
|
90505ae42a5b2c3a74b85c7bc98b597373aa6b6b
|
[
"MIT"
] | 4,253
|
2016-07-30T11:54:14.000Z
|
2022-03-17T15:40:29.000Z
|
main.py
|
MidKateIsOP520/qqbot
|
c5f53c31062ad5c6e70555b9bf0479488339ecbe
|
[
"MIT"
] | 375
|
2016-08-05T02:07:19.000Z
|
2020-08-03T01:29:32.000Z
|
main.py
|
MidKateIsOP520/qqbot
|
c5f53c31062ad5c6e70555b9bf0479488339ecbe
|
[
"MIT"
] | 1,183
|
2016-07-31T01:37:43.000Z
|
2022-03-13T13:18:04.000Z
|
# Entry point: start the QQBot main loop as soon as this script runs.
from qqbot import Main

Main()
| 30
| 30
| 0.766667
| 5
| 30
| 4.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9717ef5319c02295a09cebf35c34878e1c442673
| 194
|
py
|
Python
|
cards/cards.py
|
coord-e/lottery-backend
|
4e1136ea62c471c98871ae28ad6b5144657151b4
|
[
"MIT"
] | 3
|
2019-03-17T12:53:25.000Z
|
2020-06-28T07:05:47.000Z
|
cards/cards.py
|
coord-e/lottery-backend
|
4e1136ea62c471c98871ae28ad6b5144657151b4
|
[
"MIT"
] | 297
|
2018-06-23T09:48:04.000Z
|
2021-09-08T00:06:11.000Z
|
cards/cards.py
|
Sakuten/lottery-backend
|
4e1136ea62c471c98871ae28ad6b5144657151b4
|
[
"MIT"
] | 3
|
2019-03-07T15:38:19.000Z
|
2019-03-30T08:00:14.000Z
|
class card:
    """A QR card, pairing an image path with a public identifier."""

    # Class-level defaults, kept for backward compatibility with any code
    # reading them off the class itself; instances always override both.
    qr_path = ''
    public_id = ''

    def __init__(self, qr_path, public_id):
        """Store the QR image location and the card's public id."""
        self.qr_path = qr_path
        self.public_id = public_id
| 19.4
| 43
| 0.572165
| 27
| 194
| 3.666667
| 0.407407
| 0.242424
| 0.242424
| 0.282828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.309278
| 194
| 9
| 44
| 21.555556
| 0.738806
| 0.097938
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
971eb594799cf95db900e61f4da391f8dbcd3b67
| 14,667
|
py
|
Python
|
App_TESS/app.py
|
mikedcurry/TESS_Flask_App
|
e20b4fc84b8d85cb20c388534899cf9be36e9b86
|
[
"MIT"
] | null | null | null |
App_TESS/app.py
|
mikedcurry/TESS_Flask_App
|
e20b4fc84b8d85cb20c388534899cf9be36e9b86
|
[
"MIT"
] | 1
|
2020-09-26T00:11:55.000Z
|
2020-09-26T00:11:55.000Z
|
App_TESS/app.py
|
mikedcurry/TESS_Flask_App
|
e20b4fc84b8d85cb20c388534899cf9be36e9b86
|
[
"MIT"
] | 2
|
2019-09-23T16:46:13.000Z
|
2019-09-26T15:53:44.000Z
|
"""Main Application and routing Logic for TESS Flask App"""
from decouple import config
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
import pickle
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import RobustScaler
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import pandas as pd
import numpy as np
from tensorflow.keras.models import load_model
from .models import DB, TOI_Table, TIC_Cat_Table
from .models import *
from .light_curve import *
from .Data_in import *
def create_app():
    """Create and configure an instance of the TESS Flask app.

    Wires SQLAlchemy to the configured DATABASE_URL and registers the
    routes: '/' (TOI table), '/total_reset' (rebuild + re-ingest the DB),
    '/image' (light-curve images) and '/predict' (run the classifier).
    Returns the configured Flask app.
    """
    app = Flask(__name__)
    # configure DB, will need to update this when changing DBs?
    app.config['SQLALCHEMY_DATABASE_URI'] = config('DATABASE_URL')
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config['ENV'] = config('ENV')
    DB.init_app(app)

    # Create home route
    @app.route('/')
    def root():
        """Render the home page with the full TOI table."""
        toi_table = (TOI_Table.query.all())
        # NOTE(review): 'Findin' looks like a typo for 'Finding' -- confirm
        # against the templates before changing this user-visible title.
        return render_template('home.html', title='Findin Planets:TESS', toi_table=toi_table)

    @app.route('/total_reset')
    def total_reset():
        """Drop and recreate all tables, then re-ingest the source data."""
        DB.drop_all()
        DB.create_all()
        get_visual_data()
        get_toi_data()
        get_tic_catalog()
        return render_template('home.html', title='Reset Database!')

    @app.route('/image')
    def image():
        """Render the light-curve image gallery page."""
        return render_template('image.html', title='Light curve images')

    @app.route('/predict')
    def predict():
        """Run the classification pipeline over every TOI in the database."""

        # Gathering the necessary data from sql database:
        def get_data():
            # Pulling data from sql database
            toi_rows = TOI_Table.query.all()
            tic_catalog_rows = TIC_Cat_Table.query.all()
            toi_dict = {'TIC_ID': [row.TIC_ID for row in toi_rows],
                        'TOI': [row.TOI for row in toi_rows],
                        'Epoch': [row.Epoch for row in toi_rows],
                        'Period': [row.Period for row in toi_rows],
                        'Duration': [row.Duration for row in toi_rows],
                        'Depth': [row.Depth for row in toi_rows],
                        'Planet_Radius': [row.Planet_Radius for row in toi_rows],
                        'Planet_Insolation': [row.Planet_Insolation for row in toi_rows],
                        'Planet_Equil_Temp': [row.Planet_Equil_Temp for row in toi_rows],
                        'Planet_SNR': [row.Planet_SNR for row in toi_rows],
                        'Stellar_Distance': [row.Stellar_Distance for row in toi_rows],
                        'Stellar_log_g': [row.Stellar_log_g for row in toi_rows],
                        'Stellar_Radius': [row.Stellar_Radius for row in toi_rows],
                        'TFOPWG_Disposition': [row.TFOPWG_Disposition for row in toi_rows]}
            tic_catalog_dict = {'TIC_ID': [row.TIC_ID for row in tic_catalog_rows],
                                'ra': [row.ra for row in tic_catalog_rows],
                                'dec': [row.dec for row in tic_catalog_rows],
                                'pmRA': [row.pmRA for row in tic_catalog_rows],
                                'pmDEC': [row.pmDEC for row in tic_catalog_rows],
                                'plx': [row.plx for row in tic_catalog_rows],
                                'gallong': [row.gallong for row in tic_catalog_rows],
                                'gallat': [row.gallat for row in tic_catalog_rows],
                                'eclong': [row.eclong for row in tic_catalog_rows],
                                'eclat': [row.eclat for row in tic_catalog_rows],
                                'Tmag': [row.Tmag for row in tic_catalog_rows],
                                'Teff': [row.Teff for row in tic_catalog_rows],
                                'logg': [row.logg for row in tic_catalog_rows],
                                'MH': [row.MH for row in tic_catalog_rows],
                                'rad': [row.rad for row in tic_catalog_rows],
                                'mass': [row.mass for row in tic_catalog_rows],
                                'rho': [row.rho for row in tic_catalog_rows],
                                'lum': [row.lum for row in tic_catalog_rows],
                                'd': [row.d for row in tic_catalog_rows],
                                'ebv': [row.ebv for row in tic_catalog_rows],
                                'numcont': [row.numcont for row in tic_catalog_rows],
                                'contratio': [row.contratio for row in tic_catalog_rows],
                                'priority': [row.priority for row in tic_catalog_rows]}
            toi = pd.DataFrame(toi_dict)
            tic_catalog = pd.DataFrame(tic_catalog_dict)
            df = toi.merge(tic_catalog, on='TIC_ID')
            return df

        # Shaping the data for input:
        def shape_data(df):
            # Dropping data not needed for model:
            X = df.drop(columns=['TIC_ID', 'TOI', 'TFOPWG_Disposition'])
            return X

        # Setting up model architecture for neural net (kept as reference
        # documentation of the pickled KerasClassifier's architecture):
        def create_model():
            # Instantiate model:
            model = Sequential()
            # Add input layer:
            model.add(Dense(20, input_dim=33, activation='relu'))
            # Add hidden layer:
            model.add(Dense(20, activation='relu'))
            # Add output layer:
            model.add(Dense(1, activation='sigmoid'))
            # Compile model:
            model.compile(loss='binary_crossentropy', optimizer='adam',
                          metrics=['accuracy'])
            return model

        # Loading pipeline:
        def load_pipeline():
            # FIX: use a context manager so the pickle file handle is
            # closed (the original leaked it via pickle.load(open(...))).
            with open('tess_pipeline.pkl', 'rb') as fh:
                tess_pipeline = pickle.load(fh)
            tess_pipeline.named_steps[
                'kerasclassifier'].model = load_model(
                    'keras_classifier.h5')
            return tess_pipeline

        # Get predictions for all observations:
        def get_all_predictions():
            # FIX: the original called get_data() twice, re-running every
            # database query just to fetch the 'TOI' column; fetch once.
            df = get_data()
            y_pred_proba_full = load_pipeline().predict_proba(shape_data(df))
            toi_index = pd.DataFrame(df['TOI'])
            output_df = pd.DataFrame(y_pred_proba_full, columns=[
                'actual_exoplanet_prob', 'false_positive_prob'])
            output_df = toi_index.join(output_df)
            output_df['prediction'] = np.where(
                output_df['actual_exoplanet_prob'] >= output_df[
                    'false_positive_prob'],
                'Actual Exoplanet', 'False Positive')
            # prediction_prob is the probability of whichever class won.
            output_df['prediction_prob'] = np.where(output_df[
                'actual_exoplanet_prob'] >= output_df[
                'false_positive_prob'], output_df[
                'actual_exoplanet_prob'], output_df[
                'false_positive_prob'])
            return output_df

        get_all_predictions()
        return render_template('predict.html', title='prediction pipeline works!')

    # @app.route('/test')
    # def get_urls(tic_id):
    #     urls = Visual_Table.query.filter_by(TIC_ID=tic_id).all()
    #     urls = [url.dataURL for url in urls]
    #     return urls
    return app
# """Main Application and routing Logic for TESS Flask App"""
# from decouple import config
# from flask import Flask, render_template, request
# from flask_sqlalchemy import SQLAlchemy
# from .models import *
# from .light_curve import *
# from .Data_in import *
# # from .predict import *
# def create_app():
# """create and config an instance of the Flask App"""
# app = Flask(__name__)
# # configure DB, will need to update this when changing DBs?
# app.config['SQLALCHEMY_DATABASE_URI'] = config('DATABASE_URL')
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# app.config['ENV'] = config('ENV')
# DB.init_app(app)
# # with app.app_context():
# # db.create_all()
# # Create home route
# @app.route('/')
# def root():
# #Pull example data from Notebooks folder. Will be be pulled from sql DB in the future.
# return render_template('home.html',
# title = 'Finding Planets:TESS',
# toi_table=(TOI_Table.query.all()),
# tic_table=(TIC_Cat_Table.query.all())
# )
# @app.route('/total_reset')
# def total_reset():
# DB.drop_all()
# DB.create_all()
# get_visual_data()
# get_toi_data()
# get_tic_catalog()
# return render_template('home.html', title='Reset Database!')
# @app.route('/predict')
# def predict():
# get_all_predictions()
# return render_template('home.html', title='prediction pipeline works!')
# # @app.route('/test')
# # def get_urls(tic_id):
# # urls = Visual_Table.query.filter_by(TIC_ID=tic_id).all()
# # urls = [url.dataURL for url in urls]
# # return urls
# return app
# import pickle
# from sklearn.pipeline import make_pipeline
# from sklearn.impute import SimpleImputer
# from sklearn.preprocessing import RobustScaler
# from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
# from tensorflow.keras.models import Sequential
# from tensorflow.keras.layers import Dense
# import pandas as pd
# import numpy as np
# from tensorflow.keras.models import load_model
# from .models import DB, TOI_Table, TIC_Cat_Table
# # Gathering the necessary data from sql database:
# def get_data():
# # Pulling data from sql database
# toi_rows = TOI_Table.query.all()
# tic_catalog_rows = TIC_Cat_Table.query.all()
# toi_dict = {'TIC_ID': [row.TIC_ID for row in toi_rows],
# 'TOI': [row.TOI for row in toi_rows],
# 'Epoch': [row.Epoch for row in toi_rows],
# 'Period': [row.Period for row in toi_rows],
# 'Duration': [row.Duration for row in toi_rows],
# 'Depth': [row.Depth for row in toi_rows],
# 'Planet_Radius': [row.Planet_Radius for row in toi_rows],
# 'Planet_Insolation': [row.Planet_Insolation for row in toi_rows],
# 'Planet_Equil_Temp': [row.Planet_Equil_Temp for row in toi_rows],
# 'Planet_SNR': [row.Planet_SNR for row in toi_rows],
# 'Stellar_Distance': [row.Stellar_Distance for row in toi_rows],
# 'Stellar_log_g': [row.Stellar_log_g for row in toi_rows],
# 'Stellar_Radius': [row.Stellar_Radius for row in toi_rows],
# 'TFOPWG_Disposition': [row.TFOPWG_Disposition for row in toi_rows]}
# tic_catalog_dict = {'TIC_ID': [row.TIC_ID for row in tic_catalog_rows],
# 'ra': [row.ra for row in tic_catalog_rows],
# 'dec': [row.dec for row in tic_catalog_rows],
# 'pmRA': [row.pmRA for row in tic_catalog_rows],
# 'pmDEC': [row.pmDEC for row in tic_catalog_rows],
# 'plx': [row.plx for row in tic_catalog_rows],
# 'gallong': [row.gallong for row in tic_catalog_rows],
# 'gallat': [row.gallat for row in tic_catalog_rows],
# 'eclong': [row.eclong for row in tic_catalog_rows],
# 'eclat': [row.eclat for row in tic_catalog_rows],
# 'Tmag': [row.Tmag for row in tic_catalog_rows],
# 'Teff': [row.Teff for row in tic_catalog_rows],
# 'logg': [row.logg for row in tic_catalog_rows],
# 'MH': [row.MH for row in tic_catalog_rows],
# 'rad': [row.rad for row in tic_catalog_rows],
# 'mass': [row.mass for row in tic_catalog_rows],
# 'rho': [row.rho for row in tic_catalog_rows],
# 'lum': [row.lum for row in tic_catalog_rows],
# 'd': [row.d for row in tic_catalog_rows],
# 'ebv': [row.ebv for row in tic_catalog_rows],
# 'numcont': [row.numcont for row in tic_catalog_rows],
# 'contratio': [row.contratio for row in tic_catalog_rows],
# 'priority': [row.priority for row in tic_catalog_rows]}
# toi = pd.DataFrame(toi_dict)
# tic_catalog = pd.DataFrame(tic_catalog_dict)
# df = toi.merge(tic_catalog, on='TIC_ID')
# return df
# # Shaping the data for input:
# def shape_data(df):
# # Dropping data not needed for model:
# X = df.drop(columns=['TIC_ID', 'TOI', 'TFOPWG_Disposition'])
# return X
# # Setting up model architecture for neural net:
# def create_model():
# # Instantiate model:
# model = Sequential()
# # Add input layer:
# model.add(Dense(20, input_dim=33, activation='relu'))
# # Add hidden layer:
# model.add(Dense(20, activation='relu'))
# # Add output layer:
# model.add(Dense(1, activation='sigmoid'))
# # Compile model:
# model.compile(loss='binary_crossentropy', optimizer='adam',
# metrics=['accuracy'])
# return model
# # Loading pipeline:
# def load_pipeline():
# tess_pipeline = pickle.load(open('tess_pipeline.pkl', 'rb'))
# tess_pipeline.named_steps[
# 'kerasclassifier'].model = load_model(
# 'keras_classifier.h5')
# return tess_pipeline
# # Get predictions for all observations:
# def get_all_predictions():
# y_pred_proba_full = load_pipeline().predict_proba(shape_data(get_data()))
# toi_index = pd.DataFrame(get_data()['TOI'])
# output_df = pd.DataFrame(y_pred_proba_full, columns=[
# 'actual_exoplanet_prob', 'false_positive_prob'])
# output_df = toi_index.join(output_df)
# output_df['prediction'] = np.where(
# output_df['actual_exoplanet_prob'] >= output_df[
# 'false_positive_prob'],
# 'Actual Exoplanet', 'False Positive')
# output_df['prediction_prob'] = np.where(output_df[
# 'actual_exoplanet_prob']>= output_df[
# 'false_positive_prob'], output_df[
# 'actual_exoplanet_prob'], output_df[
# 'false_positive_prob'])
# return output_df
# get_all_predictions()
| 45.834375
| 101
| 0.577214
| 1,776
| 14,667
| 4.533784
| 0.121059
| 0.055142
| 0.073522
| 0.062842
| 0.984228
| 0.976279
| 0.958023
| 0.950571
| 0.950571
| 0.950571
| 0
| 0.001595
| 0.315879
| 14,667
| 319
| 102
| 45.978056
| 0.800877
| 0.518579
| 0
| 0
| 0
| 0
| 0.119353
| 0.019965
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.141667
| 0.008333
| 0.308333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
978fc91e48a69b62fda28f8d703d52a062332e01
| 177
|
py
|
Python
|
app/routes/views.py
|
izconcept/Turnt
|
28d25ebfbd43aa6472aa1f0eec7e73ec1b8d15d3
|
[
"Apache-2.0"
] | 4
|
2018-01-29T05:51:32.000Z
|
2018-02-08T05:18:47.000Z
|
app/routes/views.py
|
izconcept/Turnt
|
28d25ebfbd43aa6472aa1f0eec7e73ec1b8d15d3
|
[
"Apache-2.0"
] | 3
|
2018-01-30T21:41:09.000Z
|
2018-01-31T18:20:01.000Z
|
app/routes/views.py
|
izconcept/Turnt
|
28d25ebfbd43aa6472aa1f0eec7e73ec1b8d15d3
|
[
"Apache-2.0"
] | 1
|
2019-03-29T19:32:28.000Z
|
2019-03-29T19:32:28.000Z
|
from app import app, drinks
from flask import render_template, session, redirect, request
@app.route('/')
def index():
    """Serve the landing page, passing the module-level drinks data to the template."""
    return render_template('index.html', drinks=drinks)
| 22.125
| 61
| 0.745763
| 24
| 177
| 5.416667
| 0.625
| 0.215385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 177
| 7
| 62
| 25.285714
| 0.849673
| 0
| 0
| 0
| 0
| 0
| 0.062147
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
c14dc762e4ac7e02bd632552dcbebfc3d90dd126
| 31
|
py
|
Python
|
easyTCP/SERVER/utils/__init__.py
|
dsal3389/easyTCP
|
0a11ffe4726bfd0461c24fa459e417fd2fe3cd7f
|
[
"MIT"
] | 4
|
2018-12-09T13:57:59.000Z
|
2019-10-19T19:34:28.000Z
|
easyTCP/SERVER/utils/__init__.py
|
dsal3389/easyTCP
|
0a11ffe4726bfd0461c24fa459e417fd2fe3cd7f
|
[
"MIT"
] | null | null | null |
easyTCP/SERVER/utils/__init__.py
|
dsal3389/easyTCP
|
0a11ffe4726bfd0461c24fa459e417fd2fe3cd7f
|
[
"MIT"
] | null | null | null |
#from .BUILD_IN import BUILD_IN
| 31
| 31
| 0.83871
| 6
| 31
| 4
| 0.666667
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.857143
| 0.967742
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c15a975febae16e23c4a925f353cceee52893986
| 147
|
py
|
Python
|
terrascript/pingdom/d.py
|
jackluo923/python-terrascript
|
ed4b626e6d28621ea1b02fc16f7277a094d89830
|
[
"BSD-2-Clause"
] | 4
|
2022-02-07T21:08:14.000Z
|
2022-03-03T04:41:28.000Z
|
terrascript/pingdom/d.py
|
jackluo923/python-terrascript
|
ed4b626e6d28621ea1b02fc16f7277a094d89830
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/pingdom/d.py
|
jackluo923/python-terrascript
|
ed4b626e6d28621ea1b02fc16f7277a094d89830
|
[
"BSD-2-Clause"
] | 2
|
2022-02-06T01:49:42.000Z
|
2022-02-08T14:15:00.000Z
|
# terrascript/pingdom/d.py
import terrascript
class pingdom_contact(terrascript.Data):
    """Terrascript wrapper for the `pingdom_contact` Terraform data source."""
    pass
class pingdom_team(terrascript.Data):
    """Terrascript wrapper for the `pingdom_team` Terraform data source."""
    pass
| 13.363636
| 40
| 0.768707
| 18
| 147
| 6.166667
| 0.555556
| 0.216216
| 0.342342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14966
| 147
| 10
| 41
| 14.7
| 0.888
| 0.163265
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
c171e9fdddb0b26706be73c55350884337b985be
| 19,654
|
py
|
Python
|
test/test_get_metadata_functions.py
|
bjoernmeier/sqlalchemy_exasol
|
d63e70096f227db5eb68e631e98777e0e68ac178
|
[
"BSD-2-Clause"
] | 26
|
2015-10-13T21:43:07.000Z
|
2021-09-22T16:58:02.000Z
|
test/test_get_metadata_functions.py
|
bjoernmeier/sqlalchemy_exasol
|
d63e70096f227db5eb68e631e98777e0e68ac178
|
[
"BSD-2-Clause"
] | 65
|
2015-01-22T10:05:18.000Z
|
2022-01-18T12:11:53.000Z
|
test/test_get_metadata_functions.py
|
bjoernmeier/sqlalchemy_exasol
|
d63e70096f227db5eb68e631e98777e0e68ac178
|
[
"BSD-2-Clause"
] | 23
|
2015-01-21T09:27:05.000Z
|
2022-01-18T11:40:18.000Z
|
# -*- coding: UTF-8 -*-
import pytest
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.sql.sqltypes import INTEGER, VARCHAR
from sqlalchemy.testing import fixtures, config
from sqlalchemy_exasol.base import EXADialect
TEST_GET_METADATA_FUNCTIONS_SCHEMA = "test_get_metadata_functions_schema"
ENGINE_NONE_DATABASE = "ENGINE_NONE_DATABASE"
ENGINE_SCHEMA_DATABASE = "ENGINE_SCHEMA_DATABASE"
ENGINE_SCHEMA_2_DATABASE = "ENGINE_SCHEMA_2_DATABASE"
class MetadataTest(fixtures.TablesTest):
__backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        """Build the two fixture schemas and the engines the tests use.

        Creates schema 1 with tables t (composite PK), s (FK into t) and
        view v, and schema 2 with t_2 and v_2, then prepares one engine
        per 'database' setting (None / schema / schema_2) in cls.engine_map.
        Statement order matters: t must exist before s's FOREIGN KEY and
        before the views that select from it.
        """
        cls.schema = TEST_GET_METADATA_FUNCTIONS_SCHEMA
        cls.schema_2 = "test_get_metadata_functions_schema_2"
        with config.db.begin() as c:
            # Best-effort drop so a leftover schema from a previous run
            # doesn't break CREATE SCHEMA; failure just means it was absent.
            try:
                c.execute("DROP SCHEMA %s CASCADE" % cls.schema)
            except Exception as e:
                print(e)
                pass
            c.execute("CREATE SCHEMA %s" % cls.schema)
            c.execute(
                "CREATE TABLE %s.t (pid1 int, pid2 int, name VARCHAR(20), age int, PRIMARY KEY (pid1,pid2))" % cls.schema)
            c.execute(
                "CREATE TABLE {schema}.s (id1 int primary key, fid1 int, fid2 int, age int, CONSTRAINT fk_test FOREIGN KEY (fid1,fid2) REFERENCES {schema}.t(pid1,pid2))".format(
                    schema=cls.schema))
            # NOTE(review): 'view_defintion' is misspelled; kept as-is because
            # other tests may reference this class attribute by that name.
            cls.view_defintion = "CREATE VIEW {schema}.v AS select * from {schema}.t".format(schema=cls.schema)
            c.execute(cls.view_defintion)
            # Same best-effort drop for the second schema.
            try:
                c.execute("DROP SCHEMA %s CASCADE" % cls.schema_2)
            except Exception as e:
                print(e)
                pass
            c.execute("CREATE SCHEMA %s" % cls.schema_2)
            c.execute(
                "CREATE TABLE %s.t_2 (pid1 int, pid2 int, name VARCHAR(20), age int, PRIMARY KEY (pid1,pid2))" % cls.schema_2)
            c.execute("CREATE VIEW {schema}.v_2 AS select * from {schema}.t_2".format(schema=cls.schema_2))
            c.execute("COMMIT")
            # One engine per 'database' URL setting, keyed for parametrized tests.
            cls.engine_none_database = cls.create_engine_with_database_name(c, None)
            cls.engine_schema_database = cls.create_engine_with_database_name(c, cls.schema)
            cls.engine_schema_2_database = cls.create_engine_with_database_name(c, cls.schema_2)
            cls.engine_map = {
                ENGINE_NONE_DATABASE: cls.engine_none_database,
                ENGINE_SCHEMA_DATABASE: cls.engine_schema_database,
                ENGINE_SCHEMA_2_DATABASE: cls.engine_schema_2_database
            }
@classmethod
def generate_url_with_database_name(cls, connection, new_database_name):
database_url = config.db_url
new_args = database_url.translate_connect_args()
new_args["database"] = new_database_name
new_database_url = URL(drivername=database_url.drivername, query=database_url.query, **new_args)
return new_database_url
@classmethod
def create_engine_with_database_name(cls, connection, new_database_name):
url = cls.generate_url_with_database_name(connection, new_database_name)
engine = create_engine(url)
return engine
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_get_schema_names(self, engine_name, use_sql_fallback):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
schema_names = dialect.get_schema_names(connection=c, use_sql_fallback=use_sql_fallback)
assert self.schema in schema_names and self.schema_2 in schema_names
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_compare_get_schema_names_for_sql_and_odbc(self, engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
schema_names_fallback = dialect.get_schema_names(connection=c, use_sql_fallback=True)
schema_names_odbc = dialect.get_schema_names(connection=c)
assert sorted(schema_names_fallback) == sorted(schema_names_odbc)
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_get_table_names(self, use_sql_fallback, engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
table_names = dialect.get_table_names(connection=c, schema=self.schema, use_sql_fallback=use_sql_fallback)
assert "t" in table_names and "s" in table_names
@pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_compare_get_table_names_for_sql_and_odbc(self, schema, engine_name):
with self.engine_map[engine_name].begin() as c:
if schema is None:
c.execute("OPEN SCHEMA %s" % self.schema)
dialect = EXADialect()
table_names_fallback = dialect.get_table_names(connection=c, schema=schema, use_sql_fallback=True)
table_names_odbc = dialect.get_table_names(connection=c, schema=schema)
assert table_names_fallback == table_names_odbc
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_has_table_table_exists(self, use_sql_fallback, engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
has_table = dialect.has_table(connection=c, schema=self.schema, table_name="t",
use_sql_fallback=use_sql_fallback)
assert has_table, "Table %s.T was not found, but should exist" % self.schema
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_has_table_table_exists_not(self, use_sql_fallback, engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
has_table = dialect.has_table(connection=c, schema=self.schema, table_name="not_exist",
use_sql_fallback=use_sql_fallback)
assert not has_table, "Table %s.not_exist was found, but should not exist" % self.schema
@pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_compare_has_table_for_sql_and_odbc(self, schema, engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
has_table_fallback = dialect.has_table(connection=c, schema=schema, use_sql_fallback=True, table_name="t")
has_table_odbc = dialect.has_table(connection=c, schema=schema, table_name="t")
assert has_table_fallback == has_table_odbc, "Expected table %s.t with odbc and fallback" % schema
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_get_view_names(self, use_sql_fallback,engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
view_names = dialect.get_view_names(connection=c, schema=self.schema, use_sql_fallback=use_sql_fallback)
assert "v" in view_names
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_get_view_names_for_sys(self, use_sql_fallback, engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
view_names = dialect.get_view_names(connection=c, schema="sys", use_sql_fallback=use_sql_fallback)
assert len(view_names) == 0
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_get_view_definition(self, use_sql_fallback,engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
view_definition = dialect.get_view_definition(connection=c, schema=self.schema, view_name="v",
use_sql_fallback=use_sql_fallback)
assert self.view_defintion == view_definition
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_get_view_definition_view_name_none(self, use_sql_fallback,engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
view_definition = dialect.get_view_definition(connection=c, schema=self.schema, view_name=None,
use_sql_fallback=use_sql_fallback)
assert view_definition is None
@pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_compare_get_view_names_for_sql_and_odbc(self, schema,engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
c.execute("OPEN SCHEMA %s" % self.schema)
view_names_fallback = dialect.get_view_names(connection=c, schema=schema, use_sql_fallback=True)
view_names_odbc = dialect.get_view_names(connection=c, schema=schema)
assert view_names_fallback == view_names_odbc
@pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_compare_get_view_definition_for_sql_and_odbc(self, schema,engine_name):
with self.engine_map[engine_name].begin() as c:
if schema is None:
c.execute("OPEN SCHEMA %s" % self.schema)
view_name = "v"
dialect = EXADialect()
view_definition_fallback = dialect.get_view_definition(
connection=c, view_name=view_name, schema=schema, use_sql_fallback=True)
view_definition_odbc = dialect.get_view_definition(
connection=c, view_name=view_name, schema=schema)
assert view_definition_fallback == view_definition_odbc
@pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
@pytest.mark.parametrize("table", ["t", "s", "unknown"])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_compare_get_columns_for_sql_and_odbc(self, schema, table, engine_name):
with self.engine_map[engine_name].begin() as c:
if schema is None:
c.execute("OPEN SCHEMA %s" % self.schema)
dialect = EXADialect()
columns_fallback = dialect.get_columns(connection=c, table_name=table, schema=schema, use_sql_fallback=True)
columns_odbc = dialect.get_columns(connection=c, table_name=table, schema=schema)
assert str(columns_fallback) == str(columns_odbc) # object equality doesn't work for sqltypes
@pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_compare_get_columns_none_table_for_sql_and_odbc(self, schema, engine_name):
with self.engine_map[engine_name].begin() as c:
if schema is None:
c.execute("OPEN SCHEMA %s" % self.schema)
dialect = EXADialect()
table = None
columns_fallback = dialect.get_columns(connection=c, table_name=table, schema=schema,
use_sql_fallback=True)
columns_odbc = dialect.get_columns(connection=c, table_name=table, schema=schema)
assert str(columns_fallback) == str(columns_fallback)
def make_columns_comparable(self, column_list): # object equality doesn't work for sqltypes
return sorted([{k: str(v) for k, v in column.items()} for column in column_list], key=lambda k: k["name"])
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_get_columns(self, use_sql_fallback, engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
columns = dialect.get_columns(connection=c, schema=self.schema, table_name="t",
use_sql_fallback=use_sql_fallback)
expected = [{'default': None,
'is_distribution_key': False,
'name': 'pid1',
'nullable': False,
'type': INTEGER()},
{'default': None,
'is_distribution_key': False,
'name': 'pid2',
'nullable': False,
'type': INTEGER()},
{'default': None,
'is_distribution_key': False,
'name': 'name',
'nullable': True,
'type': VARCHAR(length=20)},
{'default': None,
'is_distribution_key': False,
'name': 'age',
'nullable': True,
'type': INTEGER()},
]
assert self.make_columns_comparable(expected) == self.make_columns_comparable(columns)
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_get_columns_table_name_none(self, use_sql_fallback, engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
columns = dialect.get_columns(connection=c, schema=self.schema, table_name=None,
use_sql_fallback=use_sql_fallback)
assert columns == []
@pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
@pytest.mark.parametrize("table", ["t", "s"])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_compare_get_pk_constraint_for_sql_and_odbc(self, schema, table, engine_name):
with self.engine_map[engine_name].begin() as c:
if schema is None:
c.execute("OPEN SCHEMA %s" % self.schema)
dialect = EXADialect()
pk_constraint_fallback = dialect.get_pk_constraint(connection=c, table_name=table, schema=schema,
use_sql_fallback=True)
pk_constraint_odbc = dialect.get_pk_constraint(connection=c, table_name=table, schema=schema)
assert str(pk_constraint_fallback) == str(pk_constraint_odbc)
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_get_pk_constraint(self, use_sql_fallback, engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
pk_constraint = dialect.get_pk_constraint(connection=c, schema=self.schema, table_name="t",
use_sql_fallback=use_sql_fallback)
assert pk_constraint["constrained_columns"] == ['pid1', 'pid2'] and \
pk_constraint["name"].startswith("sys_")
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_get_pk_constraint_table_name_none(self, use_sql_fallback, engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
pk_constraint = dialect.get_pk_constraint(connection=c, schema=self.schema, table_name=None,
use_sql_fallback=use_sql_fallback)
assert pk_constraint is None
@pytest.mark.parametrize("table", ["t", "s"])
@pytest.mark.parametrize("schema", [TEST_GET_METADATA_FUNCTIONS_SCHEMA, None])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_compare_get_foreign_keys_for_sql_and_odbc(self, schema, table, engine_name):
with self.engine_map[engine_name].begin() as c:
if schema is None:
c.execute("OPEN SCHEMA %s" % self.schema_2)
dialect = EXADialect()
foreign_keys_fallback = dialect.get_foreign_keys(connection=c, table_name=table, schema=schema,
use_sql_fallback=True)
foreign_keys_odbc = dialect.get_foreign_keys(connection=c, table_name=table, schema=schema)
assert str(foreign_keys_fallback) == str(foreign_keys_odbc)
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_get_foreign_keys(self, use_sql_fallback, engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
foreign_keys = dialect.get_foreign_keys(connection=c, schema=self.schema, table_name="s",
use_sql_fallback=use_sql_fallback)
expected = [{'name': 'fk_test',
'constrained_columns': ['fid1', 'fid2'],
'referred_schema': 'test_get_metadata_functions_schema',
'referred_table': 't',
'referred_columns': ['pid1', 'pid2']}]
assert foreign_keys == expected
@pytest.mark.parametrize("use_sql_fallback", [True, False])
@pytest.mark.parametrize("engine_name", [ENGINE_NONE_DATABASE, ENGINE_SCHEMA_DATABASE, ENGINE_SCHEMA_2_DATABASE])
def test_get_foreign_keys_table_name_none(self, use_sql_fallback, engine_name):
with self.engine_map[engine_name].begin() as c:
dialect = EXADialect()
foreign_keys = dialect.get_foreign_keys(connection=c, schema=self.schema, table_name=None,
use_sql_fallback=use_sql_fallback)
assert foreign_keys == []
| 59.738602
| 177
| 0.666735
| 2,414
| 19,654
| 5.070423
| 0.058409
| 0.056373
| 0.074346
| 0.048039
| 0.816585
| 0.796977
| 0.771405
| 0.739461
| 0.705147
| 0.673203
| 0
| 0.004736
| 0.237255
| 19,654
| 328
| 178
| 59.920732
| 0.811754
| 0.005342
| 0
| 0.508591
| 0
| 0.010309
| 0.093067
| 0.0088
| 0
| 0
| 0
| 0
| 0.079038
| 1
| 0.092784
| false
| 0.006873
| 0.020619
| 0.003436
| 0.130584
| 0.006873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c17c5ab864e3da04534b047a476281e0e31c2fdb
| 108
|
py
|
Python
|
main.py
|
Aayushpatil77/Gryphon_Web_Crawler
|
ee52fe2b353cc040e662056d7888ea420c1a49fa
|
[
"MIT"
] | null | null | null |
main.py
|
Aayushpatil77/Gryphon_Web_Crawler
|
ee52fe2b353cc040e662056d7888ea420c1a49fa
|
[
"MIT"
] | 24
|
2021-11-12T20:33:04.000Z
|
2021-11-24T14:34:31.000Z
|
main.py
|
Aayushpatil77/Gryphon_Web_Crawler
|
ee52fe2b353cc040e662056d7888ea420c1a49fa
|
[
"MIT"
] | 5
|
2021-11-10T18:04:23.000Z
|
2022-02-22T04:38:16.000Z
|
from django.http import HttpResponse
def test():
return HttpResponse("Hello, world. #Homepage")
test()
| 18
| 50
| 0.731481
| 13
| 108
| 6.076923
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 108
| 6
| 51
| 18
| 0.858696
| 0
| 0
| 0
| 0
| 0
| 0.211009
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
c17dc41b3022979fe55065b808584b80b7482833
| 8,793
|
py
|
Python
|
tests/getl/blocks/load/test_load_entrypoint.py
|
husqvarnagroup/GETL
|
37251abf12bac2efed1fe463b09a288d85969141
|
[
"MIT"
] | 8
|
2020-06-10T09:00:17.000Z
|
2021-06-07T18:02:19.000Z
|
tests/getl/blocks/load/test_load_entrypoint.py
|
husqvarnagroup/GETL
|
37251abf12bac2efed1fe463b09a288d85969141
|
[
"MIT"
] | 5
|
2020-07-03T10:39:25.000Z
|
2021-08-30T14:52:47.000Z
|
tests/getl/blocks/load/test_load_entrypoint.py
|
husqvarnagroup/GETL
|
37251abf12bac2efed1fe463b09a288d85969141
|
[
"MIT"
] | 1
|
2020-05-28T07:53:48.000Z
|
2020-05-28T07:53:48.000Z
|
"""Unit test for GETL load method."""
from os import environ
from unittest.mock import Mock
from pyspark.sql import types as T
from getl.blocks.load.entrypoint import (
batch_csv,
batch_delta,
batch_json,
batch_xml,
resolve,
)
# TODO: Need to adapt to different xml version depending on spark version
environ[
"PYSPARK_SUBMIT_ARGS"
] = "--packages com.databricks:spark-xml_2.11:0.9.0 pyspark-shell"
SCHEMA = T.StructType(
[
T.StructField("name", T.StringType(), True),
T.StructField("empid", T.IntegerType(), True),
T.StructField("happy", T.BooleanType(), True),
T.StructField("sad", T.BooleanType(), True),
T.StructField("extra", T.BooleanType(), True),
]
)
# FUNCTIONS
def test_batch_json(spark_session, helpers):
"""batch_json should be able to load json files to a dataframe."""
# Arrange
helpers.create_s3_files({"schema.json": SCHEMA.json()})
conf = helpers.create_block_conf(
"",
{
"Path": helpers.relative_path(__file__, "./data/sample.json"),
"JsonSchemaPath": "s3://tmp-bucket/schema.json",
"Alias": "alias",
},
)
# Act
result_df = resolve(batch_json, conf)
# Assert
assert result_df.collect()[0][0] == "Mark Steelspitter"
assert result_df.collect()[1][0] == "Mark Two"
assert result_df.collect()[2][1] == 11
assert result_df.count() == 3
def test_batch_json_multiLine_options(spark_session, helpers):
helpers.create_s3_files({"schema.json": SCHEMA.json()})
conf = helpers.create_block_conf(
"",
{
"Path": helpers.relative_path(__file__, "./data/sample_multiline.json"),
"JsonSchemaPath": "s3://tmp-bucket/schema.json",
"Alias": "alias",
"Options": {"multiLine": True},
},
)
# Act
result_df = resolve(batch_json, conf)
# Assert
assert result_df.collect()[0][0] == "Mark Steelspitter"
assert result_df.collect()[1][0] == "Mark Two"
assert result_df.collect()[2][1] == 11
assert result_df.count() == 3
def test_batch_json_fileregistry(spark_session, helpers):
"""batch_json should be able to load json files with file registry."""
# Arrange
file_path = helpers.relative_path(__file__, "./data/sample.json")
file_registry_mock = Mock()
file_registry_mock.get.return_value.load.return_value = [file_path]
helpers.create_s3_files({"schema.json": SCHEMA.json()})
conf = helpers.create_block_conf(
"",
{
"Path": "base_path",
"JsonSchemaPath": "s3://tmp-bucket/schema.json",
"FileRegistry": "SuperReg",
},
file_registry=file_registry_mock,
)
# Act
result_df = resolve(batch_json, conf)
# Assert
assert result_df.collect()[0][0] == "Mark Steelspitter"
assert result_df.count() == 3
file_registry_mock.get.assert_called_with("SuperReg")
file_registry_mock.get.return_value.load.assert_called_with("base_path", ".json")
def test_batch_json_no_schema(spark_session, helpers):
"""batch_json should be able to load json files and inferSchema."""
# Arrange
conf = helpers.create_block_conf(
"",
{
"Path": helpers.relative_path(__file__, "./data/sample.json"),
"Alias": "alias",
},
)
# Act
result_df = resolve(batch_json, conf)
# Assert
assert result_df.collect()[0][0] == 9
assert result_df.collect()[1][3] == "Mark Two"
assert not result_df.collect()[2][2]
assert result_df.count() == 3
def test_batch_xml(spark_session, helpers):
"""Check if the batch_xml loader can load XML documents."""
helpers.create_s3_files({"schema.xml": SCHEMA.json()})
conf = helpers.create_block_conf(
"",
{
"Path": helpers.relative_path(__file__, "./data/employee.xml"),
"JsonSchemaPath": "s3://tmp-bucket/schema.xml",
"RowTag": "employee",
},
)
# Act
result_df = resolve(batch_xml, conf)
# Assert
assert result_df.collect()[0][0] == "name1"
assert result_df.count() == 3
def test_batch_xml_no_schema(spark_session, helpers):
"""Test batch_xml can load XML doc without a given schema."""
conf = helpers.create_block_conf(
"",
{
"Path": helpers.relative_path(__file__, "./data/employee.xml"),
"RowTag": "employee",
},
)
# Act
result_df = resolve(batch_xml, conf)
# Assert
assert result_df.collect()[0][0] == 123
assert result_df.collect()[1][2] == "name2"
assert result_df.collect()[2][1] == "false"
assert result_df.count() == 3
def test_batch_xml_batching(spark_session, helpers):
"""Check if the batch_xml loader can load XML documents."""
helpers.create_s3_files({"schema.xml": SCHEMA.json()})
conf = helpers.create_block_conf(
"",
{
"Path": [
helpers.relative_path(__file__, "./data/employee.xml"),
helpers.relative_path(__file__, "./data/employee_2.xml"),
],
"JsonSchemaPath": "s3://tmp-bucket/schema.xml",
"RowTag": "employee",
},
)
# Act
result_df = resolve(batch_xml, conf)
# Assert
assert result_df.collect()[0][0] == "name1"
assert result_df.count() == 4
def test_batch_xml_batching_new_column(spark_session, helpers):
"""Check if the batch_xml loader can load XML documents."""
helpers.create_s3_files({"schema.xml": SCHEMA.json()})
conf = helpers.create_block_conf(
"",
{
"Path": [
helpers.relative_path(__file__, "./data/employee.xml"),
helpers.relative_path(__file__, "./data/employee_2.xml"),
helpers.relative_path(__file__, "./data/employee_3.xml"),
],
"JsonSchemaPath": "s3://tmp-bucket/schema.xml",
"RowTag": "employee",
},
)
# Act
result_df = resolve(batch_xml, conf)
# Assert
assert result_df.collect()[4][3] is False
assert result_df.count() == 5
def test_batch_xml_fileregistry(spark_session, helpers):
"""Check if the batch_xml loader can load XML documents with a file registry."""
file_path = helpers.relative_path(__file__, "./data/employee.xml")
file_registry_mock = Mock()
file_registry_mock.get.return_value.load.return_value = [file_path]
helpers.create_s3_files({"schema.xml": SCHEMA.json()})
conf = helpers.create_block_conf(
"",
{
"Path": "base_path",
"JsonSchemaPath": "s3://tmp-bucket/schema.xml",
"RowTag": "employee",
"FileRegistry": "SuperReg",
},
file_registry=file_registry_mock,
)
# Act
result_df = resolve(batch_xml, conf)
# Assert
assert result_df.collect()[0][0] == "name1"
assert result_df.count() == 3
file_registry_mock.get.assert_called_with("SuperReg")
file_registry_mock.get.return_value.load.assert_called_with("base_path", ".xml")
def test_batch_csv(spark_session, helpers):
conf = helpers.create_block_conf(
"",
{
"Path": helpers.relative_path(__file__, "./data/sample.csv"),
"Options": {"inferSchema": True, "header": True},
},
)
# Act
result_df = resolve(batch_csv, conf)
# Assert
data = result_df.collect()
assert data[0]["name"] == "Mark Steelspitter"
assert data[0]["empid"] == 9
assert data[0]["happy"] is True
assert data[2]["name"] == "Mark Second"
assert data[2]["empid"] == 11
assert data[2]["happy"] is False
assert result_df.count() == 3
def test_batch_delta(spark_session, helpers):
conf = helpers.create_block_conf(
"",
{
"Path": helpers.relative_path(__file__, "./data/sample-delta"),
"Options": {"inferSchema": True, "header": True},
},
)
# Act
result_df = resolve(batch_delta, conf)
# Assert
data = result_df.collect()
assert data[0]["name"] == "Mark Steelspitter"
assert data[0]["empid"] == 9
assert data[0]["happy"] is True
assert data[2]["name"] == "Mark Second"
assert data[2]["empid"] == 11
assert data[2]["happy"] is False
assert result_df.count() == 3
def test_batch_delta_no_files(spark_session, helpers):
conf = helpers.create_block_conf(
"",
{
"Path": helpers.relative_path(__file__, "./data/sample-delta-nofiles"),
"Options": {"inferSchema": True, "header": True},
},
)
# Act
result_df = resolve(batch_delta, conf)
# Assert
data = result_df.collect()
assert len(data) == 0
| 28.548701
| 85
| 0.60787
| 1,059
| 8,793
| 4.78187
| 0.123702
| 0.06793
| 0.074645
| 0.066351
| 0.842417
| 0.809637
| 0.798381
| 0.789889
| 0.770142
| 0.736572
| 0
| 0.014928
| 0.245764
| 8,793
| 307
| 86
| 28.641694
| 0.748643
| 0.085409
| 0
| 0.564593
| 0
| 0
| 0.164282
| 0.04242
| 0
| 0
| 0
| 0.003257
| 0.215311
| 1
| 0.057416
| false
| 0
| 0.019139
| 0
| 0.076555
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c1870c6bef4d20bdc8064b24eaabb94b95f23f4a
| 161
|
py
|
Python
|
app/admin.py
|
kirezibana/awards
|
7f674430e84e84e5fd39106f3af7c42ee2f6b8ac
|
[
"Unlicense"
] | null | null | null |
app/admin.py
|
kirezibana/awards
|
7f674430e84e84e5fd39106f3af7c42ee2f6b8ac
|
[
"Unlicense"
] | null | null | null |
app/admin.py
|
kirezibana/awards
|
7f674430e84e84e5fd39106f3af7c42ee2f6b8ac
|
[
"Unlicense"
] | null | null | null |
from django.contrib import admin
from .models import Project, Profile, Rate
admin.site.register(Project)
admin.site.register(Profile)
admin.site.register(Rate)
| 23
| 42
| 0.813665
| 23
| 161
| 5.695652
| 0.478261
| 0.206107
| 0.389313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 161
| 6
| 43
| 26.833333
| 0.891156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c19b66b11e8920b6ef16169525e7de5c6d536e86
| 3,675
|
py
|
Python
|
tests/test_expr_where.py
|
alexkyllo/kusto-tool
|
c7689dfa363a1c8d40532cc7929570154fe82307
|
[
"MIT"
] | null | null | null |
tests/test_expr_where.py
|
alexkyllo/kusto-tool
|
c7689dfa363a1c8d40532cc7929570154fe82307
|
[
"MIT"
] | null | null | null |
tests/test_expr_where.py
|
alexkyllo/kusto-tool
|
c7689dfa363a1c8d40532cc7929570154fe82307
|
[
"MIT"
] | null | null | null |
from kusto_tool import expression as exp
from pytest import fixture
from .fake_database import FakeDatabase
@fixture
def db():
return FakeDatabase("test", "testdb")
@fixture
def tbl(db):
return exp.TableExpr("tbl", database=db, columns={"foo": str, "bar": int})
def test_where_eq_str():
actual = str(exp.Where(exp.Infix(exp.OP.EQ, exp.Column("foo", str), "a")))
expected = "| where foo == 'a'"
assert actual == expected
def test_where_eq_int():
actual = str(exp.Where(exp.Infix(exp.OP.EQ, exp.Column("foo", str), 2)))
expected = "| where foo == 2"
assert actual == expected
def test_where_ne_int():
actual = str(exp.Where(exp.Infix(exp.OP.NE, exp.Column("foo", str), 2)))
expected = "| where foo != 2"
assert actual == expected
def test_where_lt_int():
actual = str(exp.Where(exp.Infix(exp.OP.LT, exp.Column("foo", str), 2)))
expected = "| where foo < 2"
assert actual == expected
def test_where_lte_int():
actual = str(exp.Where(exp.Infix(exp.OP.LE, exp.Column("foo", str), 2)))
expected = "| where foo <= 2"
assert actual == expected
def test_where_gt_int():
actual = str(exp.Where(exp.Infix(exp.OP.GT, exp.Column("foo", str), 2)))
expected = "| where foo > 2"
assert actual == expected
def test_where_gte_int():
actual = str(exp.Where(exp.Infix(exp.OP.GE, exp.Column("foo", str), 2)))
expected = "| where foo >= 2"
assert actual == expected
def test_table_where_eq(tbl):
q = str(tbl.where(tbl.bar == "foo"))
ex = "cluster('test').database('testdb').['tbl']\n| where bar == 'foo'\n"
assert q == ex
def test_table_where_ne(tbl):
q = str(tbl.where(tbl.bar != "foo"))
ex = "cluster('test').database('testdb').['tbl']\n| where bar != 'foo'\n"
assert q == ex
def test_table_where_lt(tbl):
q = str(tbl.where(tbl.bar < "foo"))
ex = "cluster('test').database('testdb').['tbl']\n| where bar < 'foo'\n"
assert q == ex
def test_table_where_le(tbl):
q = str(tbl.where(tbl.bar <= "foo"))
ex = "cluster('test').database('testdb').['tbl']\n| where bar <= 'foo'\n"
assert q == ex
def test_table_where_gt(tbl):
q = str(tbl.where(tbl.bar > "foo"))
ex = "cluster('test').database('testdb').['tbl']\n| where bar > 'foo'\n"
assert q == ex
def test_table_where_ge(tbl):
q = str(tbl.where(tbl.bar >= "foo"))
ex = "cluster('test').database('testdb').['tbl']\n| where bar >= 'foo'\n"
assert q == ex
def test_where_repr():
where = exp.Where(exp.Infix(exp.OP.EQ, exp.Column("foo", str), 2))
assert repr(where) == "Where(Column(\"foo\", <class 'str'>) == 2)"
def test_where_and():
foo = exp.Column("foo", str)
bar = exp.Column("bar", str)
where = exp.Where((foo == "a") & (bar == "b"))
assert str(where) == "| where (foo == 'a') and (bar == 'b')"
def test_where_or():
foo = exp.Column("foo", str)
bar = exp.Column("bar", str)
where = exp.Where((foo == "a") | (bar == "b"))
assert str(where) == "| where (foo == 'a') or (bar == 'b')"
def test_not():
foo = exp.Column("foo", bool)
where = exp.Where(~(foo == "a"))
assert str(where) == "| where not(foo == 'a')"
def test_where_cols():
foo = exp.Column("foo", str)
bar = exp.Column("bar", str)
where = exp.Where(foo == bar)
assert str(where) == "| where foo == bar"
def test_where_isin():
foo = exp.Column("foo", str)
where = exp.Where(foo.isin("bar", "baz"))
assert str(where) == "| where foo in ('bar', 'baz')"
def test_where_isin_int():
foo = exp.Column("foo", str)
where = exp.Where(foo.isin(1, 2, 3))
assert str(where) == "| where foo in (1, 2, 3)"
| 26.824818
| 78
| 0.593741
| 564
| 3,675
| 3.771277
| 0.10461
| 0.06582
| 0.078984
| 0.091678
| 0.764457
| 0.746121
| 0.70851
| 0.70851
| 0.70851
| 0.629525
| 0
| 0.006796
| 0.199184
| 3,675
| 136
| 79
| 27.022059
| 0.715936
| 0
| 0
| 0.252747
| 0
| 0
| 0.220136
| 0.073469
| 0
| 0
| 0
| 0
| 0.21978
| 1
| 0.241758
| false
| 0
| 0.032967
| 0.021978
| 0.296703
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c19e5fe5435a4cf4b75d4a2f8a46480d0ba1db04
| 2,842
|
py
|
Python
|
tests/test_config.py
|
drdavella/tox-conda
|
8cb9d2f4fed1f7b3e851a2460bbd7756fad7d19c
|
[
"MIT"
] | 2
|
2018-12-05T18:37:46.000Z
|
2018-12-29T02:41:23.000Z
|
tests/test_config.py
|
drdavella/tox-conda
|
8cb9d2f4fed1f7b3e851a2460bbd7756fad7d19c
|
[
"MIT"
] | 7
|
2018-11-03T14:55:23.000Z
|
2019-03-27T20:26:07.000Z
|
tests/test_config.py
|
drdavella/tox-conda
|
8cb9d2f4fed1f7b3e851a2460bbd7756fad7d19c
|
[
"MIT"
] | 1
|
2018-12-28T16:00:19.000Z
|
2018-12-28T16:00:19.000Z
|
import pytest
def test_conda_deps(tmpdir, newconfig):
config = newconfig(
[],
"""
[tox]
toxworkdir = {}
[testenv:py1]
deps=
hello
conda_deps=
world
something
""".format(
tmpdir
),
)
assert len(config.envconfigs) == 1
assert hasattr(config.envconfigs['py1'], 'deps')
assert hasattr(config.envconfigs['py1'], 'conda_deps')
assert len(config.envconfigs['py1'].conda_deps) == 2
# For now, as a workaround, we temporarily add all conda dependencies to
# deps as well. This allows tox to know whether an environment needs to be
# updated or not. Eventually there may be a cleaner solution.
assert len(config.envconfigs['py1'].deps) == 3
assert 'world' == config.envconfigs['py1'].conda_deps[0].name
assert 'something' == config.envconfigs['py1'].conda_deps[1].name
def test_no_conda_deps(tmpdir, newconfig):
config = newconfig(
[],
"""
[tox]
toxworkdir = {}
[testenv:py1]
deps=
hello
""".format(
tmpdir
),
)
assert len(config.envconfigs) == 1
assert hasattr(config.envconfigs['py1'], 'deps')
assert hasattr(config.envconfigs['py1'], 'conda_deps')
assert hasattr(config.envconfigs['py1'], 'conda_channels')
assert len(config.envconfigs['py1'].conda_deps) == 0
assert len(config.envconfigs['py1'].conda_channels) == 0
assert len(config.envconfigs['py1'].deps) == 1
def test_conda_channels(tmpdir, newconfig):
config = newconfig(
[],
"""
[tox]
toxworkdir = {}
[testenv:py1]
deps=
hello
conda_deps=
something
else
conda_channels=
conda-forge
""".format(
tmpdir
),
)
assert len(config.envconfigs) == 1
assert hasattr(config.envconfigs['py1'], 'deps')
assert hasattr(config.envconfigs['py1'], 'conda_deps')
assert hasattr(config.envconfigs['py1'], 'conda_channels')
assert len(config.envconfigs['py1'].conda_channels) == 1
assert 'conda-forge' in config.envconfigs['py1'].conda_channels
def test_conda_force_deps(tmpdir, newconfig):
config = newconfig(
['--force-dep=something<42.1'],
"""
[tox]
toxworkdir = {}
[testenv:py1]
deps=
hello
conda_deps=
something
else
conda_channels=
conda-forge
""".format(
tmpdir
),
)
assert len(config.envconfigs) == 1
assert hasattr(config.envconfigs['py1'], 'conda_deps')
assert len(config.envconfigs['py1'].conda_deps) == 2
assert 'something<42.1' == config.envconfigs['py1'].conda_deps[0].name
| 26.811321
| 78
| 0.574243
| 301
| 2,842
| 5.322259
| 0.20598
| 0.2397
| 0.237204
| 0.224719
| 0.792759
| 0.734082
| 0.692884
| 0.618602
| 0.618602
| 0.618602
| 0
| 0.021989
| 0.295918
| 2,842
| 105
| 79
| 27.066667
| 0.778611
| 0.071429
| 0
| 0.641509
| 0
| 0
| 0.105182
| 0.01334
| 0
| 0
| 0
| 0
| 0.45283
| 1
| 0.075472
| false
| 0
| 0.018868
| 0
| 0.09434
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c1e24833c06f938f20d6edff45d2cb02a8374976
| 145
|
py
|
Python
|
real_graph_select.py
|
Beaconsyh08/Real_Graph_Select
|
a164e76102ecd5aa78763050fd05029acb0b4993
|
[
"MIT"
] | null | null | null |
real_graph_select.py
|
Beaconsyh08/Real_Graph_Select
|
a164e76102ecd5aa78763050fd05029acb0b4993
|
[
"MIT"
] | null | null | null |
real_graph_select.py
|
Beaconsyh08/Real_Graph_Select
|
a164e76102ecd5aa78763050fd05029acb0b4993
|
[
"MIT"
] | null | null | null |
from app import app, db
from app.models import Poem
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'Poem': Poem}
| 18.125
| 35
| 0.731034
| 23
| 145
| 4.434783
| 0.521739
| 0.137255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158621
| 145
| 7
| 36
| 20.714286
| 0.836066
| 0
| 0
| 0
| 0
| 0
| 0.041379
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
c1f4638cedf017f2f1d8ccd8e4707001e8f8c9ff
| 27
|
py
|
Python
|
Python POO/main.py
|
lucasjlgc/Python-POO
|
56c98e7abb47a0268396f1981e58a0a2441db4fe
|
[
"MIT"
] | null | null | null |
Python POO/main.py
|
lucasjlgc/Python-POO
|
56c98e7abb47a0268396f1981e58a0a2441db4fe
|
[
"MIT"
] | null | null | null |
Python POO/main.py
|
lucasjlgc/Python-POO
|
56c98e7abb47a0268396f1981e58a0a2441db4fe
|
[
"MIT"
] | null | null | null |
from pessoa import Pessoa
| 9
| 25
| 0.814815
| 4
| 27
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 27
| 2
| 26
| 13.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
de14812da4cbab8dfe169e0a68268beebae12f71
| 230
|
py
|
Python
|
HeavyFlavorAnalysis/Skimming/python/tauTo3MuSequences_cff.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 6
|
2017-09-08T14:12:56.000Z
|
2022-03-09T23:57:01.000Z
|
HeavyFlavorAnalysis/Skimming/python/tauTo3MuSequences_cff.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 545
|
2017-09-19T17:10:19.000Z
|
2022-03-07T16:55:27.000Z
|
HeavyFlavorAnalysis/Skimming/python/tauTo3MuSequences_cff.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 14
|
2017-10-04T09:47:21.000Z
|
2019-10-23T18:04:45.000Z
|
import FWCore.ParameterSet.Config as cms
from HeavyFlavorAnalysis.Skimming.tauTo3MuHLTPath_cfi import *
from HeavyFlavorAnalysis.Skimming.tauTo3MuFilter_cfi import *
tauTo3MuSkim = cms.Sequence(tauTo3MuHLTFilter+tauTo3MuFilter)
| 32.857143
| 62
| 0.869565
| 23
| 230
| 8.608696
| 0.652174
| 0.232323
| 0.313131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023474
| 0.073913
| 230
| 6
| 63
| 38.333333
| 0.906103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a9a7302bf2bdc1e18b089f25ec17e63f11337e81
| 22
|
py
|
Python
|
Python/BackgroundApp/BackgroundApp/PythonHome/WinRTExtension.zip/WinRT/__init__.py
|
Carlosgm02/UWP-Languages
|
b5653c8f452b204645e3b6276caa95de2432f77e
|
[
"MIT"
] | 6
|
2019-10-30T08:41:15.000Z
|
2021-02-24T09:20:46.000Z
|
Python/BackgroundApp/BackgroundApp/PythonHome/WinRTExtension.zip/WinRT/__init__.py
|
carlosgm02/uwp-languages
|
b5653c8f452b204645e3b6276caa95de2432f77e
|
[
"MIT"
] | null | null | null |
Python/BackgroundApp/BackgroundApp/PythonHome/WinRTExtension.zip/WinRT/__init__.py
|
carlosgm02/uwp-languages
|
b5653c8f452b204645e3b6276caa95de2432f77e
|
[
"MIT"
] | null | null | null |
from _winrt import *
| 11
| 21
| 0.727273
| 3
| 22
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 22
| 1
| 22
| 22
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a9f12a5d988555852c0080f8c8a1c4ce957aa4ba
| 14,149
|
py
|
Python
|
authors/apps/articles/test/test_create_articles.py
|
Kasulejoseph/ah-backend-athena
|
016810d6a2391ae45985b4d43003e51ada1e81be
|
[
"BSD-3-Clause"
] | null | null | null |
authors/apps/articles/test/test_create_articles.py
|
Kasulejoseph/ah-backend-athena
|
016810d6a2391ae45985b4d43003e51ada1e81be
|
[
"BSD-3-Clause"
] | 31
|
2018-11-26T17:42:35.000Z
|
2022-03-11T23:36:55.000Z
|
authors/apps/articles/test/test_create_articles.py
|
Kasulejoseph/ah-backend-athena
|
016810d6a2391ae45985b4d43003e51ada1e81be
|
[
"BSD-3-Clause"
] | 6
|
2018-11-23T09:55:02.000Z
|
2021-06-17T15:18:49.000Z
|
import json
from .base import BaseTestArticles
from rest_framework.views import status
from ..models import Article
from ...authentication.models import User
from ....apps.profiles.models import Profile
class TestArticles(BaseTestArticles):
def data3_user_jwt(self):
return User.objects.create_user(**self.data3['user']).token()
def super_user_jwt(self):
user = User.objects.create_superuser(**self.data3['user'])
return user.token()
def test_create_models_article(self):
user = User.objects.create_user(
username='henry', email='henry@gmail.com',
password='Pass12')
user.is_verified = True
user = User.objects.filter(email='henry@gmail.com').first()
author = Profile.objects.get(user_id=user.id)
article = Article.objects.create(
title='article title', author=author)
self.assertEqual(str(article), article.title)
def create_article(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.post(
'/api/articles/', data=self.article, format='json')
return response.data['slug']
def test_create_article(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.post(
'/api/articles/', data=self.article, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_article_long_title(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.post(
'/api/articles/', data=self.article_log_tile, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_get_article(self):
slug = self.create_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.get(
'/api/articles/'+slug+'', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_article_doesnot_exist(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
fake_slug = "ed"*23
response = self.client.get(
'/api/articles/{}'.format(fake_slug), format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_article(self):
slug = self.create_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.delete(
'/api/articles/'+slug+'/', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_article_doesnot_exist(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
fake_slug = "ed"*23
response = self.client.delete(
'/api/articles/{}/'.format(fake_slug), format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_non_existing_article(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
fake_slug = "ed"*23
response = self.client.put(
'/api/articles/ffhfh-ggrg/', data=self.updated_article, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_update_article(self):
slug = self.create_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.put(
'/api/articles/'+slug+'/', data=self.updated_article, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_get_no_existing_published_articles(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.get(
'/api/articles', format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_get_all_published_articles(self):
self.create_article()
self.create_article()
self.create_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.get(
'/api/articles', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_all_tags(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
self.client.post(
'/api/articles/', data=self.article, format='json')
slug = self.create_article()
response = self.client.get(
'/api/{}/tags/'.format(slug, format='json')
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['tags'], ['Rails', 'Golang', 'magic!'])
def test_delete_tag(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
self.client.post(
'/api/articles/', data=self.article, format='json')
slug = self.create_article()
response = self.client.delete(
'/api/{}/tags/magic!/'.format(slug, format='json')
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_article_not_author(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.post(
'/api/articles/', data=self.article, format='json')
response = json.loads(response.content)
slug = response['article']['slug']
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.data3_user_jwt())
res = self.client.delete(
'/api/articles/{}/'.format(slug))
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(
json.loads(res.content)['article']['error'],
'You can only delete your own articles'
)
def test_report_article(self):
slug = self.create_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.post(
'/api/articles/{}/report/'.format(slug),
data=self.report_article_data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
json.loads(response.content)['reported']['reason'],
'article contains porn'
)
def test_report_article_doesnot_exist(self):
slug = 'fake-slug'
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.post(
'/api/articles/{}/report/'.format(slug),
data=self.report_article_data, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(
json.loads(response.content)['errors'],
'This article doesnot exist'
)
def test_report_article_no_data(self):
slug = self.create_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.post(
'/api/articles/{}/report/'.format(slug), data={}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
json.loads(response.content)['errors'],
'Provide reason for reporting'
)
def test_report_article_empty_reason(self):
slug = self.create_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.post(
'/api/articles/{}/report/'.format(slug),
self.report_article_data_empty_reason, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
json.loads(response.content)['errors']['reason'],
['This field may not be blank.']
)
def test_report_article_more_than_5_times(self):
slug = self.create_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
for article in range(6):
self.client.post(
'/api/articles/{}/report/'.format(slug),
data=self.report_article_data, format='json')
response = self.client.post(
'/api/articles/{}/report/'.format(slug),
data=self.report_article_data, format='json')
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
self.assertEqual(
json.loads(response.content)['errors'],
'This article has been reported more than 5 times'
)
def test_fetch_all_reported_articles_non_superuser(self):
self.create_article()
self.create_article()
self.create_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.get('/api/reported/', format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
json.loads(response.content)['reported']['detail'],
'You do not have permission to perform this action.'
)
def test_fetch_all_reported_articles_superuser(self):
slug1 = self.create_article()
slug2 = self.create_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.super_user_jwt())
self.client.post(
'/api/articles/{}/report/'.format(slug1),
data=self.report_article_data, format='json')
self.client.post(
'/api/articles/{}/report/'.format(slug2),
data=self.report_article_data, format='json')
response = self.client.get('/api/reported/', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn(
slug2,
json.loads(response.content)[
'reported']['articles'][0]['article_slug'],
)
def test_fetch_all_reported_articles_that_dont_exist(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.super_user_jwt())
response = self.client.get('/api/reported/', format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(
json.loads(response.content)['reported']['articles']['message'],
'There are no reported articles'
)
def test_revert_reported_article(self):
slug = self.create_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
self.client.post(
'/api/articles/{}/report/'.format(slug), format='json')
self.client.post(
'/api/articles/{}/report/'.format(slug), format='json')
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.super_user_jwt())
response = self.client.put(
'/api/reported/{}/revert/'.format(slug), format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data['reported']['message'],
'article restored successully'
)
def test_revert_reported_article_doesnot_exist(self):
slug = 'fake_slug'
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.super_user_jwt())
response = self.client.put(
'/api/reported/{}/revert/'.format(slug), format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_reported_article(self):
slug = self.create_article()
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
self.client.post(
'/api/articles/{}/report/'.format(slug), format='json')
self.client.post(
'/api/articles/{}/report/'.format(slug), format='json')
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.super_user_jwt())
response = self.client.delete(
'/api/reported/{}/delete/'.format(slug), format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data['reported']['message'],
'article was deleted successully'
)
def test_delete_reported_article_doesnot_exist(self):
self.create_article()
slug = 'fakeslug'
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.super_user_jwt())
response = self.client.delete(
'/api/reported/{}/delete/'.format(slug), format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(
response.data['reported']['error'],
'This article doesnot exist'
)
def test_articles_pagination(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
self.client.post(
'/api/articles/', data=self.article, format='json')
self.client.post(
'/api/articles/', data=self.article, format='json')
response = self.client.get(
'/api/articles?page=1&limit=1',format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_page_doesnot_exist(self):
self.client.credentials(
HTTP_AUTHORIZATION='Bearer ' + self.login_user())
response = self.client.get(
'/api/articles?page=5',format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
| 41.250729
| 82
| 0.627111
| 1,560
| 14,149
| 5.480769
| 0.103205
| 0.083041
| 0.052398
| 0.090643
| 0.818012
| 0.79848
| 0.767719
| 0.749123
| 0.740117
| 0.725029
| 0
| 0.009834
| 0.245388
| 14,149
| 342
| 83
| 41.371345
| 0.790953
| 0
| 0
| 0.62623
| 0
| 0
| 0.126096
| 0.030888
| 0
| 0
| 0
| 0
| 0.134426
| 1
| 0.101639
| false
| 0.003279
| 0.019672
| 0.003279
| 0.134426
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e70bb3a04a1fd426e40b7909cae8bda16260e6c6
| 1,067
|
py
|
Python
|
pyenv/lib/python3.6/_weakrefset.py
|
ronald-rgr/ai-chatbot-smartguide
|
c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf
|
[
"Apache-2.0"
] | null | null | null |
pyenv/lib/python3.6/_weakrefset.py
|
ronald-rgr/ai-chatbot-smartguide
|
c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf
|
[
"Apache-2.0"
] | 3
|
2020-03-23T18:01:51.000Z
|
2021-03-19T23:15:15.000Z
|
pyenv/lib/python3.6/_weakrefset.py
|
ronald-rgr/ai-chatbot-smartguide
|
c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf
|
[
"Apache-2.0"
] | null | null | null |
XSym
0078
350008fd8e0cf4fdaa063759c1fd071b
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/_weakrefset.py
| 213.4
| 945
| 0.099344
| 15
| 1,067
| 7
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.228814
| 0.88941
| 1,067
| 5
| 945
| 213.4
| 0.661017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e7192a652cc3b4f56fd8f1600fe41d11e8eebf58
| 221
|
py
|
Python
|
tests/file_to_strip.py
|
smok-serwis/strip-docs
|
17ceb3aef9b3a8b9f80592da51cd8162edfc7600
|
[
"MIT"
] | null | null | null |
tests/file_to_strip.py
|
smok-serwis/strip-docs
|
17ceb3aef9b3a8b9f80592da51cd8162edfc7600
|
[
"MIT"
] | null | null | null |
tests/file_to_strip.py
|
smok-serwis/strip-docs
|
17ceb3aef9b3a8b9f80592da51cd8162edfc7600
|
[
"MIT"
] | null | null | null |
"""
Strip me
"""
class EmptyClass(object):
"""Strip me"""
pass
def empty_method(self):
"""Strip me"""
pass
def empty_routine():
"""
Strip me
:return: nothing
"""
pass
| 10.045455
| 27
| 0.493213
| 23
| 221
| 4.652174
| 0.565217
| 0.261682
| 0.205607
| 0.261682
| 0.35514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.348416
| 221
| 21
| 28
| 10.52381
| 0.743056
| 0.239819
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
e72663b878c8d9076da42c6d7b5d2a0e8a31201f
| 26
|
py
|
Python
|
code/src/__init__.py
|
nzw0301/Understanding-Negative-Samples-in-Instance-Discriminative-Self-supervised-Representation-Learning
|
957173bd8ec5b5e00994099d8b4467c74b802303
|
[
"MIT"
] | 4
|
2021-10-06T07:04:43.000Z
|
2022-01-28T09:31:29.000Z
|
code/src/__init__.py
|
nzw0301/Understanding-Negative-Samples
|
957173bd8ec5b5e00994099d8b4467c74b802303
|
[
"MIT"
] | null | null | null |
code/src/__init__.py
|
nzw0301/Understanding-Negative-Samples
|
957173bd8ec5b5e00994099d8b4467c74b802303
|
[
"MIT"
] | null | null | null |
from .loss import NT_Xent
| 13
| 25
| 0.807692
| 5
| 26
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e77ae9d82735c469e3ecb3a79c3b10547249e4e0
| 350
|
py
|
Python
|
Demo/Demo_gym/envs/classic_control/__init__.py
|
Remosy/iceHocekeyIRL
|
1ffeaf8a9bd9585038629be41a2da552e0a4473b
|
[
"MIT"
] | null | null | null |
Demo/Demo_gym/envs/classic_control/__init__.py
|
Remosy/iceHocekeyIRL
|
1ffeaf8a9bd9585038629be41a2da552e0a4473b
|
[
"MIT"
] | 3
|
2019-03-09T02:35:24.000Z
|
2019-09-27T11:05:01.000Z
|
Demo/Demo_gym/envs/classic_control/__init__.py
|
Remosy/iceHocekeyIRL
|
1ffeaf8a9bd9585038629be41a2da552e0a4473b
|
[
"MIT"
] | null | null | null |
from Demo_gym.envs.classic_control.cartpole import CartPoleEnv
from Demo_gym.envs.classic_control.mountain_car import MountainCarEnv
from Demo_gym.envs.classic_control.continuous_mountain_car import Continuous_MountainCarEnv
from Demo_gym.envs.classic_control.pendulum import PendulumEnv
from Demo_gym.envs.classic_control.acrobot import AcrobotEnv
| 50
| 91
| 0.897143
| 49
| 350
| 6.122449
| 0.346939
| 0.133333
| 0.183333
| 0.25
| 0.576667
| 0.576667
| 0.286667
| 0
| 0
| 0
| 0
| 0
| 0.06
| 350
| 6
| 92
| 58.333333
| 0.911854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.