hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8a82e8acd38b4f56fc416ea690cd3394e725bc03
| 49
|
py
|
Python
|
python_crash_course/functions/import.py
|
heniu1985/Learning
|
e865d59df9c4b1b0d7fc0158635c2a035f20bd4f
|
[
"MIT"
] | null | null | null |
python_crash_course/functions/import.py
|
heniu1985/Learning
|
e865d59df9c4b1b0d7fc0158635c2a035f20bd4f
|
[
"MIT"
] | null | null | null |
python_crash_course/functions/import.py
|
heniu1985/Learning
|
e865d59df9c4b1b0d7fc0158635c2a035f20bd4f
|
[
"MIT"
] | null | null | null |
from sandwich import *
sandwich("ser", "szynka")
| 16.333333
| 25
| 0.714286
| 6
| 49
| 5.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 49
| 3
| 25
| 16.333333
| 0.813953
| 0
| 0
| 0
| 0
| 0
| 0.18
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8a89f0e5f02c085537c3619ab9f7a5d7e477bd0a
| 191
|
py
|
Python
|
testIronPython.py
|
AndreasMuehlmann/Drone-Simulation
|
20fa39d16b0ac4a0dcb6102f030c313a03591be5
|
[
"MIT"
] | null | null | null |
testIronPython.py
|
AndreasMuehlmann/Drone-Simulation
|
20fa39d16b0ac4a0dcb6102f030c313a03591be5
|
[
"MIT"
] | null | null | null |
testIronPython.py
|
AndreasMuehlmann/Drone-Simulation
|
20fa39d16b0ac4a0dcb6102f030c313a03591be5
|
[
"MIT"
] | null | null | null |
import imp
import os
class Test():
def __init__(self, value):
self.value = value
def test(self):
return 187 + self.value
print(os.path.abspath(__file__))
| 14.692308
| 36
| 0.602094
| 25
| 191
| 4.28
| 0.6
| 0.252336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 0.293194
| 191
| 13
| 36
| 14.692308
| 0.77037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.125
| 0.75
| 0.125
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
8a8accc8913cf50ea33ea40d2b03b335547723ea
| 129
|
py
|
Python
|
ros_ws/src/baxter_examples/scripts/send_urdf_fragment.py
|
mesneym/Baxter-Arm-PP
|
fdbf86309bc64c31af105daa026b2f8519710129
|
[
"MIT"
] | null | null | null |
ros_ws/src/baxter_examples/scripts/send_urdf_fragment.py
|
mesneym/Baxter-Arm-PP
|
fdbf86309bc64c31af105daa026b2f8519710129
|
[
"MIT"
] | null | null | null |
ros_ws/src/baxter_examples/scripts/send_urdf_fragment.py
|
mesneym/Baxter-Arm-PP
|
fdbf86309bc64c31af105daa026b2f8519710129
|
[
"MIT"
] | null | null | null |
version https://git-lfs.github.com/spec/v1
oid sha256:d96b3a18312eefe6f36c9b703a3a4c596fd7f454e6ffac8da72ddd23224f1113
size 4348
| 32.25
| 75
| 0.883721
| 13
| 129
| 8.769231
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.365854
| 0.046512
| 129
| 3
| 76
| 43
| 0.560976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8a934fdb77f5f684075fa4f25dc288561eda263e
| 26
|
py
|
Python
|
pymagnitude/third_party_mock/parsimonious/exceptions/__init__.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 1,520
|
2018-03-01T13:37:49.000Z
|
2022-03-25T11:40:20.000Z
|
pymagnitude/third_party_mock/parsimonious/exceptions/__init__.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 87
|
2018-03-03T15:12:50.000Z
|
2022-02-21T15:24:12.000Z
|
pymagnitude/third_party_mock/parsimonious/exceptions/__init__.py
|
tpeng/magnitude
|
aec98628b5547773ca8c4114ec6d1ad51e21b230
|
[
"MIT"
] | 121
|
2018-03-03T08:40:53.000Z
|
2022-03-16T05:19:38.000Z
|
class ParseError:
pass
| 13
| 17
| 0.730769
| 3
| 26
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 26
| 2
| 18
| 13
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
8aa0817fc50de4c5b73e45ae4d4bd7219ab783e6
| 1,201
|
py
|
Python
|
mll/tests/test_receiver_fc.py
|
asappresearch/compositional-inductive-bias
|
2c67713306ec6591f397ca252f915c3edc5a794f
|
[
"MIT"
] | 2
|
2021-07-09T16:32:00.000Z
|
2022-03-21T17:32:39.000Z
|
mll/tests/test_receiver_fc.py
|
asappresearch/compositional-inductive-bias
|
2c67713306ec6591f397ca252f915c3edc5a794f
|
[
"MIT"
] | null | null | null |
mll/tests/test_receiver_fc.py
|
asappresearch/compositional-inductive-bias
|
2c67713306ec6591f397ca252f915c3edc5a794f
|
[
"MIT"
] | 1
|
2021-07-09T16:32:02.000Z
|
2021-07-09T16:32:02.000Z
|
import torch
import numpy as np
from mll.recv_models import fc1l_model, fc2l_model
def test_fc1l():
N = 5
utt_len = 20
vocab_size = 4
embedding_size = 11
num_meaning_types = 5
meanings_per_type = 10
inputs = torch.from_numpy(np.random.choice(vocab_size + 1, (utt_len, N), replace=True))
fc1l = fc1l_model.FC1LModel(
embedding_size=embedding_size, vocab_size=vocab_size, utt_len=utt_len, num_meaning_types=num_meaning_types,
meanings_per_type=meanings_per_type)
output = fc1l(inputs)
assert list(output.size()) == [N, num_meaning_types, meanings_per_type]
def test_fc2l():
N = 5
utt_len = 20
vocab_size = 4
embedding_size = 11
num_meaning_types = 5
meanings_per_type = 10
inputs = torch.from_numpy(np.random.choice(vocab_size + 1, (utt_len, N), replace=True))
fc1l = fc2l_model.FC2LModel(
embedding_size=embedding_size, vocab_size=vocab_size, utt_len=utt_len, num_meaning_types=num_meaning_types,
meanings_per_type=meanings_per_type, dropout=0.5)
output = fc1l(inputs)
print('output.size()', output.size())
assert list(output.size()) == [N, num_meaning_types, meanings_per_type]
| 31.605263
| 115
| 0.712739
| 181
| 1,201
| 4.381215
| 0.248619
| 0.06053
| 0.151324
| 0.116015
| 0.759143
| 0.759143
| 0.759143
| 0.759143
| 0.759143
| 0.759143
| 0
| 0.034908
| 0.189009
| 1,201
| 37
| 116
| 32.459459
| 0.779261
| 0
| 0
| 0.666667
| 0
| 0
| 0.010824
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.066667
| false
| 0
| 0.1
| 0
| 0.166667
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8aa2224eaa6dd04ec21318bd1f91e2b891627006
| 195
|
py
|
Python
|
Informatik1/Finals Prep/HS18/Task_3/reverse_list.py
|
Queentaker/uzh
|
35cccaf910b95d15db21be80c8567eb427202591
|
[
"MIT"
] | 8
|
2021-11-21T10:02:08.000Z
|
2022-03-15T21:02:02.000Z
|
Informatik1/Finals Prep/HS18/Task_3/reverse_list.py
|
Queentaker/uzh
|
35cccaf910b95d15db21be80c8567eb427202591
|
[
"MIT"
] | null | null | null |
Informatik1/Finals Prep/HS18/Task_3/reverse_list.py
|
Queentaker/uzh
|
35cccaf910b95d15db21be80c8567eb427202591
|
[
"MIT"
] | 3
|
2021-11-19T18:52:56.000Z
|
2022-02-27T15:45:59.000Z
|
def reverse(list):
if len(list) < 2:
return list
return [list[-1]] + reverse(list[:-1])
assert reverse([]) == []
assert reverse([2]) == [2]
assert reverse([2, 6, 5]) == [5, 6, 2]
| 24.375
| 42
| 0.533333
| 29
| 195
| 3.586207
| 0.37931
| 0.375
| 0.269231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072848
| 0.225641
| 195
| 8
| 43
| 24.375
| 0.615894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.428571
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8aa6f63cbcb3ae80120ea8ca4fc299048965cd37
| 57
|
py
|
Python
|
autofront/tests/simple_script_live.py
|
JimmyLamothe/autofront
|
d179e54411f5d53046a5fa52b4430e09b01ebaca
|
[
"BSD-3-Clause"
] | 1
|
2020-11-16T22:18:03.000Z
|
2020-11-16T22:18:03.000Z
|
autofront/tests/simple_script_live.py
|
JimmyLamothe/autofront
|
d179e54411f5d53046a5fa52b4430e09b01ebaca
|
[
"BSD-3-Clause"
] | null | null | null |
autofront/tests/simple_script_live.py
|
JimmyLamothe/autofront
|
d179e54411f5d53046a5fa52b4430e09b01ebaca
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
print('Runtime arguments: ' + str(sys.argv))
| 14.25
| 44
| 0.701754
| 8
| 57
| 5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140351
| 57
| 3
| 45
| 19
| 0.816327
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
76fd5f139696da3ba5c0aaf6243aef5b5f911874
| 4,041
|
py
|
Python
|
tests/training/test_training.py
|
e-sha/dvs_of_training_framework
|
7cb529e65d7edb8a147ebeb6f9ad1ffc0b843c81
|
[
"MIT"
] | 1
|
2022-03-31T05:19:36.000Z
|
2022-03-31T05:19:36.000Z
|
tests/training/test_training.py
|
e-sha/dvs_of_training_framework
|
7cb529e65d7edb8a147ebeb6f9ad1ffc0b843c81
|
[
"MIT"
] | 1
|
2022-01-12T13:45:38.000Z
|
2022-01-12T13:45:38.000Z
|
tests/training/test_training.py
|
e-sha/dvs_of_training_framework
|
7cb529e65d7edb8a147ebeb6f9ad1ffc0b843c81
|
[
"MIT"
] | 1
|
2020-11-24T10:00:50.000Z
|
2020-11-24T10:00:50.000Z
|
from tests.utils import test_path
import torch
import tempfile
import time
from types import SimpleNamespace
from utils.dataset import DatasetImpl, collate_wrapper
from utils.loss import Losses
from utils.timer import FakeTimer
from utils.training import train, validate
from train_flownet import init_model, construct_train_tools
def test_trainloop():
args = SimpleNamespace(wdw=0.01,
training_steps=1,
rs=0,
optimizer='ADAM',
lr=0.01,
half_life=1,
device=torch.device('cpu'))
data_path = test_path/'data/seq'
shape = [256, 256]
dataset = DatasetImpl(path=data_path,
shape=shape,
augmentation=False,
collapse_length=1,
is_raw=True,
max_seq_length=1)
data_loader = torch.utils.data.DataLoader(dataset,
collate_fn=collate_wrapper,
batch_size=2,
pin_memory=True,
shuffle=False)
model = init_model(
SimpleNamespace(flownet_path=test_path.parent/'EV_FlowNet',
mish=False, sp=None, prefix_length=0,
suffix_length=0, max_sequence_length=1,
dynamic_sample_length=False,
event_representation_depth=9),
device=args.device)
optimizer, scheduler = construct_train_tools(args, model)
evaluator = Losses([tuple(map(lambda x: x // 2 ** i, shape))
for i in range(4)][::-1], 2, args.device)
with tempfile.TemporaryDirectory() as td:
logger = torch.utils.tensorboard.SummaryWriter(log_dir=td)
train(model=model, device=args.device, loader=data_loader,
optimizer=optimizer, num_steps=args.training_steps,
scheduler=scheduler, logger=logger, evaluator=evaluator,
timers=FakeTimer())
del logger
time.sleep(1)
def test_validation():
args = SimpleNamespace(wdw=0.01,
training_steps=1,
rs=0,
optimizer='ADAM',
lr=0.01,
half_life=1,
device=torch.device('cpu'))
data_path = test_path/'data/seq'
shape = [256, 256]
dataset = DatasetImpl(path=data_path,
shape=shape,
augmentation=False,
collapse_length=1,
is_raw=True,
max_seq_length=1)
data_loader = torch.utils.data.DataLoader(dataset,
collate_fn=collate_wrapper,
batch_size=2,
pin_memory=True,
shuffle=False)
model = init_model(
SimpleNamespace(flownet_path=test_path.parent/'EV_FlowNet',
mish=False, sp=None, prefix_length=0,
suffix_length=0, max_sequence_length=1,
dynamic_sample_length=False,
event_representation_depth=9),
device=args.device)
optimizer, scheduler = construct_train_tools(args, model)
evaluator = Losses([tuple(map(lambda x: x // 2 ** i, shape))
for i in range(4)][::-1], 2, args.device)
with tempfile.TemporaryDirectory() as td:
logger = torch.utils.tensorboard.SummaryWriter(log_dir=td)
validate(model=model, device=args.device, loader=data_loader,
samples_passed=0, logger=logger, evaluator=evaluator)
del logger
time.sleep(1)
| 43.451613
| 73
| 0.508537
| 392
| 4,041
| 5.061224
| 0.267857
| 0.021169
| 0.024194
| 0.023185
| 0.760081
| 0.740927
| 0.740927
| 0.740927
| 0.698589
| 0.698589
| 0
| 0.023187
| 0.413017
| 4,041
| 92
| 74
| 43.923913
| 0.813238
| 0
| 0
| 0.790698
| 0
| 0
| 0.012373
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0.011628
| 0.116279
| 0
| 0.139535
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0a10890332d5b039cfbecf84d429a339466146d6
| 98
|
py
|
Python
|
autumn/models/sm_sir/mixing_matrix/mixing_adjusters/__init__.py
|
emmamcbryde/AuTuMN-1
|
b1e7de15ac6ef6bed95a80efab17f0780ec9ff6f
|
[
"BSD-2-Clause-FreeBSD"
] | 14
|
2020-03-11T06:15:30.000Z
|
2022-03-09T03:38:35.000Z
|
autumn/models/sm_sir/mixing_matrix/mixing_adjusters/__init__.py
|
emmamcbryde/AuTuMN-1
|
b1e7de15ac6ef6bed95a80efab17f0780ec9ff6f
|
[
"BSD-2-Clause-FreeBSD"
] | 96
|
2020-01-29T05:10:29.000Z
|
2022-03-31T01:48:46.000Z
|
autumn/models/sm_sir/mixing_matrix/mixing_adjusters/__init__.py
|
emmamcbryde/AuTuMN-1
|
b1e7de15ac6ef6bed95a80efab17f0780ec9ff6f
|
[
"BSD-2-Clause-FreeBSD"
] | 10
|
2020-04-24T00:38:00.000Z
|
2021-08-19T16:19:03.000Z
|
from .age_adjuster import AgeMixingAdjuster
from .location_adjuster import LocationMixingAdjuster
| 32.666667
| 53
| 0.897959
| 10
| 98
| 8.6
| 0.7
| 0.325581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 98
| 2
| 54
| 49
| 0.955556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0a2a46a64a2aeb00ebf7641f8c03a0c5dc226357
| 377
|
py
|
Python
|
venv/Lib/site-packages/gitdb/db/__init__.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 128
|
2015-01-19T02:03:00.000Z
|
2022-03-30T10:54:21.000Z
|
venv/Lib/site-packages/gitdb/db/__init__.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 61
|
2015-01-08T14:32:33.000Z
|
2022-03-15T23:30:08.000Z
|
venv/Lib/site-packages/gitdb/db/__init__.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 53
|
2015-01-19T12:07:59.000Z
|
2022-03-15T16:30:48.000Z
|
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
#
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
from gitdb.db.base import *
from gitdb.db.loose import *
from gitdb.db.mem import *
from gitdb.db.pack import *
from gitdb.db.git import *
from gitdb.db.ref import *
| 31.416667
| 80
| 0.755968
| 63
| 377
| 4.52381
| 0.603175
| 0.189474
| 0.231579
| 0.298246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024691
| 0.140584
| 377
| 11
| 81
| 34.272727
| 0.854938
| 0.533157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0a320421e90b6dfa7860fe1cb1efc1f850d8c28a
| 56
|
py
|
Python
|
game_theory/__init__.py
|
badbayesian/game_theory
|
375622020109e0f9a691a1c009e0122b9217f1da
|
[
"MIT"
] | null | null | null |
game_theory/__init__.py
|
badbayesian/game_theory
|
375622020109e0f9a691a1c009e0122b9217f1da
|
[
"MIT"
] | null | null | null |
game_theory/__init__.py
|
badbayesian/game_theory
|
375622020109e0f9a691a1c009e0122b9217f1da
|
[
"MIT"
] | null | null | null |
from game_theory import model, minimax, example_payoffs
| 28
| 55
| 0.857143
| 8
| 56
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 56
| 1
| 56
| 56
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0a385d1aa809b5752e21ac61aa4aae665c48d699
| 79
|
py
|
Python
|
software/behavioural_task_example/example_task/LED_conditioning/boot.py
|
amchagas/BeeHive
|
85820893a4e90c57cca5b3d13a6e58d0444fb9ba
|
[
"MIT"
] | null | null | null |
software/behavioural_task_example/example_task/LED_conditioning/boot.py
|
amchagas/BeeHive
|
85820893a4e90c57cca5b3d13a6e58d0444fb9ba
|
[
"MIT"
] | null | null | null |
software/behavioural_task_example/example_task/LED_conditioning/boot.py
|
amchagas/BeeHive
|
85820893a4e90c57cca5b3d13a6e58d0444fb9ba
|
[
"MIT"
] | null | null | null |
# This is script that run when device boot up or wake from sleep.
import main
| 19.75
| 65
| 0.759494
| 15
| 79
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21519
| 79
| 3
| 66
| 26.333333
| 0.967742
| 0.797468
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0a458d41ef41c995fa41bef35241f0c299482232
| 4,918
|
py
|
Python
|
sdk/python/pulumi_azure_native/documentdb/__init__.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/documentdb/__init__.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/documentdb/__init__.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .cassandra_cluster import *
from .cassandra_data_center import *
from .cassandra_resource_cassandra_keyspace import *
from .cassandra_resource_cassandra_table import *
from .database_account import *
from .get_cassandra_cluster import *
from .get_cassandra_data_center import *
from .get_cassandra_resource_cassandra_keyspace import *
from .get_cassandra_resource_cassandra_table import *
from .get_database_account import *
from .get_gremlin_resource_gremlin_database import *
from .get_gremlin_resource_gremlin_graph import *
from .get_mongo_db_resource_mongo_db_collection import *
from .get_mongo_db_resource_mongo_db_database import *
from .get_notebook_workspace import *
from .get_private_endpoint_connection import *
from .get_sql_resource_sql_container import *
from .get_sql_resource_sql_database import *
from .get_sql_resource_sql_role_assignment import *
from .get_sql_resource_sql_role_definition import *
from .get_sql_resource_sql_stored_procedure import *
from .get_sql_resource_sql_trigger import *
from .get_sql_resource_sql_user_defined_function import *
from .get_table_resource_table import *
from .gremlin_resource_gremlin_database import *
from .gremlin_resource_gremlin_graph import *
from .list_database_account_connection_strings import *
from .list_database_account_keys import *
from .list_notebook_workspace_connection_info import *
from .mongo_db_resource_mongo_db_collection import *
from .mongo_db_resource_mongo_db_database import *
from .notebook_workspace import *
from .private_endpoint_connection import *
from .sql_resource_sql_container import *
from .sql_resource_sql_database import *
from .sql_resource_sql_role_assignment import *
from .sql_resource_sql_role_definition import *
from .sql_resource_sql_stored_procedure import *
from .sql_resource_sql_trigger import *
from .sql_resource_sql_user_defined_function import *
from .table_resource_table import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_azure_native.documentdb.v20150401 as v20150401
import pulumi_azure_native.documentdb.v20150408 as v20150408
import pulumi_azure_native.documentdb.v20151106 as v20151106
import pulumi_azure_native.documentdb.v20160319 as v20160319
import pulumi_azure_native.documentdb.v20160331 as v20160331
import pulumi_azure_native.documentdb.v20190801 as v20190801
import pulumi_azure_native.documentdb.v20190801preview as v20190801preview
import pulumi_azure_native.documentdb.v20191212 as v20191212
import pulumi_azure_native.documentdb.v20200301 as v20200301
import pulumi_azure_native.documentdb.v20200401 as v20200401
import pulumi_azure_native.documentdb.v20200601preview as v20200601preview
import pulumi_azure_native.documentdb.v20200901 as v20200901
import pulumi_azure_native.documentdb.v20210115 as v20210115
import pulumi_azure_native.documentdb.v20210301preview as v20210301preview
import pulumi_azure_native.documentdb.v20210315 as v20210315
import pulumi_azure_native.documentdb.v20210401preview as v20210401preview
import pulumi_azure_native.documentdb.v20210415 as v20210415
else:
v20150401 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20150401')
v20150408 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20150408')
v20151106 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20151106')
v20160319 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20160319')
v20160331 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20160331')
v20190801 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20190801')
v20190801preview = _utilities.lazy_import('pulumi_azure_native.documentdb.v20190801preview')
v20191212 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20191212')
v20200301 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20200301')
v20200401 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20200401')
v20200601preview = _utilities.lazy_import('pulumi_azure_native.documentdb.v20200601preview')
v20200901 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20200901')
v20210115 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20210115')
v20210301preview = _utilities.lazy_import('pulumi_azure_native.documentdb.v20210301preview')
v20210315 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20210315')
v20210401preview = _utilities.lazy_import('pulumi_azure_native.documentdb.v20210401preview')
v20210415 = _utilities.lazy_import('pulumi_azure_native.documentdb.v20210415')
| 54.043956
| 96
| 0.842619
| 613
| 4,918
| 6.352365
| 0.164763
| 0.110426
| 0.148433
| 0.200822
| 0.779404
| 0.713919
| 0.334874
| 0.06831
| 0
| 0
| 0
| 0.122969
| 0.098821
| 4,918
| 90
| 97
| 54.644444
| 0.755641
| 0.04697
| 0
| 0
| 1
| 0
| 0.151282
| 0.151282
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.97561
| 0
| 0.97561
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a58162987483398d37b9317cec7a9e16c6e71ae
| 4,615
|
py
|
Python
|
tensorflow_text/python/ops/__init__.py
|
nluehr/text
|
1fd2039412faf537473e31f25df7ebe972475018
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_text/python/ops/__init__.py
|
nluehr/text
|
1fd2039412faf537473e31f25df7ebe972475018
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_text/python/ops/__init__.py
|
nluehr/text
|
1fd2039412faf537473e31f25df7ebe972475018
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various TensorFlow ops related to text-processing."""
# pylint: disable=g-import-not-at-top,g-statement-before-imports
try:
from tensorflow.python.ops.ragged import ragged_ops as _ragged_ops
except ImportError:
pass
from tensorflow_text.core.pybinds.pywrap_fast_wordpiece_tokenizer_model_builder import build_fast_wordpiece_model
from tensorflow_text.python.ops.bert_tokenizer import BertTokenizer
from tensorflow_text.python.ops.create_feature_bitmask_op import create_feature_bitmask
from tensorflow_text.python.ops.fast_wordpiece_tokenizer import FastWordpieceTokenizer
from tensorflow_text.python.ops.greedy_constrained_sequence_op import greedy_constrained_sequence
from tensorflow_text.python.ops.hub_module_splitter import HubModuleSplitter
from tensorflow_text.python.ops.hub_module_tokenizer import HubModuleTokenizer
from tensorflow_text.python.ops.item_selector_ops import FirstNItemSelector
from tensorflow_text.python.ops.item_selector_ops import RandomItemSelector
from tensorflow_text.python.ops.masking_ops import mask_language_model
from tensorflow_text.python.ops.masking_ops import MaskValuesChooser
from tensorflow_text.python.ops.mst_ops import max_spanning_tree
from tensorflow_text.python.ops.mst_ops import max_spanning_tree_gradient
from tensorflow_text.python.ops.ngrams_op import ngrams
from tensorflow_text.python.ops.ngrams_op import Reduction
from tensorflow_text.python.ops.normalize_ops import case_fold_utf8
from tensorflow_text.python.ops.normalize_ops import find_source_offsets
from tensorflow_text.python.ops.normalize_ops import normalize_utf8
from tensorflow_text.python.ops.normalize_ops import normalize_utf8_with_offsets_map
from tensorflow_text.python.ops.pad_along_dimension_op import pad_along_dimension
from tensorflow_text.python.ops.pad_model_inputs_ops import pad_model_inputs
from tensorflow_text.python.ops.pointer_ops import gather_with_default
from tensorflow_text.python.ops.pointer_ops import span_alignment
from tensorflow_text.python.ops.pointer_ops import span_overlaps
from tensorflow_text.python.ops.regex_split_ops import regex_split
from tensorflow_text.python.ops.regex_split_ops import regex_split_with_offsets
from tensorflow_text.python.ops.regex_split_ops import RegexSplitter
from tensorflow_text.python.ops.segment_combiner_ops import combine_segments
from tensorflow_text.python.ops.sentence_breaking_ops import sentence_fragments
from tensorflow_text.python.ops.sentencepiece_tokenizer import SentencepieceTokenizer
from tensorflow_text.python.ops.sliding_window_op import sliding_window
from tensorflow_text.python.ops.split_merge_from_logits_tokenizer import SplitMergeFromLogitsTokenizer
from tensorflow_text.python.ops.split_merge_tokenizer import SplitMergeTokenizer
from tensorflow_text.python.ops.splitter import Splitter
from tensorflow_text.python.ops.splitter import SplitterWithOffsets
from tensorflow_text.python.ops.state_based_sentence_breaker_op import StateBasedSentenceBreaker
from tensorflow_text.python.ops.string_ops import coerce_to_structurally_valid_utf8
from tensorflow_text.python.ops.tokenization import Detokenizer
from tensorflow_text.python.ops.tokenization import Tokenizer
from tensorflow_text.python.ops.tokenization import TokenizerWithOffsets
from tensorflow_text.python.ops.trimmer_ops import RoundRobinTrimmer
from tensorflow_text.python.ops.trimmer_ops import ShrinkLongestTrimmer
from tensorflow_text.python.ops.trimmer_ops import WaterfallTrimmer
from tensorflow_text.python.ops.unicode_char_tokenizer import UnicodeCharTokenizer
from tensorflow_text.python.ops.unicode_script_tokenizer import UnicodeScriptTokenizer
from tensorflow_text.python.ops.viterbi_constrained_sequence_op import viterbi_constrained_sequence
from tensorflow_text.python.ops.whitespace_tokenizer import WhitespaceTokenizer
from tensorflow_text.python.ops.wordpiece_tokenizer import WordpieceTokenizer
from tensorflow_text.python.ops.wordshape_ops import WordShape
from tensorflow_text.python.ops.wordshape_ops import wordshape
| 63.219178
| 113
| 0.882557
| 650
| 4,615
| 5.995385
| 0.286154
| 0.183218
| 0.230947
| 0.301771
| 0.518861
| 0.442648
| 0.395689
| 0.285861
| 0.187067
| 0.08622
| 0
| 0.003031
| 0.070639
| 4,615
| 72
| 114
| 64.097222
| 0.905572
| 0.147562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.018519
| 0.962963
| 0
| 0.962963
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a72c036783ee94be2df1beef0f5ca942088ab7f
| 37
|
py
|
Python
|
CPMel(3)/script/__init__.py
|
2921251087/CPMel-1
|
09afe43052f646e9e9bbd5c876614db211b1c42b
|
[
"Apache-2.0"
] | 1
|
2021-08-01T12:49:50.000Z
|
2021-08-01T12:49:50.000Z
|
CPMel(6)/script/__init__.py
|
2921251087/CPMel-1
|
09afe43052f646e9e9bbd5c876614db211b1c42b
|
[
"Apache-2.0"
] | null | null | null |
CPMel(6)/script/__init__.py
|
2921251087/CPMel-1
|
09afe43052f646e9e9bbd5c876614db211b1c42b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#-*-coding:gbk -*-
| 18.5
| 18
| 0.567568
| 5
| 37
| 4.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 37
| 2
| 19
| 18.5
| 0.617647
| 0.891892
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a9724a6172024fa14933aeee6d49d53068cdbe0
| 139
|
py
|
Python
|
notification/admin.py
|
Suprita-25/rengine
|
d6aabb49f27f7ad6039477c16a96213b0d80f81f
|
[
"MIT"
] | null | null | null |
notification/admin.py
|
Suprita-25/rengine
|
d6aabb49f27f7ad6039477c16a96213b0d80f81f
|
[
"MIT"
] | null | null | null |
notification/admin.py
|
Suprita-25/rengine
|
d6aabb49f27f7ad6039477c16a96213b0d80f81f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import NotificationHooks
# Register your models here.
admin.site.register(NotificationHooks)
| 27.8
| 38
| 0.841727
| 17
| 139
| 6.882353
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100719
| 139
| 4
| 39
| 34.75
| 0.936
| 0.18705
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6aaa6f372e33b502238967dedc4d70454668092c
| 78
|
py
|
Python
|
07.py
|
r9y9/nlp100
|
391ca6c4fb8afc074ed825404d4ad4efc4467f05
|
[
"MIT"
] | 18
|
2017-08-20T09:15:07.000Z
|
2022-03-11T07:25:14.000Z
|
07.py
|
r9y9/nlp100
|
391ca6c4fb8afc074ed825404d4ad4efc4467f05
|
[
"MIT"
] | null | null | null |
07.py
|
r9y9/nlp100
|
391ca6c4fb8afc074ed825404d4ad4efc4467f05
|
[
"MIT"
] | 8
|
2018-07-29T09:35:35.000Z
|
2019-12-05T08:03:41.000Z
|
def f(x, y, z): return "{}時の{}は{}".format(x, y, z)
print(f(12, "気温", 22.4))
| 15.6
| 50
| 0.487179
| 18
| 78
| 2.111111
| 0.777778
| 0.105263
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.166667
| 78
| 4
| 51
| 19.5
| 0.507692
| 0
| 0
| 0
| 0
| 0
| 0.141026
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
|
0
| 5
|
6aca5ce675c64e868ede168f0c51923f09fcead2
| 716
|
py
|
Python
|
recommender/__init__.py
|
yugandharaloori/Recommender-Systems-Comparison
|
a09ec349bc771167e450ec6b8213988cc148288d
|
[
"MIT"
] | null | null | null |
recommender/__init__.py
|
yugandharaloori/Recommender-Systems-Comparison
|
a09ec349bc771167e450ec6b8213988cc148288d
|
[
"MIT"
] | null | null | null |
recommender/__init__.py
|
yugandharaloori/Recommender-Systems-Comparison
|
a09ec349bc771167e450ec6b8213988cc148288d
|
[
"MIT"
] | null | null | null |
# import standard baseline models
from recommender.algorithms.models import (RecommenderModel,
SVDModel,
ScaledSVD,
CooccurrenceModel,
RandomModel,
PopularityModel)
# import data model
from recommender.algorithms.data import RecommenderData
# import data management routines
from recommender.datasets.movielens import get_movielens_data
from recommender.datasets.bookcrossing import get_bookcrossing_data
from recommender.datasets.netflix import get_netflix_data
from recommender.datasets.amazon import get_amazon_data
| 42.117647
| 67
| 0.634078
| 60
| 716
| 7.433333
| 0.4
| 0.201794
| 0.206278
| 0.181614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.335196
| 716
| 16
| 68
| 44.75
| 0.936975
| 0.113128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.545455
| 0
| 0.545455
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6acd9da84dcea30b23f8937f73087f5ed7194ee9
| 22
|
py
|
Python
|
instance/__init__.py
|
timothyakinyelu/whack-a-bug-api
|
f7cc7245b268bbbdf2a3d6f1314ae3cecc8b9da8
|
[
"MIT"
] | null | null | null |
instance/__init__.py
|
timothyakinyelu/whack-a-bug-api
|
f7cc7245b268bbbdf2a3d6f1314ae3cecc8b9da8
|
[
"MIT"
] | null | null | null |
instance/__init__.py
|
timothyakinyelu/whack-a-bug-api
|
f7cc7245b268bbbdf2a3d6f1314ae3cecc8b9da8
|
[
"MIT"
] | null | null | null |
# instance/__init__.py
| 22
| 22
| 0.818182
| 3
| 22
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 22
| 1
| 22
| 22
| 0.666667
| 0.909091
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6ae099f03a1b74eff8f190af78ab7ab335f376f4
| 22
|
py
|
Python
|
examples/divmod/ex2.py
|
mcorne/python-by-example
|
15339c0909c84b51075587a6a66391100971c033
|
[
"MIT"
] | null | null | null |
examples/divmod/ex2.py
|
mcorne/python-by-example
|
15339c0909c84b51075587a6a66391100971c033
|
[
"MIT"
] | null | null | null |
examples/divmod/ex2.py
|
mcorne/python-by-example
|
15339c0909c84b51075587a6a66391100971c033
|
[
"MIT"
] | null | null | null |
print(divmod(5.5, 2))
| 11
| 21
| 0.636364
| 5
| 22
| 2.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.090909
| 22
| 1
| 22
| 22
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
0aa7a5ef47d9ccf834544ae3e3f4616cbfd61976
| 125
|
py
|
Python
|
cykooz/resizer/__init__.py
|
Cykooz/cykooz.resizer
|
856d2e2299abac4c2dcef3828c89fbaa226274d7
|
[
"Apache-2.0",
"MIT"
] | 2
|
2021-10-08T18:54:36.000Z
|
2021-10-09T15:59:09.000Z
|
cykooz/resizer/__init__.py
|
Cykooz/cykooz.resizer
|
856d2e2299abac4c2dcef3828c89fbaa226274d7
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
cykooz/resizer/__init__.py
|
Cykooz/cykooz.resizer
|
856d2e2299abac4c2dcef3828c89fbaa226274d7
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
"""
:Authors: cykooz
:Date: 20.03.2021
"""
from .alpha import AlphaMulDiv
from .resize import Resizer
from .structs import *
| 15.625
| 30
| 0.728
| 17
| 125
| 5.352941
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074766
| 0.144
| 125
| 7
| 31
| 17.857143
| 0.775701
| 0.272
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0ac1976ad065f1c02009818982086f4a7e5620e4
| 12,851
|
py
|
Python
|
masakari/tests/unit/objects/test_segments.py
|
openstack/masakari
|
7c6380d63e4e39db30176306303323e16b38fa3d
|
[
"Apache-2.0"
] | 70
|
2016-07-22T21:58:00.000Z
|
2022-01-04T06:05:32.000Z
|
masakari/tests/unit/objects/test_segments.py
|
openstack/masakari
|
7c6380d63e4e39db30176306303323e16b38fa3d
|
[
"Apache-2.0"
] | 1
|
2017-08-10T05:14:00.000Z
|
2017-08-10T05:14:00.000Z
|
masakari/tests/unit/objects/test_segments.py
|
openstack/masakari
|
7c6380d63e4e39db30176306303323e16b38fa3d
|
[
"Apache-2.0"
] | 33
|
2016-07-05T02:05:25.000Z
|
2021-12-20T07:40:43.000Z
|
# Copyright 2016 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from oslo_utils import timeutils
from masakari.api import utils as api_utils
from masakari import exception
from masakari.objects import fields
from masakari.objects import segment
from masakari.tests.unit.objects import test_objects
from masakari.tests import uuidsentinel
NOW = timeutils.utcnow().replace(microsecond=0)
fake_segment = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'uuid': uuidsentinel.fake_segment,
'name': 'foo-segment',
'service_type': 'COMPUTE',
'description': 'fake-description',
'recovery_method': 'auto',
'enabled': True
}
class TestFailoverSegmentObject(test_objects._LocalTest):
@mock.patch('masakari.db.failover_segment_get_by_name')
def test_get_by_name(self, mock_api_get):
mock_api_get.return_value = fake_segment
segment_obj = segment.FailoverSegment.get_by_name(self.context,
'foo-segment')
self.compare_obj(segment_obj, fake_segment)
mock_api_get.assert_called_once_with(self.context, 'foo-segment')
@mock.patch('masakari.db.failover_segment_get_by_uuid')
def test_get_by_uuid(self, mock_api_get):
mock_api_get.return_value = fake_segment
segment_obj = (segment.FailoverSegment.
get_by_uuid(self.context, uuidsentinel.fake_segment))
self.compare_obj(segment_obj, fake_segment)
mock_api_get.assert_called_once_with(self.context,
uuidsentinel.fake_segment)
@mock.patch('masakari.db.failover_segment_get_by_id')
def test_get_by_id(self, mock_api_get):
mock_api_get.return_value = fake_segment
fake_id = 123
segment_obj = segment.FailoverSegment.get_by_id(self.context, fake_id)
self.compare_obj(segment_obj, fake_segment)
mock_api_get.assert_called_once_with(self.context, fake_id)
def _segment_create_attribute(self):
segment_obj = segment.FailoverSegment(context=self.context)
segment_obj.name = 'foo-segment'
segment_obj.description = 'keydata'
segment_obj.service_type = 'fake-user'
segment_obj.recovery_method = 'auto'
segment_obj.uuid = uuidsentinel.fake_segment
return segment_obj
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_create')
def test_create(self, mock_segment_create, mock_notify_about_segment_api):
mock_segment_create.return_value = fake_segment
segment_obj = self._segment_create_attribute()
segment_obj.create()
self.compare_obj(segment_obj, fake_segment)
mock_segment_create.assert_called_once_with(self.context, {
'uuid': uuidsentinel.fake_segment, 'name': 'foo-segment',
'description': 'keydata', 'service_type': 'fake-user',
'recovery_method': 'auto'})
action = fields.EventNotificationAction.SEGMENT_CREATE
phase_start = fields.EventNotificationPhase.START
phase_end = fields.EventNotificationPhase.END
notify_calls = [
mock.call(self.context, segment_obj, action=action,
phase=phase_start),
mock.call(self.context, segment_obj, action=action,
phase=phase_end)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_create')
def test_recreate_fails(self, mock_segment_create,
mock_notify_about_segment_api):
mock_segment_create.return_value = fake_segment
segment_obj = self._segment_create_attribute()
segment_obj.create()
self.assertRaises(exception.ObjectActionError, segment_obj.create)
mock_segment_create.assert_called_once_with(self.context, {
'uuid': uuidsentinel.fake_segment, 'name': 'foo-segment',
'description': 'keydata', 'service_type': 'fake-user',
'recovery_method': 'auto'})
action = fields.EventNotificationAction.SEGMENT_CREATE
phase_start = fields.EventNotificationPhase.START
phase_end = fields.EventNotificationPhase.END
notify_calls = [
mock.call(self.context, segment_obj, action=action,
phase=phase_start),
mock.call(self.context, segment_obj, action=action,
phase=phase_end)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_delete')
def test_destroy(self, mock_segment_destroy,
mock_notify_about_segment_api):
segment_obj = self._segment_create_attribute()
segment_obj.id = 123
segment_obj.destroy()
mock_segment_destroy.assert_called_once_with(
self.context, uuidsentinel.fake_segment)
action = fields.EventNotificationAction.SEGMENT_DELETE
phase_start = fields.EventNotificationPhase.START
phase_end = fields.EventNotificationPhase.END
notify_calls = [
mock.call(self.context, segment_obj, action=action,
phase=phase_start),
mock.call(self.context, segment_obj, action=action,
phase=phase_end)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_delete')
def test_destroy_failover_segment_found(self, mock_segment_destroy,
mock_notify_about_segment_api):
mock_segment_destroy.side_effect = exception.FailoverSegmentNotFound(
id=123)
segment_obj = self._segment_create_attribute()
segment_obj.id = 123
self.assertRaises(exception.FailoverSegmentNotFound,
segment_obj.destroy)
action = fields.EventNotificationAction.SEGMENT_DELETE
phase_start = fields.EventNotificationPhase.START
notify_calls = [
mock.call(self.context, segment_obj, action=action,
phase=phase_start)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
@mock.patch('masakari.db.failover_segment_get_all_by_filters')
def test_get_segment_by_recovery_method(self, mock_api_get):
fake_segment2 = copy.deepcopy(fake_segment)
fake_segment2['name'] = 'fake_segment2'
mock_api_get.return_value = [fake_segment2, fake_segment]
segment_result = (segment.FailoverSegmentList.
get_all(self.context,
filters={'recovery_method': 'auto'}))
self.assertEqual(2, len(segment_result))
self.compare_obj(segment_result[0], fake_segment2)
self.compare_obj(segment_result[1], fake_segment)
mock_api_get.assert_called_once_with(self.context, filters={
'recovery_method': 'auto'
}, limit=None, marker=None, sort_dirs=None, sort_keys=None)
@mock.patch('masakari.db.failover_segment_get_all_by_filters')
def test_get_segment_by_service_type(self, mock_api_get):
fake_segment2 = copy.deepcopy(fake_segment)
fake_segment2['name'] = 'fake_segment'
mock_api_get.return_value = [fake_segment2, fake_segment]
segment_result = (segment.FailoverSegmentList.
get_all(self.context,
filters={'service_type': 'COMPUTE'}))
self.assertEqual(2, len(segment_result))
self.compare_obj(segment_result[0], fake_segment2)
self.compare_obj(segment_result[1], fake_segment)
mock_api_get.assert_called_once_with(self.context, filters={
'service_type': 'COMPUTE'
}, limit=None, marker=None, sort_dirs=None, sort_keys=None)
@mock.patch('masakari.db.failover_segment_get_all_by_filters')
def test_get_limit_and_marker_invalid_marker(self, mock_api_get):
segment_name = 'unknown_segment'
mock_api_get.side_effect = exception.MarkerNotFound(marker=segment_name
)
self.assertRaises(exception.MarkerNotFound,
segment.FailoverSegmentList.get_all,
self.context, limit=5, marker=segment_name)
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_update')
def test_save(self, mock_segment_update, mock_notify_about_segment_api):
mock_segment_update.return_value = fake_segment
segment_object = segment.FailoverSegment(context=self.context)
segment_object.name = "foo-segment"
segment_object.id = 123
segment_object.uuid = uuidsentinel.fake_segment
segment_object.save()
self.compare_obj(segment_object, fake_segment)
self.assertTrue(mock_segment_update.called)
action = fields.EventNotificationAction.SEGMENT_UPDATE
phase_start = fields.EventNotificationPhase.START
phase_end = fields.EventNotificationPhase.END
notify_calls = [
mock.call(self.context, segment_object, action=action,
phase=phase_start),
mock.call(self.context, segment_object, action=action,
phase=phase_end)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_update')
def test_save_failover_segment_not_found(self, mock_segment_update,
mock_notify_about_segment_api):
mock_segment_update.side_effect = (
exception.FailoverSegmentNotFound(id=uuidsentinel.fake_segment))
segment_object = segment.FailoverSegment(context=self.context)
segment_object.name = "foo-segment"
segment_object.id = 123
segment_object.uuid = uuidsentinel.fake_segment
self.assertRaises(exception.FailoverSegmentNotFound,
segment_object.save)
action = fields.EventNotificationAction.SEGMENT_UPDATE
phase_start = fields.EventNotificationPhase.START
notify_calls = [
mock.call(self.context, segment_object, action=action,
phase=phase_start)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_update')
def test_save_failover_segment_already_exists(self, mock_segment_update,
mock_notify_about_segment_api):
mock_segment_update.side_effect = (
exception.FailoverSegmentExists(name="foo-segment"))
segment_object = segment.FailoverSegment(context=self.context)
segment_object.name = "foo-segment"
segment_object.id = 123
segment_object.uuid = uuidsentinel.fake_segment
self.assertRaises(exception.FailoverSegmentExists, segment_object.save)
action = fields.EventNotificationAction.SEGMENT_UPDATE
phase_start = fields.EventNotificationPhase.START
notify_calls = [
mock.call(self.context, segment_object, action=action,
phase=phase_start)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
def test_obj_make_compatible(self):
segment_obj = segment.FailoverSegment(context=self.context)
segment_obj.name = "foo-segment"
segment_obj.id = 123
segment_obj.uuid = uuidsentinel.fake_segment
segment_obj.enabled = True
primitive = segment_obj.obj_to_primitive('1.1')
self.assertIn('enabled', primitive['masakari_object.data'])
primitive = segment_obj.obj_to_primitive('1.0')
self.assertNotIn('enabled', primitive['masakari_object.data'])
| 43.26936
| 79
| 0.683215
| 1,474
| 12,851
| 5.610583
| 0.121438
| 0.047158
| 0.045707
| 0.053325
| 0.789117
| 0.752963
| 0.723821
| 0.709311
| 0.700846
| 0.669891
| 0
| 0.00568
| 0.232822
| 12,851
| 296
| 80
| 43.415541
| 0.833147
| 0.046456
| 0
| 0.608696
| 0
| 0
| 0.101994
| 0.05492
| 0
| 0
| 0
| 0
| 0.108696
| 1
| 0.065217
| false
| 0
| 0.03913
| 0
| 0.113043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0ad0fcc7ec02767c2c921997026880b302de460a
| 74
|
py
|
Python
|
stripe/api_resources/checkout/__init__.py
|
henry232323/stripe-python
|
953faf3612522f4294393d341138800691f406e0
|
[
"MIT"
] | null | null | null |
stripe/api_resources/checkout/__init__.py
|
henry232323/stripe-python
|
953faf3612522f4294393d341138800691f406e0
|
[
"MIT"
] | null | null | null |
stripe/api_resources/checkout/__init__.py
|
henry232323/stripe-python
|
953faf3612522f4294393d341138800691f406e0
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from stripe.api_resources.checkout.session import Session
| 18.5
| 57
| 0.824324
| 10
| 74
| 6
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 0.108108
| 74
| 3
| 58
| 24.666667
| 0.893939
| 0.162162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0ae7d78a1e26ebafcd86015b70779459b853277e
| 203
|
py
|
Python
|
files/exercises/looping-over-data-sets-determining-matches.py
|
mforneris/introduction_to_python_course
|
8075973ee89a921a5e2693f649adbf1fc0e0b2cb
|
[
"CC-BY-4.0"
] | null | null | null |
files/exercises/looping-over-data-sets-determining-matches.py
|
mforneris/introduction_to_python_course
|
8075973ee89a921a5e2693f649adbf1fc0e0b2cb
|
[
"CC-BY-4.0"
] | null | null | null |
files/exercises/looping-over-data-sets-determining-matches.py
|
mforneris/introduction_to_python_course
|
8075973ee89a921a5e2693f649adbf1fc0e0b2cb
|
[
"CC-BY-4.0"
] | 1
|
2020-01-09T10:58:56.000Z
|
2020-01-09T10:58:56.000Z
|
# Which of these files is not matched by the expression glob.glob('../../data/*as*.csv')?
#../../data/gapminder_gdp_africa.csv
#../../data/gapminder_gdp_americas.csv
#../../data/gapminder_gdp_asia.csv
| 29
| 89
| 0.699507
| 30
| 203
| 4.533333
| 0.633333
| 0.154412
| 0.352941
| 0.419118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083744
| 203
| 6
| 90
| 33.833333
| 0.731183
| 0.945813
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0afa4d74a83cc757e339a64748e13e424f48915e
| 344
|
py
|
Python
|
server/public_opinion/views.py
|
tzattack/public_opinion_analysis
|
ed29e5b53564563ac6233de7d49afdd5ee554920
|
[
"MIT"
] | null | null | null |
server/public_opinion/views.py
|
tzattack/public_opinion_analysis
|
ed29e5b53564563ac6233de7d49afdd5ee554920
|
[
"MIT"
] | null | null | null |
server/public_opinion/views.py
|
tzattack/public_opinion_analysis
|
ed29e5b53564563ac6233de7d49afdd5ee554920
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
# 接收请求数据
def index(request):
return render(request, 'public_opinion/index.html')
#return HttpResponse('hello world')
def starter(request):
return render(request, 'public_opinion/starter.html')
#return HttpResponse('hello world')
| 31.272727
| 57
| 0.761628
| 43
| 344
| 6.046512
| 0.511628
| 0.076923
| 0.146154
| 0.2
| 0.546154
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136628
| 344
| 11
| 58
| 31.272727
| 0.875421
| 0.284884
| 0
| 0
| 0
| 0
| 0.214876
| 0.214876
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
7c1069fb8d110e82201bd9a8e5df48fa42d80b01
| 328
|
py
|
Python
|
algorithms/recursion/test_count.py
|
dmodena/grokking-algorithms
|
18516a0fcfae629bde062f8e1c273f658c06cf7e
|
[
"MIT"
] | null | null | null |
algorithms/recursion/test_count.py
|
dmodena/grokking-algorithms
|
18516a0fcfae629bde062f8e1c273f658c06cf7e
|
[
"MIT"
] | null | null | null |
algorithms/recursion/test_count.py
|
dmodena/grokking-algorithms
|
18516a0fcfae629bde062f8e1c273f658c06cf7e
|
[
"MIT"
] | null | null | null |
import unittest
from algorithms.recursion.count import count
class TestCount(unittest.TestCase):
def test_count(self):
self.assertEqual(count([]), 0)
self.assertEqual(count([1, 2, 3, 4]), 4)
self.assertEqual(count([1, 2, 3, 4, 5, 6]), 6)
self.assertEqual(count([1, 2, 3, 4, 5, 6, 7, 8]), 8)
| 32.8
| 60
| 0.615854
| 50
| 328
| 4.02
| 0.44
| 0.298507
| 0.39801
| 0.313433
| 0.378109
| 0.378109
| 0.378109
| 0.258706
| 0.258706
| 0
| 0
| 0.085271
| 0.213415
| 328
| 9
| 61
| 36.444444
| 0.693798
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7c17b79dddae72386ba8f94cd7728b4d51f7bce1
| 781
|
py
|
Python
|
app/tools/auth/__init__.py
|
tomaszkyc/timely
|
3fc2953a4dc250b885683dea38892ade68a63cf2
|
[
"MIT"
] | null | null | null |
app/tools/auth/__init__.py
|
tomaszkyc/timely
|
3fc2953a4dc250b885683dea38892ade68a63cf2
|
[
"MIT"
] | null | null | null |
app/tools/auth/__init__.py
|
tomaszkyc/timely
|
3fc2953a4dc250b885683dea38892ade68a63cf2
|
[
"MIT"
] | null | null | null |
from secrets import token_urlsafe
from flask import current_app
from itsdangerous import URLSafeSerializer
from werkzeug.security import generate_password_hash, check_password_hash
def encrypt_cookie(content):
s = URLSafeSerializer(current_app.config["SECRET_KEY"], salt="cookie")
encrypted_content = s.dumps(content)
return encrypted_content
def decrypt_cookie(encrypted_content):
s = URLSafeSerializer(current_app.config["SECRET_KEY"], salt="cookie")
try:
content = s.loads(encrypted_content)
except:
content = "-1"
return content
def generate_token():
return token_urlsafe(30)
def generate_hash(token):
return generate_password_hash(token)
def _check_token(hash, token):
return check_password_hash(hash, token)
| 23.666667
| 74
| 0.759283
| 97
| 781
| 5.85567
| 0.350515
| 0.084507
| 0.070423
| 0.112676
| 0.211268
| 0.211268
| 0.211268
| 0.211268
| 0.211268
| 0.211268
| 0
| 0.004573
| 0.160051
| 781
| 32
| 75
| 24.40625
| 0.86128
| 0
| 0
| 0.095238
| 1
| 0
| 0.043534
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.238095
| false
| 0.142857
| 0.190476
| 0.142857
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
7c1d62b30839311293ded9122aad65c91718e943
| 15,595
|
py
|
Python
|
tests/test_serializers.py
|
shosca/django-rest-dataclasses
|
5d119967dc16c1ba6fe4dfddafd6e55bd96b23a9
|
[
"MIT"
] | 14
|
2019-06-25T13:47:17.000Z
|
2021-08-20T16:25:18.000Z
|
tests/test_serializers.py
|
shosca/django-rest-dataclasses
|
5d119967dc16c1ba6fe4dfddafd6e55bd96b23a9
|
[
"MIT"
] | null | null | null |
tests/test_serializers.py
|
shosca/django-rest-dataclasses
|
5d119967dc16c1ba6fe4dfddafd6e55bd96b23a9
|
[
"MIT"
] | 1
|
2019-06-25T13:47:23.000Z
|
2019-06-25T13:47:23.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import dataclasses as da
import enum
from typing import Dict, List
from django.core.exceptions import ValidationError as DjangoValidationError
from django.test import SimpleTestCase
from rest_framework import fields
from rest_framework.exceptions import ValidationError
from rest_dataclasses.serializers import DataclassSerializer
class Color(enum.Enum):
RED = "red"
GREEN = "green"
BLUE = "blue"
@da.dataclass
class User:
id: int = da.field(default=None)
name: str = da.field(default=None)
email: str = da.field(default=None)
@da.dataclass
class Point:
x: int = da.field(default=None)
y: int = da.field(default=None)
@da.dataclass
class Line:
a: Point = da.field(default=None)
b: Point = da.field(default=None)
@da.dataclass
class Geometry:
lines: List[Line] = da.field(default=None)
color: Color = da.field(default=None)
@da.dataclass
class Address:
street: str = da.field(default=None)
city: str = da.field(default=None)
@da.dataclass
class Person:
name: str = da.field(default=None)
addresses: Dict[str, Address] = da.field(default=None)
@da.dataclass
class Dummy:
stuff: Dict[str, int] = da.field(default=None)
class TestModelSerializer(SimpleTestCase):
def test_happy_path(self):
class Serializer(DataclassSerializer):
class Meta:
model = User
fields = "__all__"
serializer = Serializer(data={"id": 1, "name": "shosca", "email": "some@email.com"})
serializer.is_valid(raise_exception=True)
user = serializer.save()
self.assertDictEqual(da.asdict(user), {"id": 1, "name": "shosca", "email": "some@email.com"})
def test_bad_fields(self):
class Serializer(DataclassSerializer):
class Meta:
model = User
fields = "id,name"
serializer = Serializer(data={"id": 1, "name": "shosca", "email": "some@email.com"})
with self.assertRaisesMessage(TypeError, 'The `fields` option must be a list or tuple or "__all__". Got str.'):
serializer.is_valid(raise_exception=True)
def test_bad_exclude(self):
class Serializer(DataclassSerializer):
class Meta:
model = User
exclude = "id,name"
serializer = Serializer(data={"id": 1, "name": "shosca", "email": "some@email.com"})
with self.assertRaisesMessage(TypeError, "The `exclude` option must be a list or tuple. Got str."):
serializer.is_valid(raise_exception=True)
def test_exclude(self):
class Serializer(DataclassSerializer):
class Meta:
model = User
exclude = ("name",)
serializer = Serializer(data={"id": 1, "name": "shosca", "email": "some@email.com"})
serializer.is_valid(raise_exception=True)
user = serializer.save()
self.assertDictEqual(da.asdict(user), {"id": 1, "name": None, "email": "some@email.com"})
def test_read_only_fields_bad(self):
class Serializer(DataclassSerializer):
class Meta:
model = User
fields = "__all__"
read_only_fields = "name"
serializer = Serializer(data={"id": 1, "name": "shosca", "email": "some@email.com"})
with self.assertRaisesMessage(TypeError, "The `read_only_fields` option must be a list or tuple. Got str."):
serializer.is_valid(raise_exception=True)
def test_read_only_fields(self):
class Serializer(DataclassSerializer):
class Meta:
model = User
fields = "__all__"
read_only_fields = ("name",)
serializer = Serializer(data={"id": 1, "name": "shosca", "email": "some@email.com"})
serializer.is_valid(raise_exception=True)
user = serializer.save()
self.assertDictEqual(da.asdict(user), {"id": 1, "name": None, "email": "some@email.com"})
def test_with_fields(self):
class Serializer(DataclassSerializer):
class Meta:
model = User
fields = ("id", "name")
serializer = Serializer(data={"id": 1, "name": "shosca", "email": "some@email.com"})
serializer.is_valid(raise_exception=True)
user = serializer.save()
self.assertDictEqual(da.asdict(user), {"id": 1, "name": "shosca", "email": None})
def test_with_missing_declared_field(self):
class Serializer(DataclassSerializer):
name = fields.CharField(required=False)
class Meta:
model = User
fields = ("id", "email")
serializer = Serializer(data={"id": 1, "name": "shosca", "email": "some@email.com"})
with self.assertRaisesMessage(
AssertionError,
"The field 'name' was declared on serializer Serializer, but has not been included in the 'fields' option.",
):
serializer.is_valid(raise_exception=True)
def test_custom_setter(self):
class Serializer(DataclassSerializer):
class Meta:
model = User
fields = ("name",)
def set_name(self, instance, field_name, value):
instance.name = value
serializer = Serializer(data={"id": 1, "name": "shosca", "email": "some@email.com"})
serializer.is_valid(raise_exception=True)
user = serializer.save()
self.assertDictEqual(da.asdict(user), {"id": None, "name": "shosca", "email": None})
def test_declared_field(self):
class Serializer(DataclassSerializer):
name = fields.CharField(required=False)
class Meta:
model = User
fields = "__all__"
serializer = Serializer(data={"id": 1, "name": "shosca", "email": "some@email.com"})
serializer.is_valid(raise_exception=True)
user = serializer.save()
self.assertDictEqual(da.asdict(user), {"id": 1, "name": "shosca", "email": "some@email.com"})
def test_nested_create(self):
class Serializer(DataclassSerializer):
class Meta:
model = Line
fields = "__all__"
serializer = Serializer(data={"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}})
serializer.is_valid(raise_exception=True)
line = serializer.save()
self.assertDictEqual(da.asdict(line), {"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}})
def test_nested_inplace_update(self):
class Serializer(DataclassSerializer):
class Meta:
model = Line
fields = "__all__"
instance = Line(a=Point(x=1, y=2), b=Point(x=3, y=4))
serializer = Serializer(instance, data={"a": {"x": 5, "y": 6}, "b": {"x": 7, "y": 8}}, partial=True)
serializer.is_valid(raise_exception=True)
line = serializer.save()
self.assertIs(instance, line)
self.assertIs(instance.a, line.a)
self.assertIs(instance.b, line.b)
self.assertDictEqual(da.asdict(line), {"a": {"x": 5, "y": 6}, "b": {"x": 7, "y": 8}})
def test_nested_inplace_with_create(self):
class Serializer(DataclassSerializer):
class Meta:
model = Line
fields = "__all__"
instance = Line()
serializer = Serializer(instance, data={"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}}, partial=True)
serializer.is_valid(raise_exception=True)
line = serializer.save()
self.assertIs(instance, line)
self.assertIsInstance(line.a, Point)
self.assertIsInstance(line.b, Point)
self.assertDictEqual(da.asdict(line), {"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}})
def test_nested_none(self):
class Serializer(DataclassSerializer):
class Meta:
model = Line
fields = "__all__"
extra_kwargs = {"a": {"allow_null": True}, "b": {"allow_null": True}}
instance = Line(a=Point(x=1, y=2), b=Point(x=3, y=4))
serializer = Serializer(instance, data={"a": None, "b": None}, partial=True)
serializer.is_valid(raise_exception=True)
line = serializer.save()
self.assertIs(instance, line)
self.assertIsNone(line.a)
self.assertIsNone(line.b)
self.assertDictEqual(da.asdict(line), {"a": None, "b": None})
def test_nested_none_allow_create_false(self):
class Serializer(DataclassSerializer):
class Meta:
model = Line
fields = "__all__"
extra_kwargs = {"a": {"allow_null": False, "allow_create": False}, "b": {"allow_null": False}}
instance = Line()
serializer = Serializer(instance, data={"a": {}, "b": {}}, partial=True)
serializer.is_valid(raise_exception=True)
with self.assertRaises(ValidationError):
serializer.save()
def test_create_star_source(self):
class Serializer(DataclassSerializer):
class Meta:
model = User
fields = ["name"]
class StarSerializer(DataclassSerializer):
user = Serializer(source="*")
class Meta:
model = User
fields = ["id", "user"]
serializer = StarSerializer(data={"user": {"name": "shosca"}, "id": 1})
serializer.is_valid(raise_exception=True)
user = serializer.save()
self.assertDictEqual(da.asdict(user), {"id": 1, "name": "shosca", "email": None})
def test_create_source_not_in_validated_data(self):
class Serializer(DataclassSerializer):
class Meta:
model = Line
fields = "__all__"
serializer = Serializer(data={})
serializer.is_valid(raise_exception=True)
line = serializer.save()
self.assertDictEqual(da.asdict(line), {"a": None, "b": None})
def test_validation_error_on_save(self):
class Serializer(DataclassSerializer):
class Meta:
model = User
fields = ["name"]
def perform_update(self, instance, validated_data, errors):
raise DjangoValidationError("test")
class StarSerializer(DataclassSerializer):
user = Serializer(source="*")
class Meta:
model = User
fields = ["id", "user"]
serializer = StarSerializer(data={"user": {"name": "shosca"}, "id": 1})
serializer.is_valid(raise_exception=True)
with self.assertRaises(ValidationError):
serializer.save()
def test_nested_list(self):
class Serializer(DataclassSerializer):
class Meta:
model = Geometry
fields = "__all__"
serializer = Serializer(
data={
"color": "red",
"lines": [
{"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}},
{"a": {"x": 5, "y": 6}, "b": {"x": 7, "y": 8}},
],
}
)
serializer.is_valid(raise_exception=True)
geometry = serializer.save()
self.assertEqual(len(geometry.lines), 2)
self.assertIsInstance(geometry.lines[0], Line)
self.assertIsInstance(geometry.lines[1], Line)
self.assertDictEqual(
da.asdict(geometry),
{
"color": Color.RED,
"lines": [
{"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}},
{"a": {"x": 5, "y": 6}, "b": {"x": 7, "y": 8}},
],
},
)
def test_nested_list_no_data(self):
class Serializer(DataclassSerializer):
class Meta:
model = Geometry
fields = "__all__"
serializer = Serializer(data={})
serializer.is_valid(raise_exception=True)
geometry = serializer.save()
self.assertDictEqual(da.asdict(geometry), {"color": None, "lines": []})
def test_nested_list_disable_nested_update(self):
class Serializer(DataclassSerializer):
class Meta:
model = Geometry
fields = "__all__"
extra_kwargs = {"lines": {"allow_nested_updates": False, "allow_create": False}}
instance = Geometry(lines=[Line(a=Point(x=1, y=2), b=Point(x=3, y=4)), Line(a=Point(x=5, y=6))])
serializer = Serializer(
instance,
data={
"color": "BLUE",
"lines": [
{"a": {"x": 7, "y": 8}, "b": {"x": 9, "y": 10}},
{"a": {"x": 11, "y": 12}, "b": {"x": 13, "y": 14}},
],
},
partial=True,
)
serializer.is_valid(raise_exception=True)
geometry = serializer.save()
self.assertDictEqual(
da.asdict(geometry),
{
"color": Color.BLUE,
"lines": [{"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}}, {"a": {"x": 5, "y": 6}, "b": None}],
},
)
def test_nested_dict_with_dataclass_serializer(self):
class Serializer(DataclassSerializer):
class Meta:
model = Person
fields = "__all__"
serializer = Serializer(
data={"name": "Sherlock Holmes", "addresses": {"work": {"street": "221B Baker Street", "city": "London"}}}
)
serializer.is_valid(raise_exception=True)
person = serializer.save()
self.assertIsInstance(person.addresses["work"], Address)
self.assertDictEqual(
da.asdict(person),
{"name": "Sherlock Holmes", "addresses": {"work": {"street": "221B Baker Street", "city": "London"}}},
)
def test_nested_dict_with_dataclass_serializer_disable_nested_update(self):
class Serializer(DataclassSerializer):
class Meta:
model = Person
fields = "__all__"
extra_kwargs = {"addresses": {"allow_nested_updates": False, "allow_create": False}}
instance = Person(addresses={"work": Address(street="221B Baker Street", city="London")})
serializer = Serializer(
instance,
data={
"name": "Sherlock Holmes",
"addresses": {"work": {"street": "Empire State Building", "city": "New York"}},
},
partial=True,
)
serializer.is_valid(raise_exception=True)
person = serializer.save()
self.assertIsInstance(person.addresses["work"], Address)
self.assertDictEqual(
da.asdict(person),
{"name": "Sherlock Holmes", "addresses": {"work": {"street": "221B Baker Street", "city": "London"}}},
)
def test_nested_dict_with_field_serializer(self):
class Serializer(DataclassSerializer):
class Meta:
model = Dummy
fields = "__all__"
serializer = Serializer(data={})
serializer.is_valid(raise_exception=True)
dummy = serializer.save()
self.assertDictEqual(da.asdict(dummy), {"stuff": None})
serializer = Serializer(data={"stuff": {"a": 1, "b": 2}})
serializer.is_valid(raise_exception=True)
dummy = serializer.save()
self.assertDictEqual(da.asdict(dummy), {"stuff": {"a": 1, "b": 2}})
| 33.976035
| 120
| 0.560885
| 1,646
| 15,595
| 5.171324
| 0.100243
| 0.027491
| 0.042763
| 0.064615
| 0.815085
| 0.791588
| 0.782425
| 0.736725
| 0.689027
| 0.689027
| 0
| 0.010119
| 0.296634
| 15,595
| 458
| 121
| 34.050218
| 0.765886
| 0.001347
| 0
| 0.635328
| 0
| 0.002849
| 0.101914
| 0
| 0
| 0
| 0
| 0
| 0.11396
| 1
| 0.074074
| false
| 0
| 0.025641
| 0
| 0.321937
| 0.002849
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7c5fdd783d1653f1ce8ad3b50d3f0dcb0705115b
| 90
|
py
|
Python
|
word2vec/nlp/tokenizer.py
|
sdliuyuzhi/word2vec-pytorch
|
2a3aa41983e8b655f1289eb2fc0524c3309f9280
|
[
"MIT"
] | null | null | null |
word2vec/nlp/tokenizer.py
|
sdliuyuzhi/word2vec-pytorch
|
2a3aa41983e8b655f1289eb2fc0524c3309f9280
|
[
"MIT"
] | null | null | null |
word2vec/nlp/tokenizer.py
|
sdliuyuzhi/word2vec-pytorch
|
2a3aa41983e8b655f1289eb2fc0524c3309f9280
|
[
"MIT"
] | 2
|
2018-12-23T20:42:28.000Z
|
2021-02-21T02:21:17.000Z
|
import re
UNK = "<UNK"
def naive_tokenizer(s):
return re.split(r"\s", s.strip())
| 9
| 37
| 0.6
| 15
| 90
| 3.533333
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211111
| 90
| 9
| 38
| 10
| 0.746479
| 0
| 0
| 0
| 0
| 0
| 0.067416
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
7c63a7a562c36da97a5770513e09c33ef8062944
| 179
|
py
|
Python
|
tiktok/hot/__init__.py
|
hackertogether/tiktok-crawler
|
eba5bb2b0ecf9e9d82084609d04ab53fc1747121
|
[
"MIT"
] | 37
|
2019-05-07T05:02:09.000Z
|
2022-01-12T06:14:57.000Z
|
tiktok/hot/__init__.py
|
hackertogether/tiktok-crawler
|
eba5bb2b0ecf9e9d82084609d04ab53fc1747121
|
[
"MIT"
] | 4
|
2019-05-23T05:27:25.000Z
|
2020-04-23T18:39:38.000Z
|
tiktok/hot/__init__.py
|
hackertogether/tiktok-crawler
|
eba5bb2b0ecf9e9d82084609d04ab53fc1747121
|
[
"MIT"
] | 17
|
2019-05-06T09:15:18.000Z
|
2022-03-14T15:58:04.000Z
|
from tiktok.hot.search import search
from tiktok.hot.video import video
from tiktok.hot.energy import energy
from tiktok.hot.music import music
from tiktok.hot.trend import trend
| 29.833333
| 36
| 0.832402
| 30
| 179
| 4.966667
| 0.3
| 0.33557
| 0.436242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111732
| 179
| 5
| 37
| 35.8
| 0.937107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7c68fa1d0d614f88f6015e83af6d91f83b0871ee
| 91
|
py
|
Python
|
utils/__init__.py
|
lmycross/segmentation
|
9b1f4bfba4c2933d87c49313234da6cfce1d8ad5
|
[
"MIT"
] | 6
|
2018-03-16T16:57:14.000Z
|
2019-05-02T16:32:06.000Z
|
utils/__init__.py
|
lmycross/segmentation
|
9b1f4bfba4c2933d87c49313234da6cfce1d8ad5
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
lmycross/segmentation
|
9b1f4bfba4c2933d87c49313234da6cfce1d8ad5
|
[
"MIT"
] | null | null | null |
from .crf import *
from .jointtransform import *
from .metrics import *
from .misc import *
| 22.75
| 29
| 0.747253
| 12
| 91
| 5.666667
| 0.5
| 0.441176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164835
| 91
| 4
| 30
| 22.75
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7cd2749b6748ed415d7e40c53c2d33a6b91a365d
| 605
|
py
|
Python
|
plugins/carbon_black_protection/komand_carbon_black_protection/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/carbon_black_protection/komand_carbon_black_protection/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/carbon_black_protection/komand_carbon_black_protection/actions/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .approve_file_locally.action import ApproveFileLocally
from .ban_file.action import BanFile
from .create_file_rule.action import CreateFileRule
from .get_approval_request.action import GetApprovalRequest
from .get_file_rule.action import GetFileRule
from .resolve_approval_request.action import ResolveApprovalRequest
from .retrieve_file_catalog_entry.action import RetrieveFileCatalogEntry
from .retrieve_file_instance.action import RetrieveFileInstance
from .unapprove_file_locally.action import UnapproveFileLocally
from .unban_file.action import UnbanFile
| 50.416667
| 72
| 0.882645
| 76
| 605
| 6.776316
| 0.473684
| 0.23301
| 0.066019
| 0.08932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080992
| 605
| 11
| 73
| 55
| 0.926259
| 0.061157
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
860b7e3fd920d7c52507f15f12d30c6efeda7c8a
| 11,394
|
py
|
Python
|
golly/Scripts/Python/Rule-Generators/Langtons-Ant-gen.py
|
Nesk8er/WebLife1
|
080ac7c2c4376d2016e2d7c6d428ba3935b28fda
|
[
"PSF-2.0"
] | null | null | null |
golly/Scripts/Python/Rule-Generators/Langtons-Ant-gen.py
|
Nesk8er/WebLife1
|
080ac7c2c4376d2016e2d7c6d428ba3935b28fda
|
[
"PSF-2.0"
] | null | null | null |
golly/Scripts/Python/Rule-Generators/Langtons-Ant-gen.py
|
Nesk8er/WebLife1
|
080ac7c2c4376d2016e2d7c6d428ba3935b28fda
|
[
"PSF-2.0"
] | null | null | null |
# generator for Langton's Ant rules
# inspired by Aldoaldoz: http://www.youtube.com/watch?v=1X-gtr4pEBU
# contact: tim.hutton@gmail.com
import golly
import random
from glife.RuleTree import *
opposite_dirs=[2,3,0,1] # index of opposite direction
# encoding:
# (0-n_colors: empty square)
def encode(c,s,d):
# turmite on color c in state s facing direction d
return n_colors + n_dirs*(n_states*c+s) + d
prefix = 'LangtonsAnt'
# (We choose a different name to the inbuilt Langtons-Ant rule to avoid
# name collision between the rules we output and the existing icons.)
spec = golly.getstring(
'''This script will create a Langton's Ant CA for a given string of actions.
The string specifies which way to turn when standing on a square of each state.
Examples: RL (Langton's Ant), RLR (Chaos), LLRR (Cardioid), LRRL (structure)
Permitted moves: 'L':Left, 'R':Right, 'U':U-turn, 'N':No turn
Enter string:''', 'RL', 'Enter string:')
n_colors = len(spec)
d={'R':'2','L':'8','U':'4','N':'1'} # 1=noturn, 2=right, 4=u-turn, 8=left
turmite_spec = "{{"+','.join(['{'+str((i+1)%n_colors)+','+d[spec[i]]+',0}' for i in range(n_colors)])+"}}"
rule_name = prefix+'_'+spec
action_table = eval(turmite_spec.replace('}',']').replace('{','['))
n_states = len(action_table)
n_dirs=4
# (N.B. The terminology 'state' here refers to the internal state of the finite
# state machine that each Turmite is using, not the contents of each Golly
# cell. We use the term 'color' to denote the symbol on the 2D 'tape'. The
# actual 'Golly state' in this emulation of Turmites is given by the
# "encoding" section below.)
total_states = n_colors+n_colors*n_states*n_dirs
# problem if we try to export more than 255 states
if total_states>255:
golly.warn("Number of states required exceeds Golly's limit of 255\n\nMaximum 51 turns allowed.")
golly.exit()
# what direction would a turmite have been facing to end up here from direction
# d if it turned t: would_have_been_facing[t][d]
would_have_been_facing={
1:[2,3,0,1], # no turn
2:[1,2,3,0], # right
4:[0,1,2,3], # u-turn
8:[3,0,1,2], # left
}
remap = [2,1,3,0] # N,E,S,W -> S,E,W,N
not_arriving_from_here = [range(n_colors) for i in range(n_dirs)] # (we're going to modify them)
for color in range(n_colors):
for state in range(n_states):
turnset = action_table[state][color][1]
for turn in [1,2,4,8]:
if not turn&turnset: # didn't turn this way
for dir in range(n_dirs):
facing = would_have_been_facing[turn][dir]
not_arriving_from_here[dir] += [encode(color,state,facing)]
# What states leave output_color behind?
leaving_color_behind = {}
for output_color in range(n_colors):
leaving_color_behind[output_color] = [output_color] # (no turmite present)
for state in range(n_states):
for color in range(n_colors):
if action_table[state][color][0]==output_color:
leaving_color_behind[output_color] += [encode(color,state,d) for d in range(n_dirs)] # any direction
tree = RuleTree(total_states,4)
# A single turmite is entering this square:
for s in range(n_states):
# collect all the possibilities for a turmite to arrive in state s...
inputs_sc = []
for state in range(n_states):
for color in range(n_colors):
if action_table[state][color][2]==s:
inputs_sc += [(state,color)]
# ...from direction dir
for dir in range(n_dirs):
inputs = []
for state,color in inputs_sc:
turnset = action_table[state][color][1] # sum of all turns
inputs += [encode(color,state,would_have_been_facing[turn][dir]) for turn in [1,2,4,8] if turn&turnset]
if len(inputs)==0:
continue
for central_color in range(n_colors):
# output the required transition
### AKT: this code causes syntax error in Python 2.3:
### transition_inputs = [leaving_color_behind[central_color]] + \
### [ inputs if i==dir else not_arriving_from_here[i] for i in remap ]
transition_inputs = [leaving_color_behind[central_color]]
for i in remap:
if i==dir:
transition_inputs.append(inputs)
else:
transition_inputs.append(not_arriving_from_here[i])
transition_output = encode(central_color,s,opposite_dirs[dir])
tree.add_rule( transition_inputs, transition_output )
# default: square is left with no turmite present
for output_color,inputs in leaving_color_behind.items():
tree.add_rule([inputs]+[range(total_states)]*4,output_color)
tree.write(golly.getdir('rules')+rule_name+'.tree')
# Create some multi-colour icons so we can see what the ant is doing
# Andrew's ant drawing: (with added eyes (2) and anti-aliasing (3))
ant31x31 = [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,1,1,1,2,2,1,1,1,2,2,1,1,1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,2,2,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,1,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,1,1,0,0,0,3,1,1,1,3,0,0,0,1,1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,3,1,1,1,3,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
ant15x15 = [[0,0,0,0,1,0,0,0,0,0,1,0,0,0,0],
[0,0,0,0,0,1,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,2,1,2,0,0,0,0,0,0],
[0,0,0,1,0,0,1,1,1,0,0,1,0,0,0],
[0,0,0,0,1,0,0,1,0,0,1,0,0,0,0],
[0,0,0,0,0,1,1,1,1,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,0,0,0,0,0,0,0],
[0,0,0,0,1,1,1,1,1,1,1,0,0,0,0],
[0,0,0,1,0,0,0,1,0,0,0,1,0,0,0],
[0,0,0,0,0,1,1,1,1,1,0,0,0,0,0],
[0,0,0,0,1,0,0,1,0,0,1,0,0,0,0],
[0,0,0,1,0,0,3,1,3,0,0,1,0,0,0],
[0,0,0,0,0,0,1,1,1,0,0,0,0,0,0],
[0,0,0,0,0,0,3,1,3,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
ant7x7 = [ [0,1,0,0,0,1,0],
[0,0,2,1,2,0,0],
[0,0,0,1,0,0,0],
[0,1,1,1,1,1,0],
[0,0,0,1,0,0,0],
[0,1,1,1,1,1,0],
[0,0,0,1,0,0,0] ]
palette=[[0,0,0],[0,155,67],[127,0,255],[128,128,128],[185,184,96],[0,100,255],[196,255,254],
[254,96,255],[126,125,21],[21,126,125],[255,116,116],[116,255,116],[116,116,255],
[228,227,0],[28,255,27],[255,27,28],[0,228,227],[227,0,228],[27,28,255],[59,59,59],
[234,195,176],[175,196,255],[171,194,68],[194,68,171],[68,171,194],[72,184,71],[184,71,72],
[71,72,184],[169,255,188],[252,179,63],[63,252,179],[179,63,252],[80,9,0],[0,80,9],[9,0,80],
[255,175,250],[199,134,213],[115,100,95],[188,163,0],[0,188,163],[163,0,188],[203,73,0],
[0,203,73],[73,0,203],[94,189,0],[189,0,94]]
eyes = (255,255,255)
rotate4 = [ [[1,0],[0,1]], [[0,-1],[1,0]], [[-1,0],[0,-1]], [[0,1],[-1,0]] ]
offset4 = [ [0,0], [1,0], [1,1], [0,1] ]
pixels = [[palette[0] for column in range(total_states)*31] for row in range(53)]
for state in range(n_states):
for color in range(n_colors):
for dir in range(n_dirs):
bg_col = palette[color]
fg_col = palette[state+n_colors]
mid = [(f+b)/2 for f,b in zip(fg_col,bg_col)]
for x in range(31):
for y in range(31):
column = (encode(color,state,dir)-1)*31 + rotate4[dir][0][0]*x + \
rotate4[dir][0][1]*y + offset4[dir][0]*30
row = rotate4[dir][1][0]*x + rotate4[dir][1][1]*y + offset4[dir][1]*30
pixels[row][column] = [bg_col,fg_col,eyes,mid][ant31x31[y][x]]
for x in range(15):
for y in range(15):
column = (encode(color,state,dir)-1)*31 + rotate4[dir][0][0]*x + \
rotate4[dir][0][1]*y + offset4[dir][0]*14
row = 31 + rotate4[dir][1][0]*x + rotate4[dir][1][1]*y + offset4[dir][1]*14
pixels[row][column] = [bg_col,fg_col,eyes,mid][ant15x15[y][x]]
for x in range(7):
for y in range(7):
column = (encode(color,state,dir)-1)*31 + rotate4[dir][0][0]*x + \
rotate4[dir][0][1]*y + offset4[dir][0]*6
row = 46 + rotate4[dir][1][0]*x + rotate4[dir][1][1]*y + offset4[dir][1]*6
pixels[row][column] = [bg_col,fg_col,eyes,mid][ant7x7[y][x]]
for color in range(n_colors):
bg_col = palette[color]
for row in range(31):
for column in range(31):
pixels[row][(color-1)*31+column] = bg_col
for row in range(15):
for column in range(15):
pixels[31+row][(color-1)*31+column] = bg_col
for row in range(7):
for column in range(7):
pixels[46+row][(color-1)*31+column] = bg_col
# use rule_name.tree and icon info to create rule_name.rule
ConvertTreeToRule(rule_name, total_states, pixels)
# now we can switch to the new rule
golly.new(rule_name+' demo')
golly.setalgo('RuleLoader')
golly.setrule(rule_name)
golly.setcell(0,0,n_colors+3) # start with an ant facing west
golly.show('Created '+rule_name+'.rule and selected that rule.')
| 49.112069
| 117
| 0.537213
| 2,541
| 11,394
| 2.356946
| 0.12318
| 0.308232
| 0.41476
| 0.504258
| 0.416764
| 0.385039
| 0.346969
| 0.321924
| 0.311738
| 0.29454
| 0
| 0.207098
| 0.233368
| 11,394
| 231
| 118
| 49.324675
| 0.478535
| 0.155257
| 0
| 0.281609
| 0
| 0
| 0.021486
| 0
| 0.005747
| 0
| 0
| 0
| 0
| 1
| 0.005747
| false
| 0
| 0.017241
| 0.005747
| 0.028736
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
861a765da52f6698a03a119895841dc8ef50deaa
| 24,462
|
py
|
Python
|
sdk/eventhub/azure-eventhubs/tests/livetest/asynctests/test_eventprocessor_async.py
|
gnovack/azure-sdk-for-python
|
e539abe76ba5aa035cb2b0b793829a8e6aba09e6
|
[
"MIT"
] | null | null | null |
sdk/eventhub/azure-eventhubs/tests/livetest/asynctests/test_eventprocessor_async.py
|
gnovack/azure-sdk-for-python
|
e539abe76ba5aa035cb2b0b793829a8e6aba09e6
|
[
"MIT"
] | null | null | null |
sdk/eventhub/azure-eventhubs/tests/livetest/asynctests/test_eventprocessor_async.py
|
gnovack/azure-sdk-for-python
|
e539abe76ba5aa035cb2b0b793829a8e6aba09e6
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import pytest
import asyncio
import time
from azure.eventhub import EventData
from azure.eventhub.aio import EventHubConsumerClient
from azure.eventhub.aio._eventprocessor.event_processor import EventProcessor, CloseReason
from azure.eventhub.aio._eventprocessor.local_checkpoint_store import InMemoryCheckpointStore
from azure.eventhub.aio._eventprocessor._ownership_manager import OwnershipManager
from azure.eventhub.exceptions import OwnershipLostError, EventHubError
from azure.eventhub._client_base import _Address
TEST_NAMESPACE = "test_namespace"
TEST_EVENTHUB = "test_eventhub"
TEST_CONSUMER_GROUP = "test_consumer_group"
TEST_OWNER = "test_owner_id"
async def event_handler(partition_context, event):
pass
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_loadbalancer_balance(connstr_senders):
connection_str, senders = connstr_senders
for sender in senders:
sender.send(EventData("EventProcessor Test"))
eventhub_client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
checkpoint_store = InMemoryCheckpointStore()
tasks = []
event_processor1 = EventProcessor(eventhub_client=eventhub_client,
consumer_group='$default',
checkpoint_store=checkpoint_store,
event_handler=event_handler,
error_handler=None,
partition_initialize_handler=None,
partition_close_handler=None,
load_balancing_interval=1)
tasks.append(asyncio.ensure_future(event_processor1.start()))
await asyncio.sleep(3)
assert len(event_processor1._tasks) == 2 # event_processor1 claims two partitions
event_processor2 = EventProcessor(eventhub_client=eventhub_client,
consumer_group='$default',
checkpoint_store=checkpoint_store,
event_handler=event_handler,
error_handler=None,
partition_initialize_handler=None,
partition_close_handler=None,
load_balancing_interval=1)
tasks.append(asyncio.ensure_future(event_processor2.start()))
await asyncio.sleep(3)
assert len(event_processor1._tasks) == 1 # two event processors balance. So each has 1 task
assert len(event_processor2._tasks) == 1
event_processor3 = EventProcessor(eventhub_client=eventhub_client,
consumer_group='$default',
checkpoint_store=checkpoint_store,
event_handler=event_handler,
error_handler=None,
partition_initialize_handler=None,
partition_close_handler=None,
load_balancing_interval=1)
tasks.append(asyncio.ensure_future(event_processor3.start()))
await asyncio.sleep(3)
assert len(event_processor3._tasks) == 0
await event_processor3.stop()
await event_processor1.stop()
await asyncio.sleep(3)
assert len(event_processor2._tasks) == 2 # event_procesor2 takes another one after event_processor1 stops
await event_processor2.stop()
await eventhub_client.close()
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_loadbalancer_list_ownership_error(connstr_senders):
class ErrorCheckpointStore(InMemoryCheckpointStore):
async def list_ownership(self, fully_qualified_namespace, eventhub_name, consumer_group):
raise RuntimeError("Test runtime error")
connection_str, senders = connstr_senders
for sender in senders:
sender.send(EventData("EventProcessor Test"))
eventhub_client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
checkpoint_store = ErrorCheckpointStore()
event_processor = EventProcessor(eventhub_client=eventhub_client,
consumer_group='$default',
checkpoint_store=checkpoint_store,
event_handler=event_handler,
error_handler=None,
partition_initialize_handler=None,
partition_close_handler=None,
load_balancing_interval=1)
task = asyncio.ensure_future(event_processor.start())
await asyncio.sleep(5)
assert event_processor._running is True
assert len(event_processor._tasks) == 0
await event_processor.stop()
# task.cancel()
await eventhub_client.close()
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_partition_processor(connstr_senders):
lock = asyncio.Lock()
event_map = {}
checkpoint = None
close_reason = None
error = None
async def partition_initialize_handler(partition_context):
partition_initialize_handler.partition_context = partition_context
async def event_handler(partition_context, event):
async with lock:
if event:
nonlocal checkpoint, event_map
event_map[partition_context.partition_id] = event_map.get(partition_context.partition_id, 0) + 1
offset, sn = event.offset, event.sequence_number
checkpoint = (offset, sn)
await partition_context.update_checkpoint(event)
async def partition_close_handler(partition_context, reason):
assert partition_context and reason
nonlocal close_reason
close_reason = reason
async def error_handler(partition_context, err):
assert partition_context and err
nonlocal error
error = err
connection_str, senders = connstr_senders
for sender in senders:
sender.send(EventData("EventProcessor Test"))
eventhub_client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
checkpoint_store = InMemoryCheckpointStore()
event_processor = EventProcessor(eventhub_client=eventhub_client,
consumer_group='$default',
checkpoint_store=checkpoint_store,
event_handler=event_handler,
error_handler=error_handler,
partition_initialize_handler=partition_initialize_handler,
partition_close_handler=partition_close_handler,
load_balancing_interval=1)
task = asyncio.ensure_future(event_processor.start())
await asyncio.sleep(10)
assert len(event_processor._tasks) == 2
await event_processor.stop()
task.cancel()
await eventhub_client.close()
assert event_map['0'] == 1 and event_map['1'] == 1
assert checkpoint is not None
assert close_reason == CloseReason.SHUTDOWN
assert error is None
assert partition_initialize_handler.partition_context
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_partition_processor_process_events_error(connstr_senders):
async def event_handler(partition_context, event):
if partition_context.partition_id == "1":
raise RuntimeError("processing events error")
else:
pass
async def error_handler(partition_context, error):
if partition_context.partition_id == "1":
error_handler.error = error
else:
raise RuntimeError("There shouldn't be an error for partition other than 1")
async def partition_close_handler(partition_context, reason):
if partition_context.partition_id == "1":
assert reason == CloseReason.OWNERSHIP_LOST
else:
assert reason == CloseReason.SHUTDOWN
connection_str, senders = connstr_senders
for sender in senders:
sender.send(EventData("EventProcessor Test"))
eventhub_client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
checkpoint_store = InMemoryCheckpointStore()
event_processor = EventProcessor(eventhub_client=eventhub_client,
consumer_group='$default',
checkpoint_store=checkpoint_store,
event_handler=event_handler,
error_handler=error_handler,
partition_initialize_handler=None,
partition_close_handler=partition_close_handler,
load_balancing_interval=1)
task = asyncio.ensure_future(event_processor.start())
await asyncio.sleep(10)
await event_processor.stop()
# task.cancel()
await eventhub_client.close()
assert isinstance(error_handler.error, RuntimeError)
@pytest.mark.asyncio
async def test_partition_processor_process_eventhub_consumer_error():
    """An EventHubError raised while receiving must be delivered to the error
    handler, and the partition must close with reason OWNERSHIP_LOST.
    Uses fully mocked client/consumer — no live service needed.
    """
    async def event_handler(partition_context, event):
        pass
    async def error_handler(partition_context, error):
        # Capture the propagated error on the function object for the assert below.
        error_handler.error = error
    async def partition_close_handler(partition_context, reason):
        partition_close_handler.reason = reason
    class MockEventHubClient(object):
        eventhub_name = "test_eh_name"
        def __init__(self):
            self._address = _Address(hostname="test", path=MockEventHubClient.eventhub_name)
        def _create_consumer(self, consumer_group, partition_id, event_position, **kwargs):
            return MockEventhubConsumer(**kwargs)
        async def get_partition_ids(self):
            return ["0", "1"]
    class MockEventhubConsumer(object):
        def __init__(self, **kwargs):
            self.stop = False
            self._on_event_received = kwargs.get("on_event_received")
        async def receive(self):
            # Simulate a consumer-level failure on every receive attempt.
            raise EventHubError("Mock EventHubConsumer EventHubError")
        async def close(self):
            pass
    eventhub_client = MockEventHubClient()
    checkpoint_store = InMemoryCheckpointStore()
    event_processor = EventProcessor(eventhub_client=eventhub_client,
                                     consumer_group='$default',
                                     checkpoint_store=checkpoint_store,
                                     event_handler=event_handler,
                                     error_handler=error_handler,
                                     partition_initialize_handler=None,
                                     partition_close_handler=partition_close_handler,
                                     load_balancing_interval=1)
    task = asyncio.ensure_future(event_processor.start())
    # Give the processor a few load-balancing cycles to hit the receive error.
    await asyncio.sleep(5)
    await event_processor.stop()
    task.cancel()
    assert isinstance(error_handler.error, EventHubError)
    assert partition_close_handler.reason == CloseReason.OWNERSHIP_LOST
@pytest.mark.asyncio
async def test_partition_processor_process_error_close_error():
    """Every user handler raises: the processor must still call initialize,
    event, error and close handlers and must release partition ownership,
    without the handler exceptions escaping the processor.
    """
    async def partition_initialize_handler(partition_context):
        partition_initialize_handler.called = True
        raise RuntimeError("initialize error")
    async def event_handler(partition_context, event):
        event_handler.called = True
        raise RuntimeError("process_events error")
    async def error_handler(partition_context, error):
        # Errors raised in the other handlers arrive here as RuntimeError.
        assert isinstance(error, RuntimeError)
        error_handler.called = True
        raise RuntimeError("process_error error")
    async def partition_close_handler(partition_context, reason):
        assert reason == CloseReason.SHUTDOWN
        partition_close_handler.called = True
        raise RuntimeError("close error")
    class MockEventHubClient(object):
        eventhub_name = "test_eh_name"
        def __init__(self):
            self._address = _Address(hostname="test", path=MockEventHubClient.eventhub_name)
        def _create_consumer(self, consumer_group, partition_id, event_position, **kwargs):
            return MockEventhubConsumer(**kwargs)
        async def get_partition_ids(self):
            return ["0", "1"]
    class MockEventhubConsumer(object):
        def __init__(self, **kwargs):
            self.stop = False
            self._on_event_received = kwargs.get("on_event_received")
        async def receive(self):
            # Feed one mock event into the processor per receive call.
            await asyncio.sleep(0.1)
            await self._on_event_received(EventData("mock events"))
        async def close(self):
            pass
    class MockOwnershipManager(OwnershipManager):
        called = False
        async def release_ownership(self, partition_id):
            # Record that the processor attempted to release ownership.
            self.called = True
    eventhub_client = MockEventHubClient()
    checkpoint_store = InMemoryCheckpointStore()
    ownership_manager = MockOwnershipManager(eventhub_client, "$Default", "owner", checkpoint_store, 10.0, "0")
    event_processor = EventProcessor(eventhub_client=eventhub_client,
                                     consumer_group='$default',
                                     checkpoint_store=checkpoint_store,
                                     event_handler=event_handler,
                                     error_handler=error_handler,
                                     partition_initialize_handler=partition_initialize_handler,
                                     partition_close_handler=partition_close_handler,
                                     load_balancing_interval=1)
    # Inject the spy ownership manager so release calls can be observed.
    event_processor._ownership_manager = ownership_manager
    task = asyncio.ensure_future(event_processor.start())
    await asyncio.sleep(5)
    await event_processor.stop()
    # task.cancel()
    assert partition_initialize_handler.called
    assert event_handler.called
    assert error_handler.called
    assert ownership_manager.called
    assert partition_close_handler.called
@pytest.mark.asyncio
async def test_ownership_manager_release_partition():
    """OwnershipManager.release_ownership must write a released (empty
    owner_id) record only for a partition this owner actively holds.

    The first four release calls cover record states that must NOT trigger
    a release: partition not owned at all, future-dated record, already
    released record, and a record held by a different owner.
    """
    class MockEventHubClient(object):
        eventhub_name = "test_eh_name"
        def __init__(self):
            self._address = _Address(hostname="test", path=MockEventHubClient.eventhub_name)
        def _create_consumer(self, consumer_group, partition_id, event_position, **kwargs):
            # Never invoked by this test; MockEventhubConsumer is only defined
            # in sibling tests, so actually calling this would raise NameError.
            return MockEventhubConsumer(**kwargs)
        async def get_partition_ids(self):
            return ["0", "1"]
    class MockCheckpointStore(InMemoryCheckpointStore):
        # Last ownership list passed to claim_ownership, or None if never called.
        released = None
        async def claim_ownership(self, ownership):  # fixed param typo "ownsership"
            self.released = ownership
    checkpoint_store = MockCheckpointStore()
    ownership_manager = OwnershipManager(MockEventHubClient(), "$Default", "owner", checkpoint_store, 10.0, "0")
    # NOTE(review): attribute name presumably mirrors the (misspelled) internal
    # attribute of OwnershipManager — confirm against the SDK before renaming.
    ownership_manager.cached_parition_ids = ["0", "1"]
    ownership_manager.owned_partitions = []
    await ownership_manager.release_ownership("1")
    assert checkpoint_store.released is None  # not owned: no release
    ownership_manager.owned_partitions = [
        {"partition_id": "0", "owner_id": "foo", "last_modified_time": time.time() + 31}
    ]
    await ownership_manager.release_ownership("0")
    assert checkpoint_store.released is None  # future-dated record: no release
    ownership_manager.owned_partitions = [
        {"partition_id": "0", "owner_id": "", "last_modified_time": time.time()}
    ]
    await ownership_manager.release_ownership("0")
    assert checkpoint_store.released is None  # already released: no release
    ownership_manager.owned_partitions = [
        {"partition_id": "0", "owner_id": "foo", "last_modified_time": time.time()}
    ]
    await ownership_manager.release_ownership("0")
    assert checkpoint_store.released is None  # owned by another owner: no release
    ownership_manager.owned_partitions = [
        {"partition_id": "0", "owner_id": "owner", "last_modified_time": time.time()}
    ]
    await ownership_manager.release_ownership("0")
    # Our own active ownership: released by blanking the owner_id.
    assert checkpoint_store.released[0]["owner_id"] == ""
@pytest.mark.parametrize(
    "ownerships, partitions, expected_result",
    [
        ([], ["0", "1", "2"], 3),
        (['ownership_active0', 'ownership_active1'], ["0", "1", "2"], 1),
        (['ownership_active0', 'ownership_expired'], ["0", "1", "2"], 2),
        (['ownership_active0', 'ownership_expired', 'ownership_released'], ["0", "1", "2", "3"], 3),
        (['ownership_active0'], ["0", "1", "2", "3"], 3),
        (['ownership_expired', 'ownership_released'], ["0", "1", "2", "3"], 4),
        (['ownership_active0', 'ownership_active1'], ["0", "1"], 0),
        (['ownership_active0', 'ownership_self_owned'], ["0", "1"], 1),
    ]
)
def test_balance_ownership_on_init(ownerships, partitions, expected_result):
    """While initializing, _balance_ownership should claim every partition
    that is unclaimed, expired, released, or already self-owned; verify the
    claim count for each combination of existing ownership records.
    """
    def build_record(partition_id, owner_id, modified):
        # One ownership record as a checkpoint store would return it.
        return {
            "fully_qualified_namespace": TEST_NAMESPACE,
            "partition_id": partition_id,
            "eventhub_name": TEST_EVENTHUB,
            "consumer_group": TEST_CONSUMER_GROUP,
            "owner_id": owner_id,
            "last_modified_time": modified,
        }
    ownership_ref = {
        'ownership_active0': build_record("0", "owner_0", time.time()),
        'ownership_active1': build_record("1", "owner_1", time.time()),
        'ownership_self_owned': build_record("1", TEST_OWNER, time.time()),
        'ownership_expired': build_record("2", "owner_1", time.time() - 100000),
        'ownership_released': build_record("3", "", time.time()),
    }
    class MockEventHubClient(object):
        eventhub_name = TEST_EVENTHUB
        def __init__(self):
            self._address = _Address(hostname=TEST_NAMESPACE, path=MockEventHubClient.eventhub_name)
        def _create_consumer(self, consumer_group, partition_id, event_position, **kwargs):
            return MockEventhubConsumer(**kwargs)
        def get_partition_ids(self):
            return ["0", "1"]
    manager = OwnershipManager(MockEventHubClient(), TEST_CONSUMER_GROUP, TEST_OWNER, None, 10, None)
    manager._initializing = True
    existing = [ownership_ref[name] for name in ownerships]
    claimable = manager._balance_ownership(existing, partitions)
    assert len(claimable) == expected_result
@pytest.mark.parametrize(
    "ownerships, partitions, expected_result",
    [
        ([], ["0", "1", "2"], 1),
        (['ownership_active0', 'ownership_active1'], ["0", "1", "2"], 1),
        (['ownership_active0', 'ownership_expired'], ["0", "1", "2"], 1),
        (['ownership_active0', 'ownership_expired', 'ownership_released'], ["0", "1", "2", "3"], 1),
        (['ownership_active0'], ["0", "1", "2", "3"], 1),
        (['ownership_expired', 'ownership_released'], ["0", "1", "2", "3"], 1),
        (['ownership_active0', 'ownership_active1'], ["0", "1"], 0),
        (['ownership_active0', 'ownership_self_owned'], ["0", "1"], 1),
    ]
)
def test_balance_ownership(ownerships, partitions, expected_result):
    """In steady state (not initializing), _balance_ownership should claim
    at most one new partition per balancing cycle; verify the claim count
    for each combination of existing ownership records.
    """
    def build_record(partition_id, owner_id, modified):
        # One ownership record as a checkpoint store would return it.
        return {
            "fully_qualified_namespace": TEST_NAMESPACE,
            "partition_id": partition_id,
            "eventhub_name": TEST_EVENTHUB,
            "consumer_group": TEST_CONSUMER_GROUP,
            "owner_id": owner_id,
            "last_modified_time": modified,
        }
    ownership_ref = {
        'ownership_active0': build_record("0", "owner_0", time.time()),
        'ownership_active1': build_record("1", "owner_1", time.time()),
        'ownership_self_owned': build_record("1", TEST_OWNER, time.time()),
        'ownership_expired': build_record("2", "owner_1", time.time() - 100000),
        'ownership_released': build_record("3", "", time.time()),
    }
    class MockEventHubClient(object):
        eventhub_name = TEST_EVENTHUB
        def __init__(self):
            self._address = _Address(hostname=TEST_NAMESPACE, path=MockEventHubClient.eventhub_name)
        def _create_consumer(self, consumer_group, partition_id, event_position, **kwargs):
            return MockEventhubConsumer(**kwargs)
        def get_partition_ids(self):
            return ["0", "1"]
    manager = OwnershipManager(MockEventHubClient(), TEST_CONSUMER_GROUP, TEST_OWNER, None, 10, None)
    manager._initializing = False
    existing = [ownership_ref[name] for name in ownerships]
    claimable = manager._balance_ownership(existing, partitions)
    assert len(claimable) == expected_result
@pytest.mark.liveTest
@pytest.mark.asyncio
async def test_partition_processor_process_update_checkpoint_error(connstr_senders):
    """Live test: an OwnershipLostError raised by the checkpoint store's
    update_checkpoint must be surfaced to the error handler and the
    partition must still be closed.
    """
    class ErrorCheckpointStore(InMemoryCheckpointStore):
        async def update_checkpoint(self, checkpoint):
            # Fail checkpointing only for partition "1".
            if checkpoint['partition_id'] == "1":
                raise OwnershipLostError("Mocked ownership lost")
    async def event_handler(partition_context, event):
        await partition_context.update_checkpoint(event)
    async def error_handler(partition_context, error):
        assert isinstance(error, OwnershipLostError)
    async def partition_close_handler(partition_context, reason):
        if partition_context.partition_id == "1":
            assert reason == CloseReason.SHUTDOWN
        # Mark invocation for every partition, not only "1".
        partition_close_handler.called = True
    connection_str, senders = connstr_senders
    for sender in senders:
        sender.send(EventData("EventProcessor Test"))
    eventhub_client = EventHubConsumerClient.from_connection_string(connection_str, consumer_group='$default')
    checkpoint_store = ErrorCheckpointStore()
    event_processor = EventProcessor(eventhub_client=eventhub_client,
                                     consumer_group='$default',
                                     checkpoint_store=checkpoint_store,
                                     event_handler=event_handler,
                                     error_handler=error_handler,
                                     partition_initialize_handler=None,
                                     partition_close_handler=partition_close_handler,
                                     load_balancing_interval=1)
    task = asyncio.ensure_future(event_processor.start())
    # Allow time for live event delivery and the checkpoint failure.
    await asyncio.sleep(10)
    await event_processor.stop()
    # task.cancel()
    await asyncio.sleep(1)
    await eventhub_client.close()
    assert partition_close_handler.called
| 41.320946
| 120
| 0.635148
| 2,392
| 24,462
| 6.159281
| 0.08403
| 0.038824
| 0.035634
| 0.028507
| 0.810969
| 0.773637
| 0.748795
| 0.733999
| 0.697618
| 0.679563
| 0
| 0.011877
| 0.270338
| 24,462
| 591
| 121
| 41.390863
| 0.813547
| 0.020644
| 0
| 0.695652
| 0
| 0
| 0.10687
| 0.010441
| 0
| 0
| 0
| 0
| 0.076605
| 1
| 0.033126
| false
| 0.010352
| 0.020704
| 0.014493
| 0.111801
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
86204b778ac80393a907195e96d63d39603b65ff
| 132
|
py
|
Python
|
build/lib/agrimetscraper/utils/duplicatechecker.py
|
lixiaopi1985/agrimet_scraper
|
eb566d2bbb6f1882656a1bc5319e9f35ad7dc5df
|
[
"MIT"
] | null | null | null |
build/lib/agrimetscraper/utils/duplicatechecker.py
|
lixiaopi1985/agrimet_scraper
|
eb566d2bbb6f1882656a1bc5319e9f35ad7dc5df
|
[
"MIT"
] | null | null | null |
build/lib/agrimetscraper/utils/duplicatechecker.py
|
lixiaopi1985/agrimet_scraper
|
eb566d2bbb6f1882656a1bc5319e9f35ad7dc5df
|
[
"MIT"
] | null | null | null |
"""used to check if data existed in database already
"""
def duplicate_checker(tuple1, list_all):
    """Return True when the record *tuple1* already occurs in *list_all*.

    Plain membership test used to skip rows that were scraped previously.
    """
    return any(existing == tuple1 for existing in list_all)
| 13.2
| 52
| 0.734848
| 20
| 132
| 4.7
| 0.8
| 0.148936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018692
| 0.189394
| 132
| 9
| 53
| 14.666667
| 0.859813
| 0.371212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
863edadd0dfcd82c58c5c28068403800d1c36825
| 27,820
|
py
|
Python
|
tests/components/plex/test_config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/plex/test_config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/plex/test_config_flow.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for Plex config flow."""
import copy
import ssl
from unittest.mock import patch
import plexapi.exceptions
import pytest
import requests.exceptions
from openpeerpower.components.media_player import DOMAIN as MP_DOMAIN
from openpeerpower.components.plex import config_flow
from openpeerpower.components.plex.const import (
AUTOMATIC_SETUP_STRING,
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_SERVER_IDENTIFIER,
CONF_USE_EPISODE_ART,
DOMAIN,
MANUAL_SETUP_STRING,
PLEX_SERVER_CONFIG,
SERVERS,
)
from openpeerpower.config_entries import (
SOURCE_INTEGRATION_DISCOVERY,
SOURCE_REAUTH,
SOURCE_USER,
ConfigEntryState,
)
from openpeerpower.const import (
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_TOKEN,
CONF_URL,
CONF_VERIFY_SSL,
)
from openpeerpower.setup import async_setup_component
from .const import DEFAULT_OPTIONS, MOCK_SERVERS, MOCK_TOKEN, PLEX_DIRECT_URL
from .helpers import trigger_plex_update, wait_for_debouncer
from .mock_classes import MockGDM
from tests.common import MockConfigEntry
async def test_bad_credentials(opp, current_request_with_host):
    """Test when provided credentials are rejected."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # Plex account lookup rejects the token while the external auth succeeds.
    with patch(
        "plexapi.myplex.MyPlexAccount", side_effect=plexapi.exceptions.Unauthorized
    ), patch("plexauth.PlexAuth.initiate_auth"), patch(
        "plexauth.PlexAuth.token", return_value="BAD TOKEN"
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        assert result["type"] == "external"
        # Drive the external-auth step to completion; the bad token should
        # bounce the flow back to the user form with a credentials error.
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "external_done"
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "form"
        assert result["step_id"] == "user"
        assert result["errors"][CONF_TOKEN] == "faulty_credentials"
async def test_bad_hostname(opp, mock_plex_calls, current_request_with_host):
    """Test when an invalid address is provided."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # Auth succeeds, but connecting to the discovered server fails.
    with patch(
        "plexapi.myplex.MyPlexResource.connect",
        side_effect=requests.exceptions.ConnectionError,
    ), patch("plexauth.PlexAuth.initiate_auth"), patch(
        "plexauth.PlexAuth.token", return_value=MOCK_TOKEN
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        assert result["type"] == "external"
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "external_done"
        # Connection failure returns the user form with a host error.
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "form"
        assert result["step_id"] == "user"
        assert result["errors"][CONF_HOST] == "not_found"
async def test_unknown_exception(opp, current_request_with_host):
    """Test when an unknown exception is encountered."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # A generic Exception during account setup must abort the flow.
    with patch("plexapi.myplex.MyPlexAccount", side_effect=Exception), patch(
        "plexauth.PlexAuth.initiate_auth"
    ), patch("plexauth.PlexAuth.token", return_value="MOCK_TOKEN"):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        assert result["type"] == "external"
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "external_done"
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "abort"
        assert result["reason"] == "unknown"
async def test_no_servers_found(
    opp, mock_plex_calls, requests_mock, empty_payload, current_request_with_host
):
    """Test when no servers are on an account."""
    # Return an empty resource list from the Plex cloud API.
    requests_mock.get("https://plex.tv/api/resources", text=empty_payload)
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    with patch("plexauth.PlexAuth.initiate_auth"), patch(
        "plexauth.PlexAuth.token", return_value=MOCK_TOKEN
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        assert result["type"] == "external"
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "external_done"
        # No servers: back to the user form with a "no_servers" error.
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "form"
        assert result["step_id"] == "user"
        assert result["errors"]["base"] == "no_servers"
async def test_single_available_server(opp, mock_plex_calls, current_request_with_host):
    """Test creating an entry with one server available."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    with patch("plexauth.PlexAuth.initiate_auth"), patch(
        "plexauth.PlexAuth.token", return_value=MOCK_TOKEN
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        assert result["type"] == "external"
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "external_done"
        # With exactly one server the flow skips the selection step
        # and creates the entry directly.
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "create_entry"
        server_id = result["data"][CONF_SERVER_IDENTIFIER]
        mock_plex_server = opp.data[DOMAIN][SERVERS][server_id]
        assert result["title"] == mock_plex_server.url_in_use
        assert result["data"][CONF_SERVER] == mock_plex_server.friendly_name
        assert (
            result["data"][CONF_SERVER_IDENTIFIER]
            == mock_plex_server.machine_identifier
        )
        assert (
            result["data"][PLEX_SERVER_CONFIG][CONF_URL] == mock_plex_server.url_in_use
        )
        assert result["data"][PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_multiple_servers_with_selection(
    opp,
    mock_plex_calls,
    requests_mock,
    plextv_resources_base,
    current_request_with_host,
):
    """Test creating an entry with multiple servers available."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # Enable the second server in the mocked resource payload.
    requests_mock.get(
        "https://plex.tv/api/resources",
        text=plextv_resources_base.format(second_server_enabled=1),
    )
    with patch("plexauth.PlexAuth.initiate_auth"), patch(
        "plexauth.PlexAuth.token", return_value=MOCK_TOKEN
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        assert result["type"] == "external"
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "external_done"
        # Multiple servers: the flow must show the selection step first.
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "form"
        assert result["step_id"] == "select_server"
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            user_input={CONF_SERVER: MOCK_SERVERS[0][CONF_SERVER]},
        )
        assert result["type"] == "create_entry"
        server_id = result["data"][CONF_SERVER_IDENTIFIER]
        mock_plex_server = opp.data[DOMAIN][SERVERS][server_id]
        assert result["title"] == mock_plex_server.url_in_use
        assert result["data"][CONF_SERVER] == mock_plex_server.friendly_name
        assert (
            result["data"][CONF_SERVER_IDENTIFIER]
            == mock_plex_server.machine_identifier
        )
        assert (
            result["data"][PLEX_SERVER_CONFIG][CONF_URL] == mock_plex_server.url_in_use
        )
        assert result["data"][PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_adding_last_unconfigured_server(
    opp,
    mock_plex_calls,
    requests_mock,
    plextv_resources_base,
    current_request_with_host,
):
    """Test automatically adding last unconfigured server when multiple servers on account."""
    # Pre-configure the second server so only one remains unconfigured.
    MockConfigEntry(
        domain=DOMAIN,
        data={
            CONF_SERVER_IDENTIFIER: MOCK_SERVERS[1][CONF_SERVER_IDENTIFIER],
            CONF_SERVER: MOCK_SERVERS[1][CONF_SERVER],
        },
    ).add_to_opp(opp)
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    requests_mock.get(
        "https://plex.tv/api/resources",
        text=plextv_resources_base.format(second_server_enabled=1),
    )
    with patch("plexauth.PlexAuth.initiate_auth"), patch(
        "plexauth.PlexAuth.token", return_value=MOCK_TOKEN
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        assert result["type"] == "external"
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "external_done"
        # Only one server remains unconfigured, so no selection step appears.
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "create_entry"
        server_id = result["data"][CONF_SERVER_IDENTIFIER]
        mock_plex_server = opp.data[DOMAIN][SERVERS][server_id]
        assert result["title"] == mock_plex_server.url_in_use
        assert result["data"][CONF_SERVER] == mock_plex_server.friendly_name
        assert (
            result["data"][CONF_SERVER_IDENTIFIER]
            == mock_plex_server.machine_identifier
        )
        assert (
            result["data"][PLEX_SERVER_CONFIG][CONF_URL] == mock_plex_server.url_in_use
        )
        assert result["data"][PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_all_available_servers_configured(
    opp,
    entry,
    requests_mock,
    plextv_account,
    plextv_resources_base,
    current_request_with_host,
):
    """Test when all available servers are already configured."""
    # Configure both account servers before starting a new flow.
    entry.add_to_opp(opp)
    MockConfigEntry(
        domain=DOMAIN,
        data={
            CONF_SERVER_IDENTIFIER: MOCK_SERVERS[1][CONF_SERVER_IDENTIFIER],
            CONF_SERVER: MOCK_SERVERS[1][CONF_SERVER],
        },
    ).add_to_opp(opp)
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    requests_mock.get("https://plex.tv/users/account", text=plextv_account)
    requests_mock.get(
        "https://plex.tv/api/resources",
        text=plextv_resources_base.format(second_server_enabled=1),
    )
    with patch("plexauth.PlexAuth.initiate_auth"), patch(
        "plexauth.PlexAuth.token", return_value=MOCK_TOKEN
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        assert result["type"] == "external"
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "external_done"
        # Nothing left to add: the flow aborts.
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "abort"
        assert result["reason"] == "all_configured"
async def test_option_flow(opp, entry, mock_plex_server):
    """Test config options flow selection."""
    assert len(opp.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state is ConfigEntryState.LOADED
    result = await opp.config_entries.options.async_init(
        entry.entry_id, context={"source": "test"}, data=None
    )
    assert result["type"] == "form"
    assert result["step_id"] == "plex_mp_settings"
    result = await opp.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            CONF_USE_EPISODE_ART: True,
            CONF_IGNORE_NEW_SHARED_USERS: True,
            CONF_MONITORED_USERS: list(mock_plex_server.accounts),
        },
    )
    assert result["type"] == "create_entry"
    # Each selected user is stored as an enabled-user mapping.
    assert result["data"] == {
        MP_DOMAIN: {
            CONF_USE_EPISODE_ART: True,
            CONF_IGNORE_NEW_SHARED_USERS: True,
            CONF_MONITORED_USERS: {
                user: {"enabled": True} for user in mock_plex_server.accounts
            },
            CONF_IGNORE_PLEX_WEB_CLIENTS: False,
        }
    }
async def test_missing_option_flow(opp, entry, mock_plex_server):
    """Test config options flow selection when no options stored."""
    assert len(opp.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state is ConfigEntryState.LOADED
    result = await opp.config_entries.options.async_init(
        entry.entry_id, context={"source": "test"}, data=None
    )
    assert result["type"] == "form"
    assert result["step_id"] == "plex_mp_settings"
    result = await opp.config_entries.options.async_configure(
        result["flow_id"],
        user_input={
            CONF_USE_EPISODE_ART: True,
            CONF_IGNORE_NEW_SHARED_USERS: True,
            CONF_MONITORED_USERS: list(mock_plex_server.accounts),
        },
    )
    assert result["type"] == "create_entry"
    # Defaults are filled in even though the entry started without options.
    assert result["data"] == {
        MP_DOMAIN: {
            CONF_USE_EPISODE_ART: True,
            CONF_IGNORE_NEW_SHARED_USERS: True,
            CONF_MONITORED_USERS: {
                user: {"enabled": True} for user in mock_plex_server.accounts
            },
            CONF_IGNORE_PLEX_WEB_CLIENTS: False,
        }
    }
async def test_option_flow_new_users_available(opp, entry, setup_plex_server):
    """Test config options multiselect defaults when new Plex users are seen."""
    # Start with only the owner monitored so the other accounts count as new.
    OPTIONS_OWNER_ONLY = copy.deepcopy(DEFAULT_OPTIONS)
    OPTIONS_OWNER_ONLY[MP_DOMAIN][CONF_MONITORED_USERS] = {"User 1": {"enabled": True}}
    entry.options = OPTIONS_OWNER_ONLY
    mock_plex_server = await setup_plex_server(config_entry=entry)
    await opp.async_block_till_done()
    server_id = mock_plex_server.machine_identifier
    monitored_users = opp.data[DOMAIN][SERVERS][server_id].option_monitored_users
    new_users = [x for x in mock_plex_server.accounts if x not in monitored_users]
    assert len(monitored_users) == 1
    assert len(new_users) == 2
    result = await opp.config_entries.options.async_init(
        entry.entry_id, context={"source": "test"}, data=None
    )
    assert result["type"] == "form"
    assert result["step_id"] == "plex_mp_settings"
    # The multiselect labels flag the owner and each newly seen user.
    multiselect_defaults = result["data_schema"].schema["monitored_users"].options
    assert "[Owner]" in multiselect_defaults["User 1"]
    for user in new_users:
        assert "[New]" in multiselect_defaults[user]
async def test_external_timed_out(opp, current_request_with_host):
    """Test when external flow times out."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # A None token means the external auth never completed.
    with patch("plexauth.PlexAuth.initiate_auth"), patch(
        "plexauth.PlexAuth.token", return_value=None
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        assert result["type"] == "external"
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "external_done"
        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "abort"
        assert result["reason"] == "token_request_timeout"
async def test_callback_view(opp, aiohttp_client, current_request_with_host):
    """Test callback view."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    with patch("plexauth.PlexAuth.initiate_auth"), patch(
        "plexauth.PlexAuth.token", return_value=MOCK_TOKEN
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        assert result["type"] == "external"
        # Hit the auth-callback HTTP endpoint directly with the flow id.
        client = await aiohttp_client(opp.http.app)
        forward_url = f'{config_flow.AUTH_CALLBACK_PATH}?flow_id={result["flow_id"]}'
        resp = await client.get(forward_url)
        assert resp.status == 200
async def test_manual_config(opp, mock_plex_calls, current_request_with_host):
    """Test creating via manual configuration."""
    class WrongCertValidaitionException(requests.exceptions.SSLError):
        """Mock the exception showing an unmatched error."""
        def __init__(self):
            # Chain a cert-verification error whose message is not recognized
            # by the flow's error matching.
            self.__context__ = ssl.SSLCertVerificationError(
                "some random message that doesn't match"
            )
    # Basic mode
    result = await opp.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert result["data_schema"] is None
    opp.config_entries.flow.async_abort(result["flow_id"])
    # Advanced automatic
    result = await opp.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": SOURCE_USER, "show_advanced_options": True},
    )
    assert result["data_schema"] is not None
    assert result["type"] == "form"
    assert result["step_id"] == "user_advanced"
    with patch("plexauth.PlexAuth.initiate_auth"):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input={"setup_method": AUTOMATIC_SETUP_STRING}
        )
    assert result["type"] == "external"
    opp.config_entries.flow.async_abort(result["flow_id"])
    # Advanced manual
    result = await opp.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": SOURCE_USER, "show_advanced_options": True},
    )
    assert result["data_schema"] is not None
    assert result["type"] == "form"
    assert result["step_id"] == "user_advanced"
    result = await opp.config_entries.flow.async_configure(
        result["flow_id"], user_input={"setup_method": MANUAL_SETUP_STRING}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "manual_setup"
    MANUAL_SERVER = {
        CONF_HOST: MOCK_SERVERS[0][CONF_HOST],
        CONF_PORT: MOCK_SERVERS[0][CONF_PORT],
        CONF_SSL: False,
        CONF_VERIFY_SSL: True,
        CONF_TOKEN: MOCK_TOKEN,
    }
    MANUAL_SERVER_NO_HOST_OR_TOKEN = {
        CONF_PORT: MOCK_SERVERS[0][CONF_PORT],
        CONF_SSL: False,
        CONF_VERIFY_SSL: True,
    }
    # Missing both host and token must be rejected by the form.
    result = await opp.config_entries.flow.async_configure(
        result["flow_id"], user_input=MANUAL_SERVER_NO_HOST_OR_TOKEN
    )
    assert result["type"] == "form"
    assert result["step_id"] == "manual_setup"
    assert result["errors"]["base"] == "host_or_token"
    with patch(
        "plexapi.server.PlexServer",
        side_effect=requests.exceptions.SSLError,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input=MANUAL_SERVER
        )
    assert result["type"] == "form"
    assert result["step_id"] == "manual_setup"
    assert result["errors"]["base"] == "ssl_error"
    # An unrecognized cert-verification message still maps to "ssl_error".
    with patch(
        "plexapi.server.PlexServer",
        side_effect=WrongCertValidaitionException,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input=MANUAL_SERVER
        )
    assert result["type"] == "form"
    assert result["step_id"] == "manual_setup"
    assert result["errors"]["base"] == "ssl_error"
    with patch(
        "openpeerpower.components.plex.PlexServer.connect",
        side_effect=requests.exceptions.SSLError,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input=MANUAL_SERVER
        )
    assert result["type"] == "form"
    assert result["step_id"] == "manual_setup"
    assert result["errors"]["base"] == "ssl_error"
    # Successful manual setup with discovery disabled.
    with patch("openpeerpower.components.plex.PlexWebsocket", autospec=True), patch(
        "openpeerpower.components.plex.GDM", return_value=MockGDM(disabled=True)
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input=MANUAL_SERVER
        )
    await opp.async_block_till_done()
    assert result["type"] == "create_entry"
    server_id = result["data"][CONF_SERVER_IDENTIFIER]
    mock_plex_server = opp.data[DOMAIN][SERVERS][server_id]
    assert result["title"] == mock_plex_server.url_in_use
    assert result["data"][CONF_SERVER] == mock_plex_server.friendly_name
    assert result["data"][CONF_SERVER_IDENTIFIER] == mock_plex_server.machine_identifier
    assert result["data"][PLEX_SERVER_CONFIG][CONF_URL] == mock_plex_server.url_in_use
    assert result["data"][PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_manual_config_with_token(opp, mock_plex_calls):
    """Test creating via manual configuration with only token."""
    # Start a user flow with advanced options enabled so the manual
    # setup method is offered as a choice.
    result = await opp.config_entries.flow.async_init(
        config_flow.DOMAIN,
        context={"source": SOURCE_USER, "show_advanced_options": True},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user_advanced"

    # Select the manual setup path.
    result = await opp.config_entries.flow.async_configure(
        result["flow_id"], user_input={"setup_method": MANUAL_SETUP_STRING}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "manual_setup"

    # Submit only a token; GDM discovery is disabled and the websocket is
    # mocked so setup completes without real network access.
    with patch(
        "openpeerpower.components.plex.GDM", return_value=MockGDM(disabled=True)
    ), patch("openpeerpower.components.plex.PlexWebsocket", autospec=True):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"], user_input={CONF_TOKEN: MOCK_TOKEN}
        )
    assert result["type"] == "create_entry"

    # The created entry should be populated from the (mocked) Plex server.
    server_id = result["data"][CONF_SERVER_IDENTIFIER]
    mock_plex_server = opp.data[DOMAIN][SERVERS][server_id]
    assert result["title"] == mock_plex_server.url_in_use
    assert result["data"][CONF_SERVER] == mock_plex_server.friendly_name
    assert result["data"][CONF_SERVER_IDENTIFIER] == mock_plex_server.machine_identifier
    assert result["data"][PLEX_SERVER_CONFIG][CONF_URL] == mock_plex_server.url_in_use
    assert result["data"][PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_setup_with_limited_credentials(opp, entry, setup_plex_server):
    """Test setup with a user with limited permissions."""
    # Simulate a token that is not allowed to list the server's accounts.
    with patch(
        "plexapi.server.PlexServer.systemAccounts",
        side_effect=plexapi.exceptions.Unauthorized,
    ) as mock_accounts:
        mock_plex_server = await setup_plex_server()

    assert mock_accounts.called

    # Setup should still succeed, just without account/owner information.
    plex_server = opp.data[DOMAIN][SERVERS][mock_plex_server.machine_identifier]
    assert len(plex_server.accounts) == 0
    assert plex_server.owner is None

    assert len(opp.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state is ConfigEntryState.LOADED
async def test_integration_discovery(opp):
    """Test integration self-discovery."""
    mock_gdm = MockGDM()

    # Run the GDM discovery scan with a mocked responder.
    with patch("openpeerpower.components.plex.config_flow.GDM", return_value=mock_gdm):
        await config_flow.async_discover(opp)

    flows = opp.config_entries.flow.async_progress()

    assert len(flows) == 1

    # The discovered server should start a flow keyed by its resource identifier.
    flow = flows[0]

    assert flow["handler"] == DOMAIN
    assert flow["context"]["source"] == SOURCE_INTEGRATION_DISCOVERY
    assert (
        flow["context"]["unique_id"]
        == mock_gdm.entries[0]["data"]["Resource-Identifier"]
    )
    assert flow["step_id"] == "user"
async def test_trigger_reauth(
    opp, entry, mock_plex_server, mock_websocket, current_request_with_host
):
    """Test setup and reauthorization of a Plex token."""
    await async_setup_component(opp, "persistent_notification", {})
    assert entry.state is ConfigEntryState.LOADED

    # Make the server reject the stored token so the next update fails and
    # the integration starts a reauth flow.
    with patch(
        "plexapi.server.PlexServer.clients", side_effect=plexapi.exceptions.Unauthorized
    ), patch("plexapi.server.PlexServer", side_effect=plexapi.exceptions.Unauthorized):
        trigger_plex_update(mock_websocket)
        await wait_for_debouncer(opp)

    assert len(opp.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state is not ConfigEntryState.LOADED

    flows = opp.config_entries.flow.async_progress()
    assert len(flows) == 1
    assert flows[0]["context"]["source"] == SOURCE_REAUTH

    flow_id = flows[0]["flow_id"]

    # Complete the external-auth reauth flow, yielding a fresh token.
    with patch("plexauth.PlexAuth.initiate_auth"), patch(
        "plexauth.PlexAuth.token", return_value="BRAND_NEW_TOKEN"
    ):
        result = await opp.config_entries.flow.async_configure(flow_id, user_input={})
        assert result["type"] == "external"

        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "external_done"

        result = await opp.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == "abort"
        assert result["reason"] == "reauth_successful"
        assert result["flow_id"] == flow_id

    # Reauth must update the existing entry in place rather than add one.
    assert len(opp.config_entries.flow.async_progress()) == 0
    assert len(opp.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state is ConfigEntryState.LOADED
    assert entry.data[CONF_SERVER] == mock_plex_server.friendly_name
    assert entry.data[CONF_SERVER_IDENTIFIER] == mock_plex_server.machine_identifier
    assert entry.data[PLEX_SERVER_CONFIG][CONF_URL] == PLEX_DIRECT_URL
    assert entry.data[PLEX_SERVER_CONFIG][CONF_TOKEN] == "BRAND_NEW_TOKEN"
async def test_client_request_missing(opp):
    """Test when client headers are not set properly."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"

    # Without an active HTTP request there is no client context available
    # for PlexAuth, so continuing the flow must raise.
    with patch("plexauth.PlexAuth.initiate_auth"), patch(
        "plexauth.PlexAuth.token", return_value=None
    ):
        with pytest.raises(RuntimeError):
            result = await opp.config_entries.flow.async_configure(
                result["flow_id"], user_input={}
            )
async def test_client_header_issues(opp, current_request_with_host):
    """Test when client headers are not set properly."""

    class MockRequest:
        # Stand-in for a request carrying no headers at all.
        headers = {}

    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"

    # A current request lacking the expected headers must make the flow
    # raise instead of proceeding with authentication.
    with patch("plexauth.PlexAuth.initiate_auth"), patch(
        "plexauth.PlexAuth.token", return_value=None
    ), patch(
        "openpeerpower.components.http.current_request.get", return_value=MockRequest()
    ):
        with pytest.raises(RuntimeError):
            result = await opp.config_entries.flow.async_configure(
                result["flow_id"], user_input={}
            )
| 35.349428
| 94
| 0.674299
| 3,360
| 27,820
| 5.295536
| 0.077083
| 0.089698
| 0.066543
| 0.071938
| 0.798235
| 0.764851
| 0.735345
| 0.712471
| 0.70539
| 0.695948
| 0
| 0.001404
| 0.20629
| 27,820
| 786
| 95
| 35.394402
| 0.804402
| 0.004349
| 0
| 0.634267
| 0
| 0
| 0.143442
| 0.053899
| 0
| 0
| 0
| 0
| 0.270181
| 1
| 0.001647
| false
| 0
| 0.026359
| 0
| 0.032949
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
864651c6e5194012a49d961e5509cf3e0228f203
| 93
|
py
|
Python
|
backend/apps/currency/admin.py
|
jorgejimenez98/backend-evaluacion-desempenno
|
08975303952608809375c5e2185bf20a84cc0f4e
|
[
"MIT"
] | null | null | null |
backend/apps/currency/admin.py
|
jorgejimenez98/backend-evaluacion-desempenno
|
08975303952608809375c5e2185bf20a84cc0f4e
|
[
"MIT"
] | null | null | null |
backend/apps/currency/admin.py
|
jorgejimenez98/backend-evaluacion-desempenno
|
08975303952608809375c5e2185bf20a84cc0f4e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Currency

# Register the Currency model with the default admin site so it can be
# managed through Django's built-in admin interface.
admin.site.register(Currency)
| 18.6
| 32
| 0.827957
| 13
| 93
| 5.923077
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107527
| 93
| 4
| 33
| 23.25
| 0.927711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8653b0571f51c0d778712ce6a264a5e4e27a4cce
| 357
|
py
|
Python
|
app/modules/core/tests/test_time_tags.py
|
nickmoreton/nhsx-website
|
2397d1308376c02b75323d30e6bc916af0daac9d
|
[
"MIT"
] | 50
|
2019-04-04T17:50:00.000Z
|
2021-08-05T15:08:37.000Z
|
app/modules/core/tests/test_time_tags.py
|
nickmoreton/nhsx-website
|
2397d1308376c02b75323d30e6bc916af0daac9d
|
[
"MIT"
] | 434
|
2019-04-04T18:25:32.000Z
|
2022-03-31T18:23:37.000Z
|
app/modules/core/tests/test_time_tags.py
|
nhsx-mirror/nhsx-website
|
2133b4e275ca35ff77f7d6874e809f139ec4bf86
|
[
"MIT"
] | 23
|
2019-04-04T09:52:07.000Z
|
2021-04-11T07:41:47.000Z
|
from modules.core.templatetags import time_tags
import datetime
def test_format_time():
    """format_time renders times in the friendly style used in templates."""
    cases = [
        (datetime.time(12, 0), "midday"),
        (datetime.time(0, 0), "midnight"),
        (datetime.time(15, 30), "3.30pm"),
        (datetime.time(15, 0), "3pm"),
    ]
    for value, expected in cases:
        assert time_tags.format_time(value) == expected
| 35.7
| 67
| 0.731092
| 54
| 357
| 4.62963
| 0.388889
| 0.16
| 0.224
| 0.32
| 0.592
| 0.592
| 0.592
| 0.304
| 0
| 0
| 0
| 0.051447
| 0.128852
| 357
| 9
| 68
| 39.666667
| 0.752412
| 0
| 0
| 0
| 0
| 0
| 0.064426
| 0
| 0
| 0
| 0
| 0
| 0.571429
| 1
| 0.142857
| true
| 0
| 0.285714
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
865c6c1d97a6a50724fe87a96dbfea62bcc56438
| 39
|
py
|
Python
|
tests/components/tplink/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 23
|
2017-11-15T21:03:53.000Z
|
2021-03-29T21:33:48.000Z
|
tests/components/tplink/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 79
|
2020-07-23T07:13:37.000Z
|
2022-03-22T06:02:37.000Z
|
tests/components/tplink/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 14
|
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""Tests for the TP-Link component."""
| 19.5
| 38
| 0.666667
| 6
| 39
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 39
| 1
| 39
| 39
| 0.764706
| 0.820513
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8680468416037e610a5589f554376faa8971cb38
| 11
|
py
|
Python
|
helloworld.py
|
abhaykoushal/test
|
dbda82fdb0743d161b8c593a9b4fccd244fd628b
|
[
"MIT"
] | null | null | null |
helloworld.py
|
abhaykoushal/test
|
dbda82fdb0743d161b8c593a9b4fccd244fd628b
|
[
"MIT"
] | null | null | null |
helloworld.py
|
abhaykoushal/test
|
dbda82fdb0743d161b8c593a9b4fccd244fd628b
|
[
"MIT"
] | null | null | null |
# True division of 2 by 1 yields the float 2.0, which is printed.
print(2 / 1)
| 5.5
| 10
| 0.636364
| 3
| 11
| 2.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0.090909
| 11
| 1
| 11
| 11
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
86b0fc16a83a8b9527ace118f40d3c55168e0f70
| 194
|
py
|
Python
|
edifact/messages/un.py
|
FriedrichK/python-edifact
|
77a7cf3d053fb4dbb21e61559d603ab609f96e19
|
[
"Apache-2.0"
] | 5
|
2016-04-28T06:04:28.000Z
|
2022-02-15T12:23:47.000Z
|
edifact/messages/un.py
|
FriedrichK/python-edifact
|
77a7cf3d053fb4dbb21e61559d603ab609f96e19
|
[
"Apache-2.0"
] | null | null | null |
edifact/messages/un.py
|
FriedrichK/python-edifact
|
77a7cf3d053fb4dbb21e61559d603ab609f96e19
|
[
"Apache-2.0"
] | 6
|
2016-12-14T11:47:12.000Z
|
2020-05-16T14:37:31.000Z
|
import edifact.configuration
from edifact.messages.base import Message
class MSCONS(Message):
    """EDIFACT MSCONS message type (metered services consumption report)."""
    class Meta:
        # Name of the EDIFACT message specification this class implements.
        spec = 'MSCONS'

# Register the message type so it can be looked up by name at runtime.
edifact.configuration.MESSAGE_CLASSES['MSCONS'] = MSCONS
| 19.4
| 56
| 0.752577
| 22
| 194
| 6.590909
| 0.545455
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159794
| 194
| 9
| 57
| 21.555556
| 0.889571
| 0
| 0
| 0
| 0
| 0
| 0.061856
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
86b6ce82bd51d63cb86c16bd72f79b7a4b043556
| 10,676
|
py
|
Python
|
micronet/compression/quantization/wqaq/dorefa/quantize.py
|
chenjun2hao/micronet
|
c8b148a808dc1f2dbfda948e7897692182f1d722
|
[
"MIT"
] | 1
|
2021-03-02T03:44:44.000Z
|
2021-03-02T03:44:44.000Z
|
micronet/compression/quantization/wqaq/dorefa/quantize.py
|
chenjun2hao/micronet
|
c8b148a808dc1f2dbfda948e7897692182f1d722
|
[
"MIT"
] | null | null | null |
micronet/compression/quantization/wqaq/dorefa/quantize.py
|
chenjun2hao/micronet
|
c8b148a808dc1f2dbfda948e7897692182f1d722
|
[
"MIT"
] | null | null | null |
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
# ********************* Quantizers *********************
# Rounding with a straight-through estimator (STE)
class Round(Function):
    """Round to the nearest integer with a straight-through estimator (STE).

    The forward pass applies ``torch.round``; the backward pass hands the
    incoming gradient through unchanged, since rounding itself has zero
    gradient almost everywhere.
    """

    @staticmethod
    def forward(self, input):
        # Non-differentiable rounding step.
        return torch.round(input)

    @staticmethod
    def backward(self, grad_output):
        # STE: treat rounding as the identity for gradient purposes.
        return grad_output.clone()
# Activation (feature) quantizer
class ActivationQuantizer(nn.Module):
    """DoReFa-style activation quantizer.

    ``a_bits == 32`` is a full-precision pass-through; binary (1-bit)
    activations are not supported; otherwise activations are scaled,
    clamped to [0, 1] and quantized/dequantized onto ``a_bits`` uniform
    levels using STE rounding.
    """

    def __init__(self, a_bits):
        super(ActivationQuantizer, self).__init__()
        self.a_bits = a_bits

    def round(self, input):
        # STE rounding (identity gradient).
        return Round.apply(input)

    def forward(self, input):
        if self.a_bits == 32:
            # Full precision: no quantization applied.
            output = input
        elif self.a_bits == 1:
            print('!Binary quantization is not supported !')
            assert self.a_bits != 1
        else:
            # Scale down (* 0.1) before clamping to shrink truncation error.
            clamped = torch.clamp(input * 0.1, 0, 1)
            step = 1 / float(2 ** self.a_bits - 1)
            # Quantize then immediately dequantize (simulated quantization).
            output = self.round(clamped / step) * step
        return output
# Weight quantizer
class WeightQuantizer(nn.Module):
    """DoReFa-style weight quantizer.

    ``w_bits == 32`` passes weights through unchanged; 1-bit is not
    supported; otherwise weights are squashed with tanh, normalized to
    [0, 1], quantized onto ``w_bits`` uniform levels with STE rounding and
    mapped back to [-1, 1].
    """

    def __init__(self, w_bits):
        super(WeightQuantizer, self).__init__()
        self.w_bits = w_bits

    def round(self, input):
        # STE rounding (identity gradient).
        return Round.apply(input)

    def forward(self, input):
        if self.w_bits == 32:
            output = input
        elif self.w_bits == 1:
            print('!Binary quantization is not supported !')
            assert self.w_bits != 1
        else:
            squashed = torch.tanh(input)
            # Normalize into [0, 1].
            normalized = squashed / 2 / torch.max(torch.abs(squashed)) + 0.5
            step = 1 / float(2 ** self.w_bits - 1)
            # Quantize/dequantize, then map back onto [-1, 1].
            output = 2 * (self.round(normalized / step) * step) - 1
        return output
class QuantConv2d(nn.Conv2d):
    """``nn.Conv2d`` with DoReFa quantization of activations and weights.

    Drop-in replacement for ``nn.Conv2d``; the extra arguments select the
    activation (``a_bits``) and weight (``w_bits``) bit widths.  When
    ``quant_inference`` is True the stored weights are assumed to be
    already quantized and are used as-is.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 a_bits=8,
                 w_bits=8,
                 quant_inference=False):
        super(QuantConv2d, self).__init__(in_channels, out_channels, kernel_size,
                                          stride, padding, dilation, groups,
                                          bias, padding_mode)
        self.quant_inference = quant_inference
        self.activation_quantizer = ActivationQuantizer(a_bits=a_bits)
        self.weight_quantizer = WeightQuantizer(w_bits=w_bits)

    def forward(self, input):
        quant_input = self.activation_quantizer(input)
        # BUG FIX: the original re-assigned ``quant_input = input`` here,
        # silently discarding the quantized activations (the sibling
        # QuantConvTranspose2d/QuantLinear classes have no such line).
        if not self.quant_inference:
            quant_weight = self.weight_quantizer(self.weight)
        else:
            quant_weight = self.weight
        output = F.conv2d(quant_input, quant_weight, self.bias, self.stride,
                          self.padding, self.dilation, self.groups)
        return output
class QuantConvTranspose2d(nn.ConvTranspose2d):
    """``nn.ConvTranspose2d`` with DoReFa quantization of activations/weights.

    Drop-in replacement for ``nn.ConvTranspose2d``; ``a_bits``/``w_bits``
    select the bit widths and ``quant_inference`` skips weight quantization
    for already-quantized weights.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 a_bits=8,
                 w_bits=8,
                 quant_inference=False):
        # BUG FIX: nn.ConvTranspose2d's signature is
        # (..., output_padding, groups, bias, dilation, padding_mode) —
        # the original passed dilation/groups/bias positionally in the
        # wrong slots.  Keyword arguments make the mapping explicit.
        super(QuantConvTranspose2d, self).__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, output_padding=output_padding, groups=groups,
            bias=bias, dilation=dilation, padding_mode=padding_mode)
        self.quant_inference = quant_inference
        self.activation_quantizer = ActivationQuantizer(a_bits=a_bits)
        self.weight_quantizer = WeightQuantizer(w_bits=w_bits)

    def forward(self, input):
        quant_input = self.activation_quantizer(input)
        if not self.quant_inference:
            quant_weight = self.weight_quantizer(self.weight)
        else:
            quant_weight = self.weight
        output = F.conv_transpose2d(quant_input, quant_weight, self.bias,
                                    self.stride, self.padding,
                                    self.output_padding, self.groups,
                                    self.dilation)
        return output
class QuantLinear(nn.Linear):
    """``nn.Linear`` with DoReFa quantization of activations and weights."""

    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 a_bits=8,
                 w_bits=8,
                 quant_inference=False):
        super(QuantLinear, self).__init__(in_features, out_features, bias)
        self.quant_inference = quant_inference
        self.activation_quantizer = ActivationQuantizer(a_bits=a_bits)
        self.weight_quantizer = WeightQuantizer(w_bits=w_bits)

    def forward(self, input):
        quant_input = self.activation_quantizer(input)
        if self.quant_inference:
            # Weights are assumed pre-quantized at inference time.
            quant_weight = self.weight
        else:
            quant_weight = self.weight_quantizer(self.weight)
        return F.linear(quant_input, quant_weight, self.bias)
def add_quant_op(module, layer_counter, a_bits=8, w_bits=8,
                 quant_inference=False):
    """Recursively replace quantizable layers with their quantized versions.

    Walks ``module``'s children and swaps ``nn.Conv2d``,
    ``nn.ConvTranspose2d`` and ``nn.Linear`` layers for their Quant*
    equivalents, copying the original weights (and bias, when present).
    ``layer_counter`` is a single-element list used as a mutable counter so
    the very first quantizable layer is left at full precision.

    Refactor note: the original duplicated each constructor call across
    bias/no-bias branches; a ``has_bias`` flag removes that duplication
    with identical behavior.
    """
    for name, child in module.named_children():
        if isinstance(child, nn.Conv2d):
            layer_counter[0] += 1
            if layer_counter[0] > 1:  # keep the first layer full-precision
                has_bias = child.bias is not None
                quant_conv = QuantConv2d(child.in_channels, child.out_channels,
                                         child.kernel_size, stride=child.stride,
                                         padding=child.padding,
                                         dilation=child.dilation,
                                         groups=child.groups, bias=has_bias,
                                         padding_mode=child.padding_mode,
                                         a_bits=a_bits, w_bits=w_bits,
                                         quant_inference=quant_inference)
                if has_bias:
                    quant_conv.bias.data = child.bias
                quant_conv.weight.data = child.weight
                module._modules[name] = quant_conv
        elif isinstance(child, nn.ConvTranspose2d):
            layer_counter[0] += 1
            if layer_counter[0] > 1:
                has_bias = child.bias is not None
                quant_conv_transpose = QuantConvTranspose2d(
                    child.in_channels, child.out_channels, child.kernel_size,
                    stride=child.stride, padding=child.padding,
                    output_padding=child.output_padding,
                    dilation=child.dilation, groups=child.groups,
                    bias=has_bias, padding_mode=child.padding_mode,
                    a_bits=a_bits, w_bits=w_bits,
                    quant_inference=quant_inference)
                if has_bias:
                    quant_conv_transpose.bias.data = child.bias
                quant_conv_transpose.weight.data = child.weight
                module._modules[name] = quant_conv_transpose
        elif isinstance(child, nn.Linear):
            layer_counter[0] += 1
            if layer_counter[0] > 1:
                has_bias = child.bias is not None
                quant_linear = QuantLinear(child.in_features,
                                           child.out_features,
                                           bias=has_bias, a_bits=a_bits,
                                           w_bits=w_bits,
                                           quant_inference=quant_inference)
                if has_bias:
                    quant_linear.bias.data = child.bias
                quant_linear.weight.data = child.weight
                module._modules[name] = quant_linear
        else:
            # Not a quantizable leaf: recurse into the submodule.
            add_quant_op(child, layer_counter, a_bits=a_bits, w_bits=w_bits,
                         quant_inference=quant_inference)
def prepare(model, inplace=False, a_bits=8, w_bits=8, quant_inference=False):
    """Return ``model`` with quantizable layers replaced by Quant* modules.

    When ``inplace`` is False (the default) the model is deep-copied first
    and the original is left untouched.
    """
    target = model if inplace else copy.deepcopy(model)
    counter = [0]
    add_quant_op(target, counter, a_bits=a_bits, w_bits=w_bits,
                 quant_inference=quant_inference)
    return target
| 44.483333
| 123
| 0.492038
| 1,023
| 10,676
| 4.878788
| 0.107527
| 0.034061
| 0.036065
| 0.024043
| 0.773993
| 0.744941
| 0.720297
| 0.711881
| 0.703266
| 0.67201
| 0
| 0.012068
| 0.433402
| 10,676
| 239
| 124
| 44.669456
| 0.813027
| 0.016767
| 0
| 0.644231
| 0
| 0
| 0.008395
| 0
| 0
| 0
| 0
| 0
| 0.009615
| 1
| 0.076923
| false
| 0
| 0.024038
| 0
| 0.177885
| 0.009615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
86cd5d983d112322a969d6ec76ba1f5e8b17eb26
| 152
|
py
|
Python
|
models/__init__.py
|
miguelhatrick/climbing_gym_school
|
d2ebaab2450bf19e5318b38d5ec565b9e185d28d
|
[
"Apache-2.0"
] | null | null | null |
models/__init__.py
|
miguelhatrick/climbing_gym_school
|
d2ebaab2450bf19e5318b38d5ec565b9e185d28d
|
[
"Apache-2.0"
] | null | null | null |
models/__init__.py
|
miguelhatrick/climbing_gym_school
|
d2ebaab2450bf19e5318b38d5ec565b9e185d28d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from . import career, course, course_student, course_type, exam, exam_student
from . import pos_order, sale_order, res_partner
| 30.4
| 77
| 0.743421
| 22
| 152
| 4.863636
| 0.681818
| 0.186916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 0.138158
| 152
| 4
| 78
| 38
| 0.80916
| 0.138158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
86f07820c7d0a30a253f51404ae75a6ea5dddd8a
| 25,032
|
py
|
Python
|
scripts/file_mypyvy_to_json.py
|
b1f6c1c4/cfg-enum
|
1a08071bde87f578ceaf834004c01b593db9bce8
|
[
"BSD-3-Clause"
] | 4
|
2021-06-11T07:34:50.000Z
|
2022-03-30T15:32:07.000Z
|
scripts/file_mypyvy_to_json.py
|
b1f6c1c4/cfg-enum
|
1a08071bde87f578ceaf834004c01b593db9bce8
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/file_mypyvy_to_json.py
|
b1f6c1c4/cfg-enum
|
1a08071bde87f578ceaf834004c01b593db9bce8
|
[
"BSD-3-Clause"
] | 2
|
2021-11-16T09:13:46.000Z
|
2021-12-20T14:53:57.000Z
|
import z3 # pip3 install z3-solver
import sys
import json
import os
import argparse
from typing import cast
sys.path.insert(0, os.path.abspath(os.path.join(
os.path.dirname(__file__), '../mypyvy/src')))
import mypyvy
import utils
import parser
#import typechecker
import syntax
"""
def parse_program(input: str, force_rebuild: bool = False, filename: Optional[str] = None) -> Program:
l = parser.get_lexer()
p = parser.get_parser(forbid_rebuild=force_rebuild)
return p.parse(input=input, lexer=l, filename=filename)
def parse_args(args: List[str]) -> utils.MypyvyArgs:
argparser = argparse.ArgumentParser()
subparsers = argparser.add_subparsers(title='subcommands', dest='subcommand')
all_subparsers = []
verify_subparser = subparsers.add_parser('verify', help='verify that the invariants are inductive')
verify_subparser.set_defaults(main=verify)
all_subparsers.append(verify_subparser)
updr_subparser = subparsers.add_parser('updr', help='search for a strengthening that proves the invariant named by the --safety=NAME flag')
updr_subparser.set_defaults(main=do_updr)
all_subparsers.append(updr_subparser)
bmc_subparser = subparsers.add_parser('bmc', help='bounded model check to depth given by the --depth=DEPTH flag for property given by the --safety=NAME flag')
bmc_subparser.set_defaults(main=bmc)
all_subparsers.append(bmc_subparser)
theorem_subparser = subparsers.add_parser('theorem', help='check state-independent theorems about the background axioms of a model')
theorem_subparser.set_defaults(main=theorem)
all_subparsers.append(theorem_subparser)
trace_subparser = subparsers.add_parser('trace', help='search for concrete executions that satisfy query described by the file\'s trace declaration')
trace_subparser.set_defaults(main=trace)
all_subparsers.append(trace_subparser)
generate_parser_subparser = subparsers.add_parser('generate-parser', help='internal command used by benchmarking infrastructure to avoid certain race conditions')
generate_parser_subparser.set_defaults(main=nop) # parser is generated implicitly by main when it parses the program
all_subparsers.append(generate_parser_subparser)
typecheck_subparser = subparsers.add_parser('typecheck', help='typecheck the file, report any errors, and exit')
typecheck_subparser.set_defaults(main=nop) # program is always typechecked; no further action required
all_subparsers.append(typecheck_subparser)
relax_subparser = subparsers.add_parser('relax', help='produce a version of the file that is "relaxed", in a way that is indistinguishable for universal invariants')
relax_subparser.set_defaults(main=relax)
all_subparsers.append(relax_subparser)
all_subparsers += pd.add_argparsers(subparsers)
all_subparsers += pd_fol.add_argparsers(subparsers)
for s in all_subparsers:
s.add_argument('--forbid-parser-rebuild', action=utils.YesNoAction, default=False,
help='force loading parser from disk (helps when running mypyvy from multiple processes)')
s.add_argument('--log', default='warning', choices=['error', 'warning', 'info', 'debug'],
help='logging level')
s.add_argument('--log-time', action=utils.YesNoAction, default=False,
help='make each log message include current time')
s.add_argument('--log-xml', action=utils.YesNoAction, default=False,
help='log in XML format')
s.add_argument('--seed', type=int, default=0, help="value for z3's smt.random_seed")
s.add_argument('--print-program-repr', action=utils.YesNoAction, default=False,
help='print a machine-readable representation of the program after parsing')
s.add_argument('--print-program', action=utils.YesNoAction, default=False,
help='print the program after parsing')
s.add_argument('--key-prefix',
help='additional string to use in front of names sent to z3')
s.add_argument('--minimize-models', action=utils.YesNoAction, default=True,
help='search for models with minimal cardinality')
s.add_argument('--timeout', type=int, default=None,
help='z3 timeout (milliseconds)')
s.add_argument('--exit-on-error', action=utils.YesNoAction, default=False,
help='exit after reporting first error')
s.add_argument('--ipython', action=utils.YesNoAction, default=False,
help='run IPython with s and prog at the end')
s.add_argument('--error-filename-basename', action=utils.YesNoAction, default=False,
help='print only the basename of the input file in error messages')
s.add_argument('--query-time', action=utils.YesNoAction, default=True,
help='report how long various z3 queries take')
s.add_argument('--print-counterexample', action=utils.YesNoAction, default=True,
help='print counterexamples')
s.add_argument('--print-cmdline', action=utils.YesNoAction, default=True,
help='print the command line passed to mypyvy')
s.add_argument('--clear-cache', action=utils.YesNoAction, default=False,
help='do not load from cache, but dump to cache as usual (effectively clearing the cache before starting)')
s.add_argument('--clear-cache-memo', action=utils.YesNoAction, default=False,
help='load only discovered states from the cache, but dump to cache as usual (effectively clearing the memoization cache before starting, while keeping discovered states and transitions)')
s.add_argument('--cache-only', action=utils.YesNoAction, default=False,
help='assert that the caches already contain all the answers')
s.add_argument('--cache-only-discovered', action=utils.YesNoAction, default=False,
help='assert that the discovered states already contain all the answers')
s.add_argument('--print-exit-code', action=utils.YesNoAction, default=False,
help='print the exit code before exiting (good for regression testing)')
s.add_argument('--cvc4', action='store_true',
help='use CVC4 as the backend solver. this is not very well supported.')
# for diagrams:
s.add_argument('--simplify-diagram', action=utils.YesNoAction,
default=(s is updr_subparser),
default_description='yes for updr, else no',
help='in diagram generation, substitute existentially quantified variables that are equal to constants')
s.add_argument('--diagrams-subclause-complete', action=utils.YesNoAction, default=False,
help='in diagram generation, "complete" the diagram so that every stronger '
'clause is a subclause')
updr_subparser.add_argument('--use-z3-unsat-cores', action=utils.YesNoAction, default=True,
help='generalize diagrams using brute force instead of unsat cores')
updr_subparser.add_argument('--smoke-test', action=utils.YesNoAction, default=False,
help='(for debugging mypyvy itself) run bmc to confirm every conjunct added to a frame')
updr_subparser.add_argument('--assert-inductive-trace', action=utils.YesNoAction, default=False,
help='(for debugging mypyvy itself) check that frames are always inductive')
updr_subparser.add_argument('--sketch', action=utils.YesNoAction, default=False,
help='use sketched invariants as additional safety (currently only in automaton)')
updr_subparser.add_argument('--automaton', action=utils.YesNoAction, default=False,
help='whether to run vanilla UPDR or phase UPDR')
updr_subparser.add_argument('--block-may-cexs', action=utils.YesNoAction, default=False,
help="treat failures to push as additional proof obligations")
updr_subparser.add_argument('--push-frame-zero', default='if_trivial', choices=['if_trivial', 'always', 'never'],
help="push lemmas from the initial frame: always/never/if_trivial, the latter is when there is more than one phase")
verify_subparser.add_argument('--automaton', default='yes', choices=['yes', 'no', 'only'],
help="whether to use phase automata during verification. by default ('yes'), both non-automaton "
"and automaton proofs are checked. 'no' means ignore automaton proofs. "
"'only' means ignore non-automaton proofs.")
verify_subparser.add_argument('--check-transition', default=None, nargs='+',
help="when verifying inductiveness, check only these transitions")
verify_subparser.add_argument('--check-invariant', default=None, nargs='+',
help="when verifying inductiveness, check only these invariants")
verify_subparser.add_argument('--json', action='store_true',
help="output machine-parseable verification results in JSON format")
verify_subparser.add_argument('--smoke-test-solver', action=utils.YesNoAction, default=False,
help='(for debugging mypyvy itself) double check countermodels by evaluation')
updr_subparser.add_argument('--checkpoint-in',
help='start from internal state as stored in given file')
updr_subparser.add_argument('--checkpoint-out',
help='store internal state to given file') # TODO: say when
bmc_subparser.add_argument('--safety', help='property to check')
bmc_subparser.add_argument('--depth', type=int, default=3, metavar='N',
help='number of steps to check')
argparser.add_argument('filename')
return cast(utils.MypyvyArgs, argparser.parse_args(args))
"""
"""
def parse_program(input, filename = None):
l = parser.get_lexer()
p = parser.get_parser(forbid_rebuild=False)
prog = p.parse(input=input, lexer=l, filename=filename)
prog.input = input
return prog
# copied from mypyvy/src/mypyvy.py
def parse_args(args):
argparser = argparse.ArgumentParser()
subparsers = argparser.add_subparsers(title='subcommands', dest='subcommand')
all_subparsers = []
verify_subparser = subparsers.add_parser('verify', help='verify that the invariants are inductive')
#verify_subparser.set_defaults(main=verify)
all_subparsers.append(verify_subparser)
updr_subparser = subparsers.add_parser(
'updr',
help='search for a strengthening that proves the invariant named by the --safety=NAME flag')
#updr_subparser.set_defaults(main=do_updr)
all_subparsers.append(updr_subparser)
bmc_subparser = subparsers.add_parser(
'bmc',
help='bounded model check to depth given by the --depth=DEPTH flag '
'for property given by the --safety=NAME flag')
#bmc_subparser.set_defaults(main=bmc)
all_subparsers.append(bmc_subparser)
theorem_subparser = subparsers.add_parser(
'theorem',
help='check state-independent theorems about the background axioms of a model')
#theorem_subparser.set_defaults(main=theorem)
all_subparsers.append(theorem_subparser)
trace_subparser = subparsers.add_parser(
'trace',
help='search for concrete executions that satisfy query described by the file\'s trace declaration')
#trace_subparser.set_defaults(main=trace)
all_subparsers.append(trace_subparser)
generate_parser_subparser = subparsers.add_parser(
'generate-parser',
help='internal command used by benchmarking infrastructure to avoid certain race conditions')
# parser is generated implicitly by main when it parses the program, so we can just nop here
#generate_parser_subparser.set_defaults(main=nop)
all_subparsers.append(generate_parser_subparser)
typecheck_subparser = subparsers.add_parser('typecheck', help='typecheck the file, report any errors, and exit')
#typecheck_subparser.set_defaults(main=nop) # program is always typechecked; no further action required
all_subparsers.append(typecheck_subparser)
relax_subparser = subparsers.add_parser(
'relax',
help='produce a version of the file that is "relaxed", '
'in a way that is indistinguishable for universal invariants')
#relax_subparser.set_defaults(main=relax)
all_subparsers.append(relax_subparser)
check_one_bounded_width_invariant_parser = subparsers.add_parser(
'check-one-bounded-width-invariant',
help='popl'
)
#check_one_bounded_width_invariant_parser.set_defaults(main=check_one_bounded_width_invariant)
all_subparsers.append(check_one_bounded_width_invariant_parser)
#all_subparsers += pd.add_argparsers(subparsers)
#all_subparsers += rethink.add_argparsers(subparsers)
#all_subparsers += sep.add_argparsers(subparsers)
for s in all_subparsers:
s.add_argument('--forbid-parser-rebuild', action=utils.YesNoAction, default=False,
help='force loading parser from disk (helps when running mypyvy from multiple processes)')
s.add_argument('--log', default='warning', choices=['error', 'warning', 'info', 'debug'],
help='logging level')
s.add_argument('--log-time', action=utils.YesNoAction, default=False,
help='make each log message include current time')
s.add_argument('--log-xml', action=utils.YesNoAction, default=False,
help='log in XML format')
s.add_argument('--seed', type=int, default=0, help="value for z3's smt.random_seed")
s.add_argument('--print-program',
choices=['str', 'repr', 'faithful', 'without-invariants'],
help='print program after parsing using given strategy')
s.add_argument('--key-prefix',
help='additional string to use in front of names sent to z3')
s.add_argument('--minimize-models', action=utils.YesNoAction, default=True,
help='search for models with minimal cardinality')
s.add_argument('--timeout', type=int, default=None,
help='z3 timeout (milliseconds)')
s.add_argument('--exit-on-error', action=utils.YesNoAction, default=False,
help='exit after reporting first error')
s.add_argument('--ipython', action=utils.YesNoAction, default=False,
help='run IPython with s and prog at the end')
s.add_argument('--error-filename-basename', action=utils.YesNoAction, default=False,
help='print only the basename of the input file in error messages')
s.add_argument('--query-time', action=utils.YesNoAction, default=True,
help='report how long various z3 queries take')
s.add_argument('--print-counterexample', action=utils.YesNoAction, default=True,
help='print counterexamples')
s.add_argument('--print-negative-tuples', action=utils.YesNoAction, default=False,
help='print negative counterexamples')
s.add_argument('--print-cmdline', action=utils.YesNoAction, default=True,
help='print the command line passed to mypyvy')
s.add_argument('--clear-cache', action=utils.YesNoAction, default=False,
help='do not load from cache, but dump to cache as usual '
'(effectively clearing the cache before starting)')
s.add_argument('--clear-cache-memo', action=utils.YesNoAction, default=False,
help='load only discovered states from the cache, but dump to cache as usual '
'(effectively clearing the memoization cache before starting, '
'while keeping discovered states and transitions)')
s.add_argument('--cache-only', action=utils.YesNoAction, default=False,
help='assert that the caches already contain all the answers')
s.add_argument('--cache-only-discovered', action=utils.YesNoAction, default=False,
help='assert that the discovered states already contain all the answers')
s.add_argument('--print-exit-code', action=utils.YesNoAction, default=False,
help='print the exit code before exiting (good for regression testing)')
s.add_argument('--exit-0', action=utils.YesNoAction, default=False,
help='always exit with status 0 (good for testing)')
s.add_argument('--cvc4', action='store_true',
help='use CVC4 as the backend solver. this is not very well supported.')
s.add_argument('--smoke-test-solver', action=utils.YesNoAction, default=False,
help='(for debugging mypyvy itself) double check countermodels by evaluation')
# for diagrams:
s.add_argument('--simplify-diagram', action=utils.YesNoAction,
default=(s is updr_subparser),
default_description='yes for updr, else no',
help='in diagram generation, substitute existentially quantified variables '
'that are equal to constants')
updr_subparser.add_argument('--use-z3-unsat-cores', action=utils.YesNoAction, default=True,
help='generalize using unsat cores rather than brute force')
updr_subparser.add_argument('--assert-inductive-trace', action=utils.YesNoAction, default=False,
help='(for debugging mypyvy itself) check that frames are always inductive')
verify_subparser.add_argument('--check-transition', default=None, nargs='+',
help="when verifying inductiveness, check only these transitions")
verify_subparser.add_argument('--check-invariant', default=None, nargs='+',
help="when verifying inductiveness, check only these invariants")
verify_subparser.add_argument('--json', action='store_true',
help="output machine-parseable verification results in JSON format")
updr_subparser.add_argument('--checkpoint-in',
help='start from internal state as stored in given file')
updr_subparser.add_argument('--checkpoint-out',
help='store internal state to given file') # TODO: say when
bmc_subparser.add_argument('--safety', help='property to check')
bmc_subparser.add_argument('--depth', type=int, default=3, metavar='N',
help='number of steps to check')
bmc_subparser.add_argument('--relax', action=utils.YesNoAction, default=False,
help='relaxed semantics (domain can decrease)')
argparser.add_argument('filename')
#return cast(utils.MypyvyArgs, argparser.parse_args(args))
return cast(utils.MypyvyArgs, argparser.parse_args(args))
"""
def binder_to_json(binder):
d = []
for v in binder.vs:
d.append(["var", v.name, sort_to_json(v.sort)])
return d
class Mods(object):
def __init__(self, mods):
self.mods = mods
self.in_old = False
def with_old(self):
assert (self.mods != None)
m = Mods(self.mods)
m.in_old = True
return m
def expr_to_json(fs, m, vs, e):
if isinstance(e, syntax.QuantifierExpr):
assert e.quant in ("FORALL", "EXISTS")
is_forall = e.quant == "FORALL"
decls = binder_to_json(e.binder)
w = dict(vs)
for v in e.binder.vs:
w[v.name] = sort_to_json(v.sort)
body = expr_to_json(fs, m, w, e.body)
return ["forall" if is_forall else "exists", decls, body]
elif isinstance(e, syntax.AppExpr):
so = fs[e.callee]
if (not m.in_old) and m.mods != None and e.callee in m.mods:
c = ["const", e.callee + "'", so]
else:
c = ["const", e.callee, so]
return ["apply",
c,
[expr_to_json(fs, m, vs, arg) for arg in e.args]
]
elif isinstance(e, syntax.Id):
if e.name in vs:
return ["var", e.name, vs[e.name]]
else:
assert e.name in fs
if (not m.in_old) and m.mods != None and e.name in m.mods:
return ["const", e.name + "'", fs[e.name]]
else:
return ["const", e.name, fs[e.name]]
elif isinstance(e, syntax.UnaryExpr):
if e.op == "NOT":
return ["not", expr_to_json(fs, m, vs, e.arg)]
elif e.op == "OLD":
return expr_to_json(fs, m.with_old(), vs, e.arg)
else:
print("unary", e.op)
assert False
elif isinstance(e, syntax.BinaryExpr):
if e.op == "IMPLIES":
return ["implies", expr_to_json(fs, m, vs, e.arg1), expr_to_json(fs, m, vs, e.arg2)]
elif e.op == "EQUAL":
return ["eq", expr_to_json(fs, m, vs, e.arg1), expr_to_json(fs, m, vs, e.arg2)]
elif e.op == "IFF":
return ["eq", expr_to_json(fs, m, vs, e.arg1), expr_to_json(fs, m, vs, e.arg2)]
elif e.op == "NOTEQ":
return ["not", ["eq", expr_to_json(fs, m, vs, e.arg1), expr_to_json(fs, m, vs, e.arg2)]]
else:
print("binary", e.op)
assert False
elif isinstance(e, syntax.NaryExpr):
if e.op == "AND":
return ["and", [expr_to_json(fs, m, vs, a) for a in e.args]]
if e.op == "OR":
return ["or", [expr_to_json(fs, m, vs, a) for a in e.args]]
else:
print("nary", e.op)
assert False
elif isinstance(e, syntax.IfThenElse):
return ["ite",
expr_to_json(fs, m, vs, e.branch),
expr_to_json(fs, m, vs, e.then),
expr_to_json(fs, m, vs, e.els)
]
else:
print(type(e))
print(dir(e))
assert False
def get_sorts(prog):
return [sort.name for sort in prog.sorts()]
def sort_to_json(r):
return ["uninterpretedSort", r.name]
def boolean_sort_json():
return ["booleanSort"]
def get_functions(prog):
funcs = []
for f in prog.relations_constants_and_functions():
if isinstance(f, syntax.RelationDecl):
dom = [sort_to_json(r) for r in f.arity]
rng = boolean_sort_json()
funcs.append(["const", f.name, ["functionSort", dom, rng]])
elif isinstance(f, syntax.ConstantDecl):
funcs.append(["const", f.name, sort_to_json(f.sort)])
elif isinstance(f, syntax.FunctionDecl):
dom = [sort_to_json(r) for r in f.arity]
rng = sort_to_json(f.sort)
funcs.append(["const", f.name, ["functionSort", dom, rng]])
return funcs
def get_fs(prog):
fs = {}
for f in prog.relations_constants_and_functions():
if isinstance(f, syntax.RelationDecl):
dom = [sort_to_json(r) for r in f.arity]
rng = boolean_sort_json()
fs[f.name] = ["functionSort", dom, rng]
elif isinstance(f, syntax.ConstantDecl):
fs[f.name] = sort_to_json(f.sort)
elif isinstance(f, syntax.FunctionDecl):
dom = [sort_to_json(r) for r in f.arity]
rng = sort_to_json(f.sort)
fs[f.name] = ["functionSort", dom, rng]
return fs
def get_axioms(prog):
fs = get_fs(prog)
return [expr_to_json(fs, Mods(None), {}, e.expr) for e in prog.axioms()]
def get_inits(prog):
fs = get_fs(prog)
return [expr_to_json(fs, Mods(None), {}, e.expr) for e in prog.inits()]
def get_conjs(prog):
fs = get_fs(prog)
return [expr_to_json(fs, Mods(None), {}, e.expr) for e in prog.safeties()]
def get_actions(prog):
fs = get_fs(prog)
a = {}
for e in prog.transitions():
#assert (e.num_states == 2)
decls = binder_to_json(e.binder)
vs = {v.name : sort_to_json(v.sort) for v in e.binder.vs}
mod_names = [m.name for m in e.mods]
m = Mods(mod_names)
ex = expr_to_json(fs, m, vs, e.expr)
if len(vs) > 0:
ex = ["exists", decls, ex]
a[e.name] = ["relation", mod_names, ex]
return a
def main():
filename = sys.argv[1]
utils.args = mypyvy.parse_args(['typecheck', filename])
with open(filename) as f:
contents = f.read()
prog = mypyvy.parse_program(contents, filename)
prog.resolve()
#typechecker.typecheck_program(prog)
actions = get_actions(prog)
print(json.dumps({
"sorts" : get_sorts(prog),
"functions" : get_functions(prog),
"axioms" : get_axioms(prog),
"inits" : get_inits(prog),
"conjectures" : get_conjs(prog),
"templates" : [],
"actions" : get_actions(prog),
}))
#print(prog.constants)
#print(prog.decls)
#print(prog.sorts)
#print(prog.safeties)
#print(prog.functions)
#print(prog.relations)
#print(prog.inits)
if __name__ == "__main__":
main()
| 47.862333
| 211
| 0.652325
| 3,191
| 25,032
| 4.989032
| 0.142275
| 0.053204
| 0.036935
| 0.087437
| 0.799686
| 0.781533
| 0.750879
| 0.724309
| 0.700188
| 0.696859
| 0
| 0.001822
| 0.232742
| 25,032
| 522
| 212
| 47.954023
| 0.827085
| 0.009388
| 0
| 0.232143
| 0
| 0
| 0.058685
| 0
| 0
| 0
| 0
| 0.003831
| 0.041667
| 1
| 0.083333
| false
| 0
| 0.059524
| 0.017857
| 0.297619
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
86f76e688037e4fac20b189440c4ac5870e10880
| 210
|
py
|
Python
|
api.py
|
putuwinda/palette
|
2d36d34d2366f81f381ca26d7d7167517b6c34b3
|
[
"MIT"
] | 1
|
2022-02-25T17:52:05.000Z
|
2022-02-25T17:52:05.000Z
|
api.py
|
putuwinda/palette
|
2d36d34d2366f81f381ca26d7d7167517b6c34b3
|
[
"MIT"
] | null | null | null |
api.py
|
putuwinda/palette
|
2d36d34d2366f81f381ca26d7d7167517b6c34b3
|
[
"MIT"
] | null | null | null |
from app import app, db
from app.models import Color
# Will make these objects available when running "flask shell"
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'Color': Color}
| 26.25
| 62
| 0.757143
| 32
| 210
| 4.84375
| 0.59375
| 0.090323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152381
| 210
| 7
| 63
| 30
| 0.870787
| 0.285714
| 0
| 0
| 0
| 0
| 0.047297
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
86f9aeed2634150b10ddaf27622095b46271076d
| 206
|
py
|
Python
|
SentHandler/__init__.py
|
wangjksjtu/multi-embedding-cws
|
f272a7586bc0ad6f65e900c7f29fc9a89a0c95b4
|
[
"MIT"
] | 16
|
2018-09-18T13:59:59.000Z
|
2022-03-21T08:05:31.000Z
|
SentHandler/__init__.py
|
wangjksjtu/multi-embedding-cws
|
f272a7586bc0ad6f65e900c7f29fc9a89a0c95b4
|
[
"MIT"
] | null | null | null |
SentHandler/__init__.py
|
wangjksjtu/multi-embedding-cws
|
f272a7586bc0ad6f65e900c7f29fc9a89a0c95b4
|
[
"MIT"
] | 6
|
2020-01-05T13:03:45.000Z
|
2022-03-21T08:05:33.000Z
|
# -*- coding: utf-8 -*-
from slicing import SliceSentence, Analyze, POS_Analyze
from pre_slicing import NE_Removing, NE_labeling, POS_labeling, CleanSentence, All2oneFile
from merging import MergeSentence
| 34.333333
| 90
| 0.81068
| 26
| 206
| 6.230769
| 0.653846
| 0.160494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010989
| 0.116505
| 206
| 5
| 91
| 41.2
| 0.879121
| 0.101942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
86fbae65e939e781080518c86ab6bc2ac8fabc41
| 223
|
py
|
Python
|
evalml/tuners/__init__.py
|
BlockchainClimateInstitute/price_microservice
|
11d1cff8965fe1befc997e9da3dc09efceed4579
|
[
"BSD-3-Clause"
] | 1
|
2021-07-28T14:20:35.000Z
|
2021-07-28T14:20:35.000Z
|
evalml/tuners/__init__.py
|
ObinnaObeleagu/evalml
|
3b5bf62b08a5a5bc6485ba5387a08c32e1857473
|
[
"BSD-3-Clause"
] | 13
|
2021-03-04T19:29:09.000Z
|
2022-03-07T01:00:43.000Z
|
evalml/tuners/__init__.py
|
RG4421/evalml
|
33c62abe6d107d1da2f54e9e44a90f18aaf916a9
|
[
"BSD-3-Clause"
] | null | null | null |
from .skopt_tuner import SKOptTuner
from .tuner import Tuner
from .tuner_exceptions import NoParamsException, ParameterError
from .random_search_tuner import RandomSearchTuner
from .grid_search_tuner import GridSearchTuner
| 37.166667
| 63
| 0.878924
| 27
| 223
| 7.037037
| 0.481481
| 0.231579
| 0.178947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09417
| 223
| 5
| 64
| 44.6
| 0.940594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
86fe9e233fccd5ef20261a5ee82437045ed27c56
| 132
|
py
|
Python
|
Pyiiko/__init__.py
|
rnd137/pyiiko
|
a702858c2ee597cbcc32317f45c2a79ec6c19035
|
[
"Apache-2.0"
] | 1
|
2019-01-17T12:18:17.000Z
|
2019-01-17T12:18:17.000Z
|
Pyiiko/__init__.py
|
rnd137/pyiiko
|
a702858c2ee597cbcc32317f45c2a79ec6c19035
|
[
"Apache-2.0"
] | null | null | null |
Pyiiko/__init__.py
|
rnd137/pyiiko
|
a702858c2ee597cbcc32317f45c2a79ec6c19035
|
[
"Apache-2.0"
] | null | null | null |
__version__ = '0.2.1'
from Pyiiko.server import *
from Pyiiko.biz import *
from Pyiiko.card5 import *
from Pyiiko.frontWeb import *
| 22
| 29
| 0.757576
| 20
| 132
| 4.8
| 0.55
| 0.416667
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035398
| 0.143939
| 132
| 5
| 30
| 26.4
| 0.814159
| 0
| 0
| 0
| 0
| 0
| 0.037879
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8139d316693ecfc564efdb08d27ac25225ba8c5b
| 200
|
py
|
Python
|
bin/iamonds/polyiamonds-12345-elongated-hexagon-9x1.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/iamonds/polyiamonds-12345-elongated-hexagon-9x1.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/iamonds/polyiamonds-12345-elongated-hexagon-9x1.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | 1
|
2022-01-02T16:54:14.000Z
|
2022-01-02T16:54:14.000Z
|
#!/usr/bin/env python
# $Id$
"""23,168 solutions"""
import puzzler
from puzzler.puzzles.polyiamonds12345 import Polyiamonds12345ElongatedHexagon9x1
puzzler.run(Polyiamonds12345ElongatedHexagon9x1)
| 20
| 80
| 0.82
| 19
| 200
| 8.631579
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 0.08
| 200
| 9
| 81
| 22.222222
| 0.76087
| 0.21
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
813f767f9b93d7f7fa11a2f24a6cbc0e1acd7f8d
| 2,258
|
py
|
Python
|
alembic/versions/d443d76c20db_add_experiment.py
|
jonathanzong/dmca
|
70157cff983310e5951024aa80e99e7a5404d758
|
[
"MIT"
] | 2
|
2022-02-16T22:50:06.000Z
|
2022-02-21T19:38:02.000Z
|
alembic/versions/d443d76c20db_add_experiment.py
|
jonathanzong/dmca
|
70157cff983310e5951024aa80e99e7a5404d758
|
[
"MIT"
] | 2
|
2022-02-01T05:48:07.000Z
|
2022-02-01T05:49:29.000Z
|
alembic/versions/d443d76c20db_add_experiment.py
|
jonathanzong/bartleby
|
70157cff983310e5951024aa80e99e7a5404d758
|
[
"MIT"
] | null | null | null |
"""Add experiment
Revision ID: d443d76c20db
Revises: b478383cbcc4
Create Date: 2017-12-04 01:03:04.639259
"""
# revision identifiers, used by Alembic.
revision = 'd443d76c20db'
down_revision = 'b478383cbcc4'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_development():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('experiments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('account_found', sa.Boolean(), nullable=True),
sa.Column('randomizations', sa.LargeBinary(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade_development():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('experiments')
# ### end Alembic commands ###
def upgrade_test():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('experiments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('account_found', sa.Boolean(), nullable=True),
sa.Column('randomizations', sa.LargeBinary(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade_test():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('experiments')
# ### end Alembic commands ###
def upgrade_production():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('experiments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('account_found', sa.Boolean(), nullable=True),
sa.Column('randomizations', sa.LargeBinary(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade_production():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('experiments')
# ### end Alembic commands ###
| 27.204819
| 65
| 0.669619
| 262
| 2,258
| 5.679389
| 0.244275
| 0.064516
| 0.084677
| 0.092742
| 0.776882
| 0.776882
| 0.776882
| 0.776882
| 0.776882
| 0.74328
| 0
| 0.028846
| 0.170948
| 2,258
| 82
| 66
| 27.536585
| 0.766026
| 0.267493
| 0
| 0.525
| 0
| 0
| 0.139103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.05
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
81420799603b7d825d65d7374943b9f0ce811835
| 1,886
|
py
|
Python
|
controllers/adminpanel.py
|
byancar/easystore_mvc_pos
|
7d3464fbc922d290276156238efe903d49d58f83
|
[
"MIT"
] | 52
|
2015-03-03T20:21:14.000Z
|
2020-11-15T05:54:33.000Z
|
controllers/adminpanel.py
|
nennogabriel/easy_store
|
0e165c29c83c7d28611a86076aa2b3a4545d062f
|
[
"MIT"
] | 1
|
2016-06-02T03:37:41.000Z
|
2016-06-02T17:59:47.000Z
|
controllers/adminpanel.py
|
nennogabriel/easy_store
|
0e165c29c83c7d28611a86076aa2b3a4545d062f
|
[
"MIT"
] | 42
|
2015-03-03T20:21:23.000Z
|
2020-05-11T19:35:14.000Z
|
# -*- coding: utf-8 -*-
# coding: utf8
@auth.requires_membership('admin')
def index():
return locals()
@auth.requires_membership('admin')
def products():
products_grid = SQLFORM.grid(db.product, csv=False)
return locals()
@auth.requires_membership('admin')
def product_categories():
categories_grid = SQLFORM.grid(db.category, csv=False)
return locals()
@auth.requires_membership('admin')
def orders():
return locals()
@auth.requires_membership('admin')
def store_users():
users_grid = SQLFORM.grid(db.auth_user, csv=False)
return locals()
@auth.requires_membership('admin')
def user_groups():
groups_grid = SQLFORM.grid(db.auth_membership, csv=False)
return locals()
@auth.requires_membership('admin')
def suppliers():
suppliers_grid = SQLFORM.grid(db.supplier, csv=False)
return locals()
@auth.requires_membership('admin')
def carriers():
carriers_grid = SQLFORM.grid(db.carrier, csv=False)
return locals()
@auth.requires_membership('admin')
def carriers_tax():
carriers_tax_grid = SQLFORM.grid(db.carrier_tax, csv=False)
return locals()
@auth.requires_membership('admin')
def invoices():
return locals()
@auth.requires_membership('admin')
def merchandise_returns():
return locals()
@auth.requires_membership('admin')
def statuses():
return locals()
@auth.requires_membership('admin')
def order_messages():
return locals()
@auth.requires_membership('admin')
def costumers():
costumer_grid = db(db.auth_user)
return locals()
@auth.requires_membership('admin')
def costumer_groups():
return locals()
@auth.requires_membership('admin')
def shopping_carts():
return locals()
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
| 22.452381
| 65
| 0.704666
| 226
| 1,886
| 5.716814
| 0.243363
| 0.148607
| 0.272446
| 0.334365
| 0.636223
| 0.543344
| 0.543344
| 0.283282
| 0.283282
| 0.089783
| 0
| 0.001249
| 0.151113
| 1,886
| 84
| 66
| 22.452381
| 0.805746
| 0.062566
| 0
| 0.542373
| 0
| 0
| 0.045767
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.288136
| false
| 0
| 0
| 0.135593
| 0.576271
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
d4c160dca1e0a4323f1d79fe80996065bf106ba4
| 5,645
|
py
|
Python
|
webcontentdownloader/complexdownloader.py
|
hail-kang/module-webcontent-downloader
|
93d9fa209c8a14540f10298e5b483edd692021c6
|
[
"MIT"
] | null | null | null |
webcontentdownloader/complexdownloader.py
|
hail-kang/module-webcontent-downloader
|
93d9fa209c8a14540f10298e5b483edd692021c6
|
[
"MIT"
] | null | null | null |
webcontentdownloader/complexdownloader.py
|
hail-kang/module-webcontent-downloader
|
93d9fa209c8a14540f10298e5b483edd692021c6
|
[
"MIT"
] | null | null | null |
import os
import urllib.parse
from datetime import datetime
from zipfile import ZipFile
from io import BytesIO
from mimetypes import guess_extension
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from .interface import SelectorDonwloader
from .simpledownloader import SimpleDownloader
from .utils import SelectorCommand
class RequestsDownloader(SelectorDonwloader):
"""
requests 모듈을 사용하여 HTML을 얻어내는 다운로더
"""
def __init__(self, base, path,
headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36'
}):
"""
:Args:
- base - 다운로드 할 링크의 host를 적는다. 만약 다운로드할 url의 입력값이 상대 경로로 주어져도 처리할 수 있도록 돕는다.
- path - 다운로드 할 로컬 경로를 적는다.
- headers - headers
"""
self.base = base
self.path = path
self.headers = headers
self.downloader = SimpleDownloader(base, path, headers)
def get(self, url_or_soup, selector):
if not isinstance(selector, SelectorCommand):
raise Exception("selector must be <class 'SelectroCommand'>")
if isinstance(url_or_soup, BeautifulSoup):
soup = url_or_soup
else:
url = urllib.parse.urljoin(self.base, url_or_soup)
response = requests.get(url, headers=self.headers)
if not response.ok:
raise Exception('html not 200 error')
html = response.text
soup = BeautifulSoup(html, 'html.parser')
img_urls = map(lambda img : img[selector.attribute], soup.select(selector.element))
for img_url in img_urls:
yield self.downloader.get(img_url)
def compress(self, url_or_soup, selector):
responses = self.get(url_or_soup, selector)
file = BytesIO()
zf = ZipFile(file, 'w')
for i, response in enumerate(responses, start=1):
ext = guess_extension(response['content-type'])
zf.writestr(f'{i}{ext}', response['content'])
print(f'{i}{ext}')
return file
def download(self, url_or_soup, selector, name, compress=False):
if compress:
path = os.path.join(self.path, f'{name}.zip')
if os.path.exists(path):
pass
file = self.compress(url_or_soup, selector)
with open(path, 'wb') as f:
f.write(file.getvalue())
print(path)
else:
path = os.path.join(self.path, str(name))
if not os.path.isdir(path):
os.mkdir(path)
responses = self.get(url_or_soup, selector)
for i, response in enumerate(responses, start=1):
ext = guess_extension(response['content-type'])
with open(os.path.join(path, f'{i}{ext}'), 'wb') as f:
f.write(response['content'])
print(f'{i}{ext}')
print(path)
class SeleniumDownloader(SelectorDonwloader):
"""
selenium 모듈을 사용한 다운로더
"""
def __init__(self, base, path, driver,
headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36'
}):
"""
:Args:
- base - 다운로드 할 링크의 host를 적는다. 만약 다운로드할 url의 입력값이 상대 경로로 주어져도 처리할 수 있도록 돕는다.
- path - 다운로드 할 로컬 경로를 적는다.
- driver - Chrome 드라이버를 사용해야하며, Webdriver 인스턴스 또는 Chromedriver가 위치한 경로를 받는다.
- headers - headers
"""
self.base = base
self.path = path
if isinstance(driver, webdriver.Chrome):
self.driver = driver
elif isinstance(driver, str):
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('window-size=1920x1080')
options.add_argument("--lang=ko-KR")
options.add_argument("disable-gpu")
self.driver = webdriver.Chrome(driver, options=options)
else:
raise Exception('driver must be webdriver_object or driver_location')
self.headers = headers
self.downloader = SimpleDownloader(base, path, headers)
def get(self, url_or_soup, selector):
if not isinstance(selector, SelectorCommand):
raise Exception("selector must be <class 'SelectroCommand'>")
if isinstance(url_or_soup, BeautifulSoup):
soup = url_or_soup
else:
url = urllib.parse.urljoin(self.base, url_or_soup)
self.driver.get(url)
html = self.driver.page_source
soup = BeautifulSoup(html, 'html.parser')
img_urls = map(lambda img : img[selector.attribute], soup.select(selector.element))
for img_url in img_urls:
yield self.downloader.get(img_url)
def compress(self, url_or_soup, selector):
responses = self.get(url_or_soup, selector)
file = BytesIO()
zf = ZipFile(file, 'w')
for i, response in enumerate(responses, start=1):
ext = guess_extension(response['content-type'])
zf.writestr(f'{i}{ext}', response['content'])
print(f'{i}{ext}')
return file
def download(self, url_or_soup, selector, name, compress=False):
if compress:
path = os.path.join(self.path, f'{name}.zip')
if os.path.exists(path):
pass
file = self.compress(url_or_soup, selector)
with open(path, 'wb') as f:
f.write(file.getvalue())
print(path)
else:
path = os.path.join(self.path, str(name))
if not os.path.isdir(path):
os.mkdir(path)
responses = self.get(url_or_soup, selector)
for i, response in enumerate(responses, start=1):
ext = guess_extension(response['content-type'])
with open(os.path.join(path, f'{i}{ext}'), 'wb') as f:
f.write(response['content'])
print(f'{i}{ext}')
print(path)
| 33.60119
| 130
| 0.634721
| 741
| 5,645
| 4.748988
| 0.222672
| 0.025575
| 0.046036
| 0.057971
| 0.729184
| 0.729184
| 0.716113
| 0.716113
| 0.694515
| 0.694515
| 0
| 0.015031
| 0.245704
| 5,645
| 168
| 131
| 33.60119
| 0.811414
| 0.071391
| 0
| 0.748032
| 0
| 0.015748
| 0.126126
| 0.004204
| 0
| 0
| 0
| 0
| 0
| 1
| 0.062992
| false
| 0.015748
| 0.094488
| 0
| 0.188976
| 0.062992
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4e0c73803e946dd140863d2c7e42b68cbc86c57
| 3,905
|
py
|
Python
|
tests/test_captions.py
|
Twixes/pytube3
|
2801505a5fed495f113b0e5793a61ccf1023ba90
|
[
"MIT-0"
] | null | null | null |
tests/test_captions.py
|
Twixes/pytube3
|
2801505a5fed495f113b0e5793a61ccf1023ba90
|
[
"MIT-0"
] | null | null | null |
tests/test_captions.py
|
Twixes/pytube3
|
2801505a5fed495f113b0e5793a61ccf1023ba90
|
[
"MIT-0"
] | null | null | null |
from unittest import mock
from unittest.mock import patch, mock_open, MagicMock
from pytube import Caption, CaptionQuery, captions
def test_float_to_srt_time_format():
caption1 = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en"}
)
assert caption1.float_to_srt_time_format(3.89) == "00:00:03,890"
def test_caption_query_all():
caption1 = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en"}
)
caption2 = Caption(
{"url": "url2", "name": {"simpleText": "name2"}, "languageCode": "fr"}
)
caption_query = CaptionQuery(captions=[caption1, caption2])
assert caption_query.captions == [caption1, caption2]
def test_caption_query_get_by_language_code_when_exists():
caption1 = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en"}
)
caption2 = Caption(
{"url": "url2", "name": {"simpleText": "name2"}, "languageCode": "fr"}
)
caption_query = CaptionQuery(captions=[caption1, caption2])
assert caption_query.get_by_language_code("en") == caption1
def test_caption_query_get_by_language_code_when_not_exists():
caption1 = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en"}
)
caption2 = Caption(
{"url": "url2", "name": {"simpleText": "name2"}, "languageCode": "fr"}
)
caption_query = CaptionQuery(captions=[caption1, caption2])
assert caption_query.get_by_language_code("hello") is None
@mock.patch("pytube.captions.Caption.generate_srt_captions")
def test_download(srt):
open_mock = mock_open()
with patch("builtins.open", open_mock):
srt.return_value = ""
caption = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en"}
)
caption.download("title")
assert open_mock.call_args_list[0][0][0].split("/")[-1] == "title (en).srt"
@mock.patch("pytube.captions.Caption.generate_srt_captions")
def test_download_with_prefix(srt):
open_mock = mock_open()
with patch("builtins.open", open_mock):
srt.return_value = ""
caption = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en"}
)
caption.download("title", filename_prefix="1 ")
assert open_mock.call_args_list[0][0][0].split("/")[-1] == "1 title (en).srt"
@mock.patch("pytube.captions.Caption.generate_srt_captions")
def test_download_with_output_path(srt):
open_mock = mock_open()
captions.target_directory = MagicMock(return_value="/target")
with patch("builtins.open", open_mock):
srt.return_value = ""
caption = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en"}
)
file_path = caption.download("title", output_path="blah")
assert file_path == "/target/title (en).srt"
captions.target_directory.assert_called_with("blah")
@mock.patch("pytube.captions.Caption.xml_captions")
def test_download_xml_and_trim_extension(xml):
open_mock = mock_open()
with patch("builtins.open", open_mock):
xml.return_value = ""
caption = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en"}
)
caption.download("title.xml", srt=False)
assert open_mock.call_args_list[0][0][0].split("/")[-1] == "title (en).xml"
def test_repr():
caption = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en"}
)
assert str(caption) == '<Caption lang="name1" code="en">'
@mock.patch("pytube.request.get")
def test_xml_captions(request_get):
request_get.return_value = "test"
caption = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en"}
)
assert caption.xml_captions == "test"
| 35.18018
| 85
| 0.63201
| 451
| 3,905
| 5.243902
| 0.175166
| 0.054968
| 0.059197
| 0.07611
| 0.742495
| 0.704863
| 0.704863
| 0.704863
| 0.704863
| 0.645243
| 0
| 0.022215
| 0.193086
| 3,905
| 110
| 86
| 35.5
| 0.72834
| 0
| 0
| 0.483146
| 0
| 0
| 0.238156
| 0.04379
| 0
| 0
| 0
| 0
| 0.123596
| 1
| 0.11236
| false
| 0
| 0.033708
| 0
| 0.146067
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4e3f9ce4c1455793a4d7abad2d5b1bff1f2e38d
| 121
|
py
|
Python
|
6 kyu/Sorting by bits.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 6
|
2020-09-03T09:32:25.000Z
|
2020-12-07T04:10:01.000Z
|
6 kyu/Sorting by bits.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 1
|
2021-12-13T15:30:21.000Z
|
2021-12-13T15:30:21.000Z
|
6 kyu/Sorting by bits.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | null | null | null |
def sort_by_bit(arr):
    """Return arr sorted ascending by popcount; equal popcounts fall back to numeric order."""
    def _key(value):
        # Tuple key: primary = set-bit count, secondary = the value itself.
        return (count(value), value)
    return sorted(arr, key=_key)


def count(n):
    """Return the number of '1' digits in the binary representation of n."""
    return format(n, "b").count("1")
| 30.25
| 51
| 0.619835
| 23
| 121
| 3.173913
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019802
| 0.165289
| 121
| 4
| 52
| 30.25
| 0.70297
| 0
| 0
| 0
| 0
| 0
| 0.008197
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
0794d42436c5bde59d02169e1c2179fc19daaeeb
| 307
|
py
|
Python
|
code/simonsays.py
|
matthewReff/Kattis-Problems
|
848628af630c990fb91bde6256a77afad6a3f5f6
|
[
"MIT"
] | 8
|
2020-02-21T22:21:01.000Z
|
2022-02-16T05:30:54.000Z
|
code/simonsays.py
|
matthewReff/Kattis-Problems
|
848628af630c990fb91bde6256a77afad6a3f5f6
|
[
"MIT"
] | null | null | null |
code/simonsays.py
|
matthewReff/Kattis-Problems
|
848628af630c990fb91bde6256a77afad6a3f5f6
|
[
"MIT"
] | 3
|
2020-08-05T05:42:35.000Z
|
2021-08-30T05:39:51.000Z
|
def simonsays():
    """Echo each instruction that begins with 'Simon says' (Kattis: simonsays).

    Reads a case count from stdin, then one instruction per line; prints the
    text following the 'Simon says ' prefix for each matching line.
    """
    prefix = "Simon says"
    # Original Python 2 code relied on input() eval'ing the count and on
    # raw_input()/`print x` statements; in Python 3 the count must be parsed
    # explicitly and print is a function.
    cases = int(input())
    for _ in range(cases):
        instruction = input()
        # Length guard first, so a line that is exactly the prefix (or shorter)
        # is ignored, matching the original nested-if behavior.
        if len(instruction) > len(prefix) and instruction[:len(prefix)] == prefix:
            # +1 skips the separating space after the prefix.
            print(instruction[len(prefix) + 1:])


simonsays()
| 34.111111
| 65
| 0.553746
| 34
| 307
| 4.970588
| 0.5
| 0.213018
| 0.337278
| 0.408284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009259
| 0.296417
| 307
| 9
| 66
| 34.111111
| 0.773148
| 0
| 0
| 0
| 0
| 0
| 0.12987
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.111111
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
07a5aedf245eee1938580ab0af72d2d24e95b574
| 618
|
py
|
Python
|
wireui/library/typedefs/peers.py
|
TheTimmoth/wireui
|
eda16eeb828a0c38f63074492877e0be71e32715
|
[
"MIT"
] | 1
|
2020-05-24T05:43:27.000Z
|
2020-05-24T05:43:27.000Z
|
wireui/library/typedefs/peers.py
|
TheTimmoth/wireui
|
eda16eeb828a0c38f63074492877e0be71e32715
|
[
"MIT"
] | 15
|
2020-05-26T18:37:14.000Z
|
2021-08-20T11:15:10.000Z
|
wireui/library/typedefs/peers.py
|
TheTimmoth/wireui
|
eda16eeb828a0c38f63074492877e0be71e32715
|
[
"MIT"
] | 1
|
2020-11-19T05:14:02.000Z
|
2020-11-19T05:14:02.000Z
|
# peers.py
# Peers for wireguard
# Author: Tim Schlottmann
# from collections import UserDict # creates not JSON serializable error
from typing import Dict
# TODO: Evaluate TypedDict when python3.8 is available
# TODO: Evaluate TypedDict when python3.8 is available
Keys = dict

# TODO: Evaluate TypedDict when python3.8 is available
RedirectAllTraffic = dict

# TODO: Evaluate TypedDict when python3.8 is available
PeerItems = dict


class Peers(dict):
  """ Peers for wireguard """

  def __getitem__(self, peer_name) -> PeerItems:
    # Plain dict lookup; the override exists only to carry the PeerItems hint.
    item = super().__getitem__(peer_name)
    return item

  def __setitem__(self, peer_name, peer: PeerItems):
    # Plain dict assignment; the override exists only to carry the PeerItems hint.
    super().__setitem__(peer_name, peer)
| 24.72
| 73
| 0.754045
| 80
| 618
| 5.575
| 0.475
| 0.071749
| 0.107623
| 0.168161
| 0.32287
| 0.32287
| 0.32287
| 0.32287
| 0.32287
| 0
| 0
| 0.011605
| 0.16343
| 618
| 24
| 74
| 25.75
| 0.851064
| 0.491909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0.111111
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
07c4f5b32a4422f633e4276fb24f19d77acaa9db
| 69
|
py
|
Python
|
harness/determined/experimental/estimator/__init__.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | 3
|
2020-04-30T03:56:15.000Z
|
2020-04-30T04:01:24.000Z
|
harness/determined/experimental/estimator/__init__.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | 1
|
2022-02-10T07:31:44.000Z
|
2022-02-10T07:31:44.000Z
|
harness/determined/experimental/estimator/__init__.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | 2
|
2020-07-10T23:08:23.000Z
|
2021-01-13T10:01:59.000Z
|
from determined.experimental.estimator._estimator_native import init
| 34.5
| 68
| 0.898551
| 8
| 69
| 7.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057971
| 69
| 1
| 69
| 69
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6afdef2c05737740a73f637fdfeeb8e380aa30b0
| 113
|
py
|
Python
|
eds/openmtc-gevent/common/openmtc-onem2m/src/openmtc_onem2m/__init__.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
eds/openmtc-gevent/common/openmtc-onem2m/src/openmtc_onem2m/__init__.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
eds/openmtc-gevent/common/openmtc-onem2m/src/openmtc_onem2m/__init__.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
from openmtc_onem2m.transport import AdditionalInformation, MetaInformation, \
OneM2MRequest, OneM2MResponse
| 37.666667
| 78
| 0.849558
| 9
| 113
| 10.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029703
| 0.106195
| 113
| 2
| 79
| 56.5
| 0.910891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ed457f9e650bccbb5d13b5ba6a9e96243fa669c0
| 122
|
py
|
Python
|
tools/demoyml.py
|
likelyzhao/pysot
|
01de9d4817904c68b65ddb8aba47cbf3cb9b695a
|
[
"Apache-2.0"
] | null | null | null |
tools/demoyml.py
|
likelyzhao/pysot
|
01de9d4817904c68b65ddb8aba47cbf3cb9b695a
|
[
"Apache-2.0"
] | null | null | null |
tools/demoyml.py
|
likelyzhao/pysot
|
01de9d4817904c68b65ddb8aba47cbf3cb9b695a
|
[
"Apache-2.0"
] | 1
|
2019-12-11T02:44:36.000Z
|
2019-12-11T02:44:36.000Z
|
# Load the pysot tracker configuration and print the merged settings.
from pysot.core.config import cfg

# Overlay the SiamRPN R50 8-GPU experiment settings onto the built-in defaults.
# NOTE(review): path is relative — assumes the script is run from the repo root.
cfg.merge_from_file("experiments/siamrpn_r50_l234_dwxcorr_8gpu/config.yaml")
print(cfg)
| 30.5
| 76
| 0.852459
| 20
| 122
| 4.9
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051724
| 0.04918
| 122
| 4
| 77
| 30.5
| 0.793103
| 0
| 0
| 0
| 0
| 0
| 0.430894
| 0.430894
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ed630a2493086f257d13c40f7a96316b50ad937a
| 114,512
|
py
|
Python
|
pipeline/test_drift_synchonization.py
|
streamsets/datacollector-tests
|
6c3e908768e1d4a586e9183e2141096921ecd5be
|
[
"Apache-2.0"
] | 14
|
2019-03-04T10:12:39.000Z
|
2021-11-24T16:17:09.000Z
|
pipeline/test_drift_synchonization.py
|
Pragatibs/datacollector-tests
|
aac53b2f0e056009ef0e437c8430651e3cf4d502
|
[
"Apache-2.0"
] | 48
|
2019-03-08T14:59:06.000Z
|
2021-08-13T14:49:56.000Z
|
pipeline/test_drift_synchonization.py
|
Pragatibs/datacollector-tests
|
aac53b2f0e056009ef0e437c8430651e3cf4d502
|
[
"Apache-2.0"
] | 23
|
2018-09-24T20:49:17.000Z
|
2021-11-24T16:17:11.000Z
|
# Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import string
import time
import uuid
from collections import OrderedDict
from datetime import datetime
from decimal import Decimal
from itertools import groupby, chain
from operator import itemgetter
from urllib.parse import urlparse
import pytest
import sqlalchemy
from streamsets.testframework.environments.cloudera import ClouderaManagerCluster
from streamsets.testframework.environments.hortonworks import AmbariCluster
from streamsets.testframework.markers import database, cluster, sdc_min_version
from streamsets.testframework.utils import get_random_string, Version
# Module-level logger for the drift-synchronization tests below.
logger = logging.getLogger(__name__)

# Field specs fed to the 'Dev Data Generator' stage: one field per primitive
# type exercised by the Hive drift tests. precision/scale are only meaningful
# for the DECIMAL field; the generator ignores them for the other types —
# TODO confirm against the stage docs.
DEV_DATA_GEN_FIELD_LIST = [{'field': 'stringField', 'type': 'STRING', 'precision': 10, 'scale': 2},
                           {'field': 'intField', 'type': 'INTEGER', 'precision': 10, 'scale': 2},
                           {'field': 'longField', 'type': 'LONG', 'precision': 10, 'scale': 2},
                           {'field': 'floatField', 'type': 'FLOAT', 'precision': 10, 'scale': 2},
                           {'field': 'doubleField', 'type': 'DOUBLE', 'precision': 10, 'scale': 2},
                           {'field': 'dateField', 'type': 'DATE', 'precision': 10, 'scale': 2},
                           {'field': 'decimalField', 'type': 'DECIMAL', 'precision': 10, 'scale': 5}]
@pytest.fixture(scope='module')
def sdc_common_hook():
    """Module-scoped hook that registers the Groovy 2.4 stage library on the data collector."""
    def _register_groovy_lib(data_collector):
        data_collector.add_stage_lib('streamsets-datacollector-groovy_2_4-lib')
    return _register_groovy_lib
@cluster('cdh')
@database
def test_query_with_parquet(sdc_builder, sdc_executor, cluster, database):
    """Validate end-to-end case with stopping the pipeline and executing the map/reduce job after it read all the
    data from database. Addresses Hive drift synchronization solution in parquet data format. The pipeline looks like:

        jdbc_query_consumer >= pipeline_finisher_executor
        jdbc_query_consumer >> expression_evaluator >> field_remover >> hive_metadata
        hive_metadata >> hadoop_fs
        hive_metadata >> hive_metastore
        hadoop_fs >= mapreduce
    """
    if 'hive' in cluster.kerberized_services:
        pytest.skip('Test runs only in non-kerberized environment till SDC-9324 is fixed.')
    table_name = get_random_string(string.ascii_lowercase, 20)  # lowercase for db compatibility (e.g. PostgreSQL)
    # Source table schema; 'dt' doubles as the Hive partition value via the header attribute below.
    table = sqlalchemy.Table(table_name, sqlalchemy.MetaData(),
                             sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
                             sqlalchemy.Column('name', sqlalchemy.String(25)),
                             sqlalchemy.Column('dt', sqlalchemy.String(20)))
    rows_in_database = [{'id': 1, 'name': 'Ji Sun', 'dt': '2017-05-03'},
                        {'id': 2, 'name': 'Jarcec', 'dt': '2017-05-03'},
                        {'id': 3, 'name': 'Santhosh', 'dt': '2017-05-03'}]
    pipeline_builder = sdc_builder.get_pipeline_builder()
    jdbc_query_consumer = pipeline_builder.add_stage('JDBC Query Consumer')
    jdbc_query_consumer.set_attributes(incremental_mode=False, sql_query=f'SELECT * FROM {table_name};')
    # Copy /dt into a header attribute so it survives the field remover and can drive partitioning.
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.set_attributes(header_attribute_expressions=[{'attributeToSet': 'dt',
                                                                       'headerAttributeExpression': "${record:value('/dt')}"}])
    field_remover = pipeline_builder.add_stage('Field Remover')
    field_remover.set_attributes(fields=['/dt'])
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='PARQUET', database_expression=f'{database.database}',
                                 table_name="${record:attribute('jdbc.tables')}")
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    # max_records_in_file enables to close the file and generate the event
    hadoop_fs.set_attributes(avro_schema_location='HEADER', data_format='AVRO', directory_in_header=True,
                             max_records_in_file=1)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    # The MapReduce executor converts the closed Avro files to Parquet.
    mapreduce = pipeline_builder.add_stage('MapReduce', type='executor')
    mapreduce.set_attributes(job_type='AVRO_PARQUET',
                             output_directory="${file:parentPath(file:parentPath(record:value('/filepath')))}")
    wiretap = pipeline_builder.add_wiretap()
    pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')
    jdbc_query_consumer >= pipeline_finisher_executor
    jdbc_query_consumer >> expression_evaluator >> field_remover >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    hadoop_fs >= mapreduce
    mapreduce >= wiretap.destination
    pipeline = pipeline_builder.build(title='Hive drift test').configure_for_environment(cluster, database)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        logger.info('Creating table %s in %s database ...', table_name, database.type)
        table.create(database.engine)
        logger.info('Adding %s rows into %s database ...', len(rows_in_database), database.type)
        connection = database.engine.connect()
        connection.execute(table.insert(), rows_in_database)
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        # assert events (MapReduce) generated
        assert len(wiretap.output_records) == len(rows_in_database)
        # make sure MapReduce job is done and is successful
        for event in wiretap.output_records:
            job_id = event.field['job-id'].value
            assert cluster.yarn.wait_for_job_to_end(job_id) == 'SUCCEEDED'
        # verify inserted data
        hive_cursor.execute(f'RELOAD {_get_qualified_table_name(None, table_name)}')
        hive_cursor.execute(f'SELECT * from {_get_qualified_table_name(None, table_name)}')
        hive_values = [list(row) for row in hive_cursor.fetchall()]
        raw_values = [list(row.values()) for row in rows_in_database]
        assert sorted(hive_values) == sorted(raw_values)
    finally:
        logger.info('Dropping table %s in %s database...', table_name, database.type)
        table.drop(database.engine)
        logger.info('Dropping table %s in Hive...', table_name)
        hive_cursor.execute(f'DROP TABLE `{table_name}`')
@cluster('cdh', 'hdp')
def test_null_fields(sdc_builder, sdc_executor, cluster):
    """Validate Null fields of different data type and see hive store and returns NULL. The pipeline looks like:

        dev_data_generator >> field_value_replacer >> hive_metadata
        hive_metadata >> hadoop_fs
        hive_metadata >> hive_metastore
        dev_data_generator >= pipeline_finisher
    """
    if getattr(cluster, 'kerberized_services', False) and 'hive' in cluster.kerberized_services:
        pytest.skip('Test runs only in non-kerberized environment till SDC-9324 is fixed.')
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = get_random_string(string.ascii_lowercase, 20)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
    dev_data_generator.set_attributes(batch_size=1,
                                      delay_between_batches=10)
    dev_data_generator.fields_to_generate = DEV_DATA_GEN_FIELD_LIST
    # Null out every generated field so Hive receives only NULLs.
    field_value_replacer = pipeline_builder.add_stage('Value Replacer')
    field_value_replacer.fields_to_null = [{'fieldsToNull': ['/' + field_and_type_info['field']
                                                            for field_and_type_info in DEV_DATA_GEN_FIELD_LIST]}]
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO', database_expression='default',
                                 partition_configuration=[],
                                 decimal_scale_expression='5',
                                 decimal_precision_expression='10',
                                 table_name=table_name)
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    # max_records_in_file enables to close the file and generate the event
    hadoop_fs.set_attributes(avro_schema_location='HEADER', data_format='AVRO', directory_in_header=True,
                             max_records_in_file=1)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    pipeline_finisher = pipeline_builder.add_stage('Pipeline Finisher Executor')
    dev_data_generator >> field_value_replacer >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    dev_data_generator >= pipeline_finisher  # stop after first batch
    pipeline = pipeline_builder.build(title='Hive drift test - Null values').configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        hive_cursor.execute(f'RELOAD `{table_name}`')
        hive_cursor.execute(f'SELECT * from `{table_name}`')
        hive_values = [list(row) for row in hive_cursor.fetchall()]
        logger.debug('read_data = {}'.format(hive_values))
        # Exactly one row (batch_size=1, stopped after first batch) and every column falsy/NULL.
        assert 1 == len(hive_values)
        hive_row = hive_values[0]
        for value in hive_row:
            assert not value
    finally:
        logger.info('Dropping table %s in Hive...', table_name)
        hive_cursor.execute(f'DROP TABLE `{table_name}`')
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
@pytest.mark.parametrize('db', ['', 'default', 'custom'])
@pytest.mark.parametrize('stored_as_avro', [True, False])
@pytest.mark.parametrize('external_table', [True, False])
@pytest.mark.parametrize('partitioned', [True, False])
def test_cold_start(sdc_builder, sdc_executor, cluster, db, stored_as_avro, external_table, partitioned):
    """Validate Cold Start no table and no data. This test also tests different types of table and methods of creation.
    The pipeline looks like:

        dev_raw_data_source >> expression_evaluator >> hive_metadata
        hive_metadata >> hadoop_fs
        hive_metadata >> hive_metastore
    """
    if getattr(cluster, 'kerberized_services', False) and 'hive' in cluster.kerberized_services:
        pytest.skip('Test runs only in non-kerberized environment till SDC-9324 is fixed.')
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = get_random_string(string.ascii_lowercase, 20)
    # Empty db means the implicit 'default' database; a named non-default db lives under '<db>.db'.
    db_for_path = 'default' if not db else f'{db}.db' if db != 'default' else db
    # External tables go under /tmp/sdc/..., managed tables under the Hive warehouse.
    database_location_for_table_path = (f'/tmp/sdc/hive/warehouse/{db_for_path}'
                                        if external_table else f'/user/hive/warehouse/{db_for_path}')
    table_path_template = f'{database_location_for_table_path}/{table_name}' if external_table else ''
    raw_data = [dict(id=1, name='abc'), dict(id=2, name='def'), dict(id=3, name='ghi')]
    dev_raw_data_source_data = ''.join(json.dumps(d) for d in raw_data)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    # Route db/table through header attributes so Hive Metadata can resolve them per record.
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.set_attributes(header_attribute_expressions=[{'attributeToSet': 'db',
                                                                       'headerAttributeExpression': db},
                                                                      {'attributeToSet': 'table_name',
                                                                       'headerAttributeExpression': table_name}])
    partition_configuration = [{'name': 'dt', 'valueType': 'STRING',
                                'valueEL': '${YYYY()}-${MM()}-${DD()}'}] if partitioned else []
    partition_path_template = 'dt=${YYYY()}-${MM()}-${DD()}' if partitioned else ''
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="${record:attribute('db')}",
                                 external_table=external_table,
                                 table_path_template=table_path_template,
                                 partition_configuration=partition_configuration,
                                 partition_path_template=partition_path_template,
                                 decimal_scale_expression='5',
                                 decimal_precision_expression='10',
                                 table_name="${record:attribute('table_name')}")
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    hive_metastore.set_attributes(stored_as_avro=stored_as_avro)
    dev_raw_data_source >> expression_evaluator >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    pipeline = pipeline_builder.build(title='Hive drift test - Cold Start').configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    if db:
        hive_cursor.execute(f'CREATE DATABASE IF NOT EXISTS`{db}`')
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        hive_cursor.execute('RELOAD {0}'.format(_get_qualified_table_name(db, table_name)))
        hive_cursor.execute('SELECT * from {0}'.format(_get_qualified_table_name(db, table_name)))
        hive_values = [list(row) for row in hive_cursor.fetchall()]
        raw_values = [list(row.values()) for row in raw_data]
        if partitioned:
            # Partitioned tables surface the 'dt' partition value as an extra trailing column.
            for i in range(len(raw_values)):
                raw_values[i] = raw_values[i] + [datetime.now().strftime('%Y-%m-%d')]
        assert sorted(hive_values) == sorted(raw_values)
    finally:
        logger.info('Dropping table %s in Hive...', _get_qualified_table_name(db, table_name))
        hive_cursor.execute('DROP TABLE {0}'.format(_get_qualified_table_name(db, table_name)))
        if db and db != 'default':
            logger.info('Dropping Database %s in Hive...', db)
            hive_cursor.execute('DROP DATABASE IF EXISTS`{0}`'.format(db))
        if external_table:
            logger.info('Deleting Hadoop FS directory %s ...', database_location_for_table_path)
            cluster.hdfs.client.delete(database_location_for_table_path, recursive=True)
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
@pytest.mark.parametrize('db', ['custom'])
@pytest.mark.parametrize('external_table', [True, False])
@pytest.mark.parametrize('custom_database_location', ['/tmp/sdc/hive/warehouse/custom', ''])
@pytest.mark.parametrize('custom_table_location', ['/tmp/sdc/hive/warehouse/table', ''])
def test_database_and_table_location(sdc_builder, sdc_executor, cluster,
                                     db, external_table, custom_database_location, custom_table_location):
    """Validate combination of custom and default location for database and table and assert data is read properly
    and locations are right. The pipeline looks like:

        dev_raw_data_source >> expression_evaluator >> hive_metadata
        hive_metadata >> hadoop_fs
        hive_metadata >> hive_metastore
    """
    if getattr(cluster, 'kerberized_services', False) and 'hive' in cluster.kerberized_services:
        pytest.skip('Test runs only in non-kerberized environment till SDC-9324 is fixed.')
    table_name = get_random_string(string.ascii_lowercase, 20)
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    if custom_table_location and not external_table:
        pytest.skip('Test skipped : SDC-5459: Hive processor is ignoring location for internal tables')
    # Resolution order: explicit table location > database location > external default > '' (managed default).
    table_path = (f'{custom_table_location}' if custom_table_location
                  else (f'{custom_database_location}/{table_name}' if custom_database_location
                        else f'/tmp/sdc/hive/warehouse/{db}.db/{table_name}' if external_table else ''))
    raw_data = [dict(id=1, name='abc'), dict(id=2, name='def'), dict(id=3, name='ghi')]
    dev_raw_data_source_data = ''.join(json.dumps(d) for d in raw_data)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.set_attributes(header_attribute_expressions=[{'attributeToSet': 'db',
                                                                       'headerAttributeExpression': db},
                                                                      {'attributeToSet': 'table_name',
                                                                       'headerAttributeExpression': table_name}])
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="${record:attribute('db')}",
                                 external_table=external_table,
                                 table_path_template=table_path,
                                 partition_configuration=[],
                                 decimal_scale_expression='5',
                                 decimal_precision_expression='10',
                                 table_name="${record:attribute('table_name')}")
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    # CDH 7 works in a bit more mysterious ways and it seems that the default creation path doesn't allow us
    # to write data into HDFS if Hive metastore created the directory for the table first.
    if isinstance(cluster, ClouderaManagerCluster) and cluster.version.startswith('cdh7'):
        hadoop_fs.impersonation_user = "root"
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    dev_raw_data_source >> expression_evaluator >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    pipeline = pipeline_builder.build(title='Hive drift test - Location').configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    create_database_command = (f'CREATE DATABASE IF NOT EXISTS `{db}` LOCATION "{custom_database_location}"'
                               if custom_database_location else f'CREATE DATABASE IF NOT EXISTS `{db}`')
    hive_cursor.execute(create_database_command)
    # internal table_with_custom_location
    if not external_table and custom_table_location:
        create_table_command_template_prefix = ('CREATE TABLE IF NOT EXISTS {0} (id int, name string)'
                                                ' STORED AS AVRO').format(_get_qualified_table_name(db, table_name))
        create_table_command = create_table_command_template_prefix + f' LOCATION "{custom_table_location}"'
        hive_cursor.execute(create_table_command)
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished(timeout_sec=10)
        location_of_table = _get_table_location(hive_cursor, db, table_name)
        hive_cursor.execute('RELOAD {0}'.format(_get_qualified_table_name(db, table_name)))
        hive_cursor.execute('SELECT * from {0}'.format(_get_qualified_table_name(db, table_name)))
        hive_values = [list(row) for row in hive_cursor.fetchall()]
        raw_values = [list(row.values()) for row in raw_data]
        assert sorted(hive_values) == sorted(raw_values)
        # check location of the table
        if custom_table_location:
            expected_location_of_table = custom_table_location
        elif custom_database_location:
            expected_location_of_table = f'{custom_database_location}/{table_name}'
        elif external_table:
            expected_location_of_table = table_path
        else:
            if isinstance(cluster, ClouderaManagerCluster) and cluster.version.startswith('cdh7'):
                # https://docs.cloudera.com/cdp/latest/data-migration/topics/cdp-data-migration-table-create.html
                # CDH 7 uses ACID for managed tables, so "internal non-acid tables" are slashed into external directory.
                expected_location_of_table = f'{_get_hive_warehouse_external_dir(hive_cursor)}/{db}.db/{table_name}'
            else:
                expected_location_of_table = f'{_get_hive_warehouse_dir(hive_cursor)}/{db}.db/{table_name}'
        assert expected_location_of_table == location_of_table
    finally:
        logger.info('Dropping table %s in Hive...', _get_qualified_table_name(db, table_name))
        hive_cursor.execute('DROP TABLE {0}'.format(_get_qualified_table_name(db, table_name)))
        logger.info('Dropping Database %s in Hive...', db)
        hive_cursor.execute('DROP DATABASE IF EXISTS`{0}`'.format(db))
        # delete only if external table
        if external_table:
            if table_path:
                logger.info('Deleting Hadoop FS directory %s ...', table_path)
                cluster.hdfs.client.delete(table_path, recursive=True)
            db_location_to_delete = custom_database_location or f'/tmp/sdc/hive/warehouse/{db}.db'
            logger.info('Deleting Hadoop FS directory %s ...', db_location_to_delete)
            cluster.hdfs.client.delete(db_location_to_delete, recursive=True)
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
def test_partition_locations(sdc_builder, sdc_executor, cluster):
    """Store data into a Hive managed table with multicolumn partitioning, and then check that the partition locations
    specified by Hive Metadata is actually where Hive Metastore have created them.

    Pipeline configuration:
        dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
        hive_metadata >> [hadoop_fs, wiretap]
        hive_metadata >> hive_metastore
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    pipeline_builder = sdc_builder.get_pipeline_builder()
    # Three records, each with its own random 3-level partition values.
    raw_data = [dict(id=1, name='abc', part1=get_random_string(), part2=get_random_string(), part3=get_random_string()),
                dict(id=2, name='def', part1=get_random_string(), part2=get_random_string(), part3=get_random_string()),
                dict(id=3, name='ghi', part1=get_random_string(), part2=get_random_string(), part3=get_random_string())]
    # Dev raw data source
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=''.join(json.dumps(d) for d in raw_data),
                                       stop_after_first_batch=True)
    # Expression evaluator
    # NOTE(review): this local 'database' shadows the imported 'database' marker inside this test.
    database = get_random_string(string.ascii_lowercase, 20)
    table_name = get_random_string(string.ascii_lowercase, 20)
    header_attributes = [{'attributeToSet': 'database', 'headerAttributeExpression': database},
                         {'attributeToSet': 'table_name', 'headerAttributeExpression': table_name},
                         {'attributeToSet': 'part1', 'headerAttributeExpression': "${record:value('/part1')}"},
                         {'attributeToSet': 'part2', 'headerAttributeExpression': "${record:value('/part2')}"},
                         {'attributeToSet': 'part3', 'headerAttributeExpression': "${record:value('/part3')}"}]
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.set_attributes(header_attribute_expressions=header_attributes)
    # Field Remover
    field_remover = pipeline_builder.add_stage('Field Remover')
    field_remover.set_attributes(fields=['/part1', '/part2', '/part3'])
    # Hive Metadata
    part1_name = 'part_' + get_random_string()
    part2_name = 'part_' + get_random_string()
    part3_name = 'part_' + get_random_string()
    partition_configuration = [{'name': part1_name, 'valueType': 'STRING', 'valueEL': "${record:attribute('part1')}"},
                               {'name': part2_name, 'valueType': 'STRING', 'valueEL': "${record:attribute('part2')}"},
                               {'name': part3_name, 'valueType': 'STRING', 'valueEL': "${record:attribute('part3')}"}]
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="${record:attribute('database')}",
                                 table_name="${record:attribute('table_name')}",
                                 external_table=False,
                                 partition_configuration=partition_configuration)
    # Hadoop FS
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    # Hive Metastore
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    wiretap = pipeline_builder.add_wiretap()
    # Build pipeline
    dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> [hive_metastore, wiretap.destination]
    pipeline = pipeline_builder.build(title='Hive drift - Test Partition Locations').configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    qualified_table_name = _get_qualified_table_name(database, table_name)
    hive_cursor = cluster.hive.client.cursor()
    hive_cursor.execute(f'CREATE DATABASE IF NOT EXISTS {database}')
    hive_cursor.execute(f'USE {database}')
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        partition_metadata = [rec for rec in wiretap.output_records if rec.field['type'] == 'PARTITION']
        # Query the partition locations to the Hive database and compare them against the location generated by Hive
        # Metadata. Also, we are using the default location and therefore 'customLocation' must be always False.
        for raw, rec in zip(raw_data, partition_metadata):
            hive_cursor.execute(f"SHOW TABLE EXTENDED LIKE '{table_name}' PARTITION ({part1_name}='{raw['part1']}', {part2_name}='{raw['part2']}', {part3_name}='{raw['part3']}')")
            real_location = [row[0] for row in hive_cursor.fetchall() if row[0].startswith('location:')][0]
            assert 'customLocation' in rec.field and rec.field['customLocation'] == False
            assert real_location.endswith(str(rec.field['location']))
    finally:
        logger.info('Dropping table %s in Hive...', qualified_table_name)
        hive_cursor.execute(f'DROP TABLE IF EXISTS {qualified_table_name}')
        logger.info('Dropping database %s in Hive...', database)
        hive_cursor.execute(f'DROP DATABASE IF EXISTS {database}')
@cluster('cdh', 'hdp')
@pytest.mark.parametrize('sdc_type, hive_type, supported',
                         [('BOOLEAN', 'BOOLEAN', True), ('STRING', 'STRING', True), ('INTEGER', 'INT', True),
                          ('SHORT', 'INT', True), ('LONG', 'BIGINT', True), ('FLOAT', 'FLOAT', True),
                          ('DOUBLE', 'DOUBLE', True), ('DECIMAL', 'DECIMAL(4,2)', True),
                          ('BYTE_ARRAY', 'BINARY', True), ('BYTE', None, False),
                          ('MAP', None, False), ('LIST_MAP', None, False)])
def test_sdc_types(sdc_builder, sdc_executor, cluster, sdc_type, hive_type, supported):
    """Validate Different Types of SDC Fields (supported and unsupported for hive) and assert data present
    or error records. The pipeline looks like:
        dev_data_generator >> expression_evaluator >> groovy_evaluator >> hive_metadata
        hive_metadata >> [hadoop_fs, wiretap.destination]
        hive_metadata >> hive_metastore
        dev_data_generator >= pipeline_finisher
    """
    # SDC-9324: Hive stages misbehave against a kerberized Hive, so only run unsecured.
    if getattr(cluster, 'kerberized_services', False) and 'hive' in cluster.kerberized_services:
        pytest.skip('Test runs only in non-kerberized environment till SDC-9324 is fixed.')
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = get_random_string(string.ascii_lowercase, 20)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    # One record per batch; the Pipeline Finisher (connected via >= below) ends the run
    # after the first batch, so exactly one record flows through the pipeline.
    dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
    dev_data_generator.set_attributes(batch_size=1,
                                      delay_between_batches=10)
    dev_data_generator.fields_to_generate = [{'field': 'id', 'precision': 10, 'scale': 2, 'type': 'INTEGER'}]
    # Stash the parametrized SDC type in a record header attribute so the Groovy
    # script below can pick it up and synthesize a matching field value.
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.set_attributes(header_attribute_expressions=[{'attributeToSet': 'sdc_type',
                                                                      'headerAttributeExpression': sdc_type}])
    groovy_evaluator = pipeline_builder.add_stage('Groovy Evaluator', type='processor')
    # Groovy script that adds a '/custom' field whose Java type corresponds to the
    # requested SDC type (read from the 'sdc_type' header attribute).
    script = """
def get_value_for_sdc_type(sdc_type) {
switch (sdc_type) {
case 'BOOLEAN':
return true;
case 'INTEGER':
return 1;
case 'SHORT':
return ((short)1);
case 'LONG':
return 1L;
case 'FLOAT':
return 1.0f;
case 'DOUBLE':
return 1.0d;
case 'DECIMAL':
return BigDecimal.valueOf(12.12);
case 'BYTE_ARRAY':
return 'abc'.getBytes();
case 'BYTE':
return 'abc'.getBytes()[0];
case 'MAP':
def map_field = sdcFunctions.createMap(false);
[firstName:'John', lastName:'Doe'].each {
map_field.put(it.key, it.value)
};
return map_field;
case 'LIST_MAP':
def list_map_field = sdcFunctions.createMap(true);
[firstName:'John', lastName:'Doe'].each {
list_map_field.put(it.key, it.value)
};
return list_map_field;
case 'LIST':
return ['abc', 'def'];
default:
return 'abc'
}
}
for (record in records) {
try {
def sdc_type = record.attributes['sdc_type']
record.value['custom'] = get_value_for_sdc_type(sdc_type)
output.write(record)
} catch (e) {
log.error(e.toString(), e)
error.write(record, e.toString())
}
}
"""
    groovy_evaluator.set_attributes(enable_invokedynamic_compiler_option=True,
                                    record_processing_mode='BATCH', script=script)
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="default",
                                 external_table=False,
                                 partition_configuration=[],
                                 decimal_scale_expression='2',
                                 decimal_precision_expression='4',
                                 table_name=table_name)
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    pipeline_finisher = pipeline_builder.add_stage('Pipeline Finisher Executor')
    wiretap = pipeline_builder.add_wiretap()
    dev_data_generator >> expression_evaluator >> groovy_evaluator >> hive_metadata
    hive_metadata >> [hadoop_fs, wiretap.destination]
    hive_metadata >> hive_metastore
    dev_data_generator >= pipeline_finisher
    pipeline = pipeline_builder.build(title='Hive drift test - SDC Types').configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        if not supported:
            # Unsupported SDC types must be rejected by the Hive Metadata processor
            # and surface as error records only.
            assert len(wiretap.error_records) == 1
            assert len(wiretap.output_records) == 0
        else:
            assert len(wiretap.error_records) == 0
            assert len(wiretap.output_records) == 1
            # The drifted column must map onto the expected Hive column type.
            column_and_types_from_hive = _get_table_columns_and_type(hive_cursor, None, table_name)
            assert column_and_types_from_hive['custom'].upper() == hive_type
            hive_cursor.execute(f'RELOAD {_get_qualified_table_name(None, table_name)}')
            hive_cursor.execute(f'SELECT * from {_get_qualified_table_name(None, table_name)}')
            hive_values = [list(row) for row in hive_cursor.fetchall()]
            assert len(hive_values) == 1
            hive_values = hive_values[0]
            custom_value = wiretap.output_records[0].field['custom'].value
            # hive client returns the binary as string
            custom_value = custom_value.decode() if hive_type == 'BINARY' else custom_value
            expected_row_values = [wiretap.output_records[0].field['id'], custom_value]
            assert hive_values == expected_row_values
    finally:
        logger.info('Dropping table %s in Hive...', table_name)
        hive_cursor.execute('DROP TABLE `{0}`'.format(table_name))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
@pytest.mark.parametrize('partition_type, partition_value', [('INT', 1), ('BIGINT', 1), ('STRING', 'abc')])
def test_partition_types(sdc_builder, sdc_executor, cluster, partition_type, partition_value):
    """Check that each supported Hive partition column type round-trips through the drift
    pipeline and that the partitioned row reads back intact. The pipeline looks like:
        dev_raw_data_source >> hive_metadata
        hive_metadata >> hadoop_fs
        hive_metadata >> hive_metastore
    """
    if getattr(cluster, 'kerberized_services', False) and 'hive' in cluster.kerberized_services:
        pytest.skip('Test runs only in non-kerberized environment till SDC-9324 is fixed.')
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')

    table_name = get_random_string(string.ascii_lowercase, 20)
    raw_data = [OrderedDict(id=1, name='abc', value=partition_value, part=partition_value)]
    # 'part' is only the expected partition value; the record actually fed into the
    # pipeline omits it — the partition EL below derives it from '/value' instead.
    source_payload = ''.join(json.dumps({key: value for key, value in record.items() if key != 'part'})
                             for record in raw_data)

    builder = sdc_builder.get_pipeline_builder()

    origin = builder.add_stage('Dev Raw Data Source')
    origin.set_attributes(data_format='JSON',
                          raw_data=source_payload,
                          stop_after_first_batch=True)

    drift_processor = builder.add_stage('Hive Metadata')
    drift_processor.set_attributes(data_format='AVRO',
                                   database_expression="default",
                                   external_table=False,
                                   partition_configuration=[{'name': 'part',
                                                             'valueType': partition_type,
                                                             'valueEL': '${record:value("/value")}'}],
                                   decimal_scale_expression='2',
                                   decimal_precision_expression='4',
                                   table_name=table_name)

    data_target = builder.add_stage('Hadoop FS', type='destination')
    data_target.set_attributes(avro_schema_location='HEADER',
                               data_format='AVRO',
                               directory_in_header=True,
                               use_roll_attribute=True)

    metadata_target = builder.add_stage('Hive Metastore', type='destination')

    origin >> drift_processor
    drift_processor >> data_target
    drift_processor >> metadata_target

    pipeline = builder.build(title='Hive drift test - Partition Types').configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)

    cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        qualified_name = _get_qualified_table_name(None, table_name)
        cursor.execute('RELOAD {0}'.format(qualified_name))
        cursor.execute('SELECT * from {0}'.format(qualified_name))
        rows_in_hive = [list(row) for row in cursor.fetchall()]
        assert len(rows_in_hive) == 1
        # SELECT * yields (id, name, value, part) — the same order as the OrderedDict.
        assert rows_in_hive == [list(record.values()) for record in raw_data]
    finally:
        logger.info('Dropping table %s in Hive...', table_name)
        cursor.execute('DROP TABLE `{0}`'.format(table_name))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
def test_multiplexing(sdc_builder, sdc_executor, cluster):
    """Validate multiplexing tables and assert data is right. The pipeline looks like:
        dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
        hive_metadata >> hadoop_fs
        hive_metadata >> hive_metastore
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_suffix = get_random_string(string.ascii_lowercase, 10)
    # Two logical tables ('towns_*' and 'invoice_*') interleaved in a single input stream;
    # Hive Metadata routes each record via the 'table' header attribute set below.
    raw_data = [dict(id=1, name='San Francisco', table=f'towns_{table_suffix}', country='US', year='2016'),
                dict(id=2, customer='John', value=200, table=f'invoice_{table_suffix}', country='India', year='2015'),
                dict(id=3, name='Friedberg', table=f'towns_{table_suffix}', country='Germany', year='2017'),
                dict(id=4, customer='James', value=300, table=f'invoice_{table_suffix}',
                     country='Argentina', year='2014')]
    # Expected rows per table: group the input by table name (groupby needs the pre-sort
    # on the same key), drop the routing 'table' key, and order rows by id.
    table_to_rows = {k: [[val for key, val in v.items() if key != 'table']
                         for v in sorted(list(g), key=itemgetter('id'))]
                     for k, g in groupby(sorted(raw_data, key=itemgetter('table')), key=itemgetter('table'))}
    dev_raw_data_source_data = ''.join(json.dumps(d) for d in raw_data)
    # Every table is partitioned by country and year, both read from header attributes.
    partition_configuration = [{'name': 'country', 'valueType': 'STRING', 'valueEL': '${record:attribute("country")}'},
                               {'name': 'year', 'valueType': 'STRING', 'valueEL': '${record:attribute("year")}'}]
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    # Copy the routing fields into header attributes before the fields themselves are removed.
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.header_attribute_expressions = [{'attributeToSet': 'table',
                                                          'headerAttributeExpression': "${record:value('/table')}"},
                                                         {'attributeToSet': 'country',
                                                          'headerAttributeExpression': "${record:value('/country')}"},
                                                         {'attributeToSet': 'year',
                                                          'headerAttributeExpression': "${record:value('/year')}"}]
    field_remover = pipeline_builder.add_stage('Field Remover')
    field_remover.set_attributes(fields=['/table', '/country', '/year'])
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="default",
                                 external_table=False,
                                 partition_configuration=partition_configuration,
                                 decimal_scale_expression='2',
                                 decimal_precision_expression='4',
                                 table_name='${record:attribute("table")}')
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    pipeline = pipeline_builder.build(title='Hive drift test - Multiplexing').configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        for table_name, expected_rows in table_to_rows.items():
            logger.info('Validating table %s in Hive...', table_name)
            hive_cursor.execute('RELOAD {0}'.format(_get_qualified_table_name(None, table_name)))
            hive_cursor.execute('SELECT * from {0}'.format(_get_qualified_table_name(None, table_name)))
            hive_values = [list(row) for row in hive_cursor.fetchall()]
            # Hive returns rows in unspecified order; compare after sorting by id (column 0).
            assert sorted(hive_values, key=itemgetter(0)) == expected_rows
    finally:
        for table_name in set([r['table'] for r in raw_data]):
            logger.info('Dropping table %s in Hive...', table_name)
            hive_cursor.execute('DROP TABLE `{0}`'.format(table_name))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
def test_special_characters_in_partition_value(sdc_builder, sdc_executor, cluster):
    """Validate special characters for partition value . The pipeline looks like:
        dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
        hive_metadata >> [hadoop_fs, wiretap.destination]
        hive_metadata >> hive_metastore
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = get_random_string(string.ascii_lowercase, 20)
    # Candidate partition-value character -> whether Hive Metadata should accept it.
    partition_values = OrderedDict([("-", True), ("_", True), ("$", True), (",", True),
                                    ("(", True), (")", True), ("&", True), ("@", True),
                                    ("!", True), (".", True), ("|", True), ("~", True),
                                    ("`", True),
                                    ("\\", False), ("'", False), ("[", False), ("]", False),
                                    ("/", False), ("?", False), ("*", False), ("\"", False),
                                    ("%", False), ("=", False), ("^", False)])
    unsupported_partition_values = set([partition_value for partition_value, supported in partition_values.items()
                                        if not supported])
    # One record per candidate character; the random id keeps rows distinguishable.
    raw_data = [dict(id=str(uuid.uuid4()), part=partition_value) for partition_value in partition_values.keys()]
    dev_raw_data_source_data = ''.join(json.dumps(d) for d in raw_data)
    partition_configuration = [{'name': 'part', 'valueType': 'STRING', 'valueEL': '${record:attribute("part")}'}]
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    # Copy '/part' into a header attribute (used by the partition EL) then drop the field.
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.header_attribute_expressions = [{'attributeToSet': 'part',
                                                          'headerAttributeExpression': "${record:value('/part')}"}]
    field_remover = pipeline_builder.add_stage('Field Remover')
    field_remover.set_attributes(fields=['/part'])
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="default",
                                 external_table=False,
                                 partition_configuration=partition_configuration,
                                 decimal_scale_expression='2',
                                 decimal_precision_expression='4',
                                 table_name=table_name)
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    wiretap = pipeline_builder.add_wiretap()
    dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
    hive_metadata >> [hadoop_fs, wiretap.destination]
    hive_metadata >> hive_metastore
    pipeline = pipeline_builder.build().configure_for_environment(cluster)
    # Error records must keep the original (pre-remover) shape so '/part' is still readable
    # from the wiretap's error stream below.
    pipeline.configuration['errorRecordPolicy'] = 'ORIGINAL_RECORD'
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        error_records = wiretap.error_records
        assert len(unsupported_partition_values) == len(error_records)
        for error_record in error_records:
            assert error_record.field['part'] in unsupported_partition_values
        logger.info('Validating Supported Partition Characters for Table %s in Hive...', table_name)
        hive_cursor.execute('RELOAD {0}'.format(_get_qualified_table_name(None, table_name)))
        hive_cursor.execute('SELECT * from {0}'.format(_get_qualified_table_name(None, table_name)))
        hive_values = [list(row) for row in hive_cursor.fetchall()]
        hive_values = sorted(hive_values, key=itemgetter(1))
        # Only the rows carrying a supported partition character should have landed in Hive.
        expected_values = sorted([list(row.values()) for row in raw_data
                                  if partition_values[row['part']]], key=itemgetter(1))
        assert expected_values == hive_values
    finally:
        logger.info('Dropping table %s in Hive...', table_name)
        hive_cursor.execute('DROP TABLE `{0}`'.format(table_name))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
@pytest.mark.parametrize('table_or_column', [True, False])
@pytest.mark.parametrize('special_character', ['#', '-', '$', '.'])
def test_special_characters_in_table_and_columns(sdc_builder, sdc_executor, cluster,
                                                 table_or_column, special_character):
    """Validate special characters for table and columns. The pipeline looks like:
        dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
        hive_metadata >> [hadoop_fs, wiretap.destination]
        hive_metadata >> hive_metastore
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    # https://docs.cloudera.com/cdp/latest/data-migration/topics/cdp-data-migration-dbtable.html
    if isinstance(cluster, ClouderaManagerCluster) and cluster.version.startswith('cdh7') and special_character == '.':
        pytest.skip(f"CDH 7 ({cluster.version}) doesn't support dot in a table name")
    # table_or_column=True puts the special character in the table name; False in the column name.
    object_name_prefix_suffix = get_random_string(string.ascii_lowercase, 5)
    object_name = f'{object_name_prefix_suffix}{special_character}{object_name_prefix_suffix}'
    table_name = object_name if table_or_column else object_name_prefix_suffix
    col_name = object_name if not table_or_column else object_name_prefix_suffix
    raw_data = dict(id=str(uuid.uuid4()), table=table_name)
    raw_data[col_name] = str(uuid.uuid4())
    # ''.join over a str is a no-op; the payload is simply the JSON-encoded record.
    dev_raw_data_source_data = ''.join(json.dumps(raw_data))
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    # Route by the '/table' field via a header attribute.
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.header_attribute_expressions = [{'attributeToSet': 'table',
                                                          'headerAttributeExpression': "${record:value('/table')}"}]
    field_remover = pipeline_builder.add_stage('Field Remover')
    # NOTE(review): '/part' never exists in this input (fields are id/table/<col>), so this
    # remover is a no-op — presumably '/table' was intended; confirm before changing, since
    # the test only asserts an error record either way.
    field_remover.set_attributes(fields=['/part'])
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="default",
                                 external_table=False,
                                 partition_configuration=[],
                                 decimal_scale_expression='2',
                                 decimal_precision_expression='4',
                                 table_name="${record:attribute('table')}")
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    wiretap = pipeline_builder.add_wiretap()
    dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
    hive_metadata >> [hadoop_fs, wiretap.destination]
    hive_metadata >> hive_metastore
    pipeline = (pipeline_builder.build(title='Hive drift test - Table/Column Special Characters')
                .configure_for_environment(cluster))
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        # Special characters in table/column names are invalid for Hive Metadata,
        # so the single input record must be sent to the error stream.
        assert 1 == len(wiretap.error_records)
        # TODO: TLKT-41 - Add support for reading reserved headers from RecordHeader to assert error code
    finally:
        logger.info('Dropping table %s in Hive...', table_name)
        hive_cursor.execute('DROP TABLE `{0}`'.format(table_name))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
@pytest.mark.parametrize('keyword', ["table", "create", "date", "as", "year", "string", "default"])
def test_keywords_in_object_names(sdc_builder, sdc_executor, cluster, keyword):
    """Validate different keywords in table/database name. The pipeline looks like:
        dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
        hive_metadata >> hadoop_fs
        hive_metadata >> hive_metastore

    Both the database and the table are named after a Hive keyword, so every statement
    issued here must back-quote the identifiers (_get_qualified_table_name handles that
    for qualified references).
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = keyword
    db = keyword
    raw_data = dict(id=str(uuid.uuid4()), part=str(uuid.uuid4()))
    dev_raw_data_source_data = json.dumps(raw_data)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    # Database/table names and the partition value travel as record header attributes.
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.header_attribute_expressions = [{'attributeToSet': 'database',
                                                          'headerAttributeExpression': db},
                                                         {'attributeToSet': 'table',
                                                          'headerAttributeExpression': table_name},
                                                         {'attributeToSet': 'part',
                                                          'headerAttributeExpression': "${record:value('/part')}"}]
    field_remover = pipeline_builder.add_stage('Field Remover')
    field_remover.set_attributes(fields=['/part'])
    partition_configuration = [{'name': 'part', 'valueType': 'STRING', 'valueEL': '${record:attribute("part")}'}]
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="${record:attribute('database')}",
                                 external_table=False,
                                 partition_configuration=partition_configuration,
                                 decimal_scale_expression='2',
                                 decimal_precision_expression='4',
                                 table_name="${record:attribute('table')}")
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    pipeline = pipeline_builder.build(title='Hive drift test - Keywords').configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    hive_cursor.execute(f'CREATE DATABASE IF NOT EXISTS `{db}`')
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        logger.info('Validating table %s in Hive...', _get_qualified_table_name(db, table_name))
        hive_cursor.execute('RELOAD {0}'.format(_get_qualified_table_name(db, table_name)))
        hive_cursor.execute('SELECT * from {0}'.format(_get_qualified_table_name(db, table_name)))
        hive_values = [list(row) for row in hive_cursor.fetchall()]
        assert 1 == len(hive_values)
        # SELECT * returns (id, part) in column order, matching raw_data's insertion order.
        assert hive_values[0] == list(raw_data.values())
    finally:
        logger.info('Dropping table %s in Hive...', _get_qualified_table_name(db, table_name))
        hive_cursor.execute('DROP TABLE {0}'.format(_get_qualified_table_name(db, table_name)))
        # Hive's built-in 'default' database must never be dropped.
        if db != 'default':
            logger.info('Dropping Database %s in Hive...', db)
            # Fix: the statement previously read 'DROP DATABASE IF EXISTS`x`' with no space
            # before the back-quote, relying on the Hive lexer to split the tokens.
            hive_cursor.execute('DROP DATABASE IF EXISTS `{0}`'.format(db))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
@pytest.mark.parametrize('location', ['', 'schemaFolder', '/tmp/absoluteLocation', "${str:concat('a', 'b')}"])
def test_hdfs_schema_serialization(sdc_builder, sdc_executor, cluster, location):
    """Validate schema location exists when STORED_AS_AVRO is unchecked. The pipeline looks like:
        dev_raw_data_source >> hive_metadata
        hive_metadata >> hadoop_fs
        hive_metadata >> hive_metastore

    Parametrized over: default folder (''), a relative folder, an absolute path, and an
    EL expression that must be evaluated ('ab') before being resolved.
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = get_random_string(string.ascii_lowercase, 20)
    raw_data = dict(id=str(uuid.uuid4()), table=table_name)
    dev_raw_data_source_data = json.dumps(raw_data)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    # External table pinned to /tmp/<table> so the schema folder location is predictable.
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="default",
                                 external_table=True,
                                 partition_configuration=[],
                                 decimal_scale_expression='2',
                                 decimal_precision_expression='4',
                                 table_name=table_name,
                                 table_path_template=f"/tmp/{table_name}")
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    # stored_as_avro=False makes Hive Metastore serialize the Avro schema out to HDFS,
    # which is exactly what this test asserts on.
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    hive_metastore.stored_as_avro = False
    if location:
        hive_metastore.schema_folder_location = location
    dev_raw_data_source >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    pipeline = (pipeline_builder.build(title='Hive drift test - Serialization Location')
                .configure_for_environment(cluster))
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    # Work out where the schema directory should appear for this parametrization.
    if location.startswith('/'):
        # Absolute paths are used verbatim.
        expected_location = location
    elif location == "${str:concat('a', 'b')}":
        # The EL evaluates to 'ab', resolved relative to the table path.
        expected_location = f'/tmp/{table_name}/ab'
    elif location:
        # Plain relative folder under the table path.
        expected_location = f'/tmp/{table_name}/{location}'
    else:
        # Default schema folder name when none is configured.
        expected_location = f'/tmp/{table_name}/.schemas'
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        status = cluster.hdfs.client.status(expected_location)
        assert status is not None and status['type'] == 'DIRECTORY'
    finally:
        # Delete schema location
        logger.info('Deleting Schema Directory %s in Hadoop FS...', expected_location)
        cluster.hdfs.client.delete(expected_location, recursive=True)
        logger.info('Dropping table %s in Hive...', table_name)
        hive_cursor.execute('DROP TABLE `{0}`'.format(table_name))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
def test_decimal_values(sdc_builder, sdc_executor, cluster, keep_data):
    """Validate different decimal values. The pipeline looks like:
        dev_raw_data_source >> field_type_converter >> hive_metadata
        hive_metadata >> [hadoop_fs, wiretap.destination]
        hive_metadata >> hive_metastore
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = get_random_string(string.ascii_lowercase, 20)
    # Values that fit the decimal(4, 2) column created below.
    valid_rows = [dict(id=1, number=12.12), dict(id=2, number=1.0), dict(id=3, number=12.0),
                  dict(id=4, number=0.1), dict(id=5, number=0.12), dict(id=6, number=12)]
    # incompatible scale, precision
    invalid_rows = [dict(id=7, number=0.123), dict(id=8, number=12345)]
    raw_data = valid_rows + invalid_rows
    dev_raw_data_source_data = ''.join(json.dumps(d) for d in raw_data)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    # Convert '/number' to a DECIMAL field before it reaches Hive Metadata.
    field_type_converter = pipeline_builder.add_stage('Field Type Converter')
    field_type_converter.conversion_method = 'BY_FIELD'
    field_type_converter.set_attributes(field_type_converter_configs=[{'fields': ['/number'],
                                                                       'targetType':'DECIMAL', }])
    # scale=2/precision=4 must match the decimal(4, 2) column of the pre-created table.
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="default",
                                 external_table=True,
                                 partition_configuration=[],
                                 decimal_scale_expression='2',
                                 decimal_precision_expression='4',
                                 table_name=table_name,
                                 table_path_template=f"/tmp/{table_name}")
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    # On CDH 7, write through an impersonation user — presumably for permissions on the
    # /tmp table path (TODO confirm); the user differs for kerberized clusters.
    if isinstance(cluster, ClouderaManagerCluster) and cluster.version.startswith('cdh7'):
        if cluster.kerberized_services:
            hadoop_fs.impersonation_user = "sdctest"
        else:
            hadoop_fs.impersonation_user = "root"
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    wiretap = pipeline_builder.add_wiretap()
    dev_raw_data_source >> field_type_converter >> hive_metadata
    hive_metadata >> [hadoop_fs, wiretap.destination]
    hive_metadata >> hive_metastore
    pipeline = pipeline_builder.build().configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    # Pre-create the external table with the exact decimal(4, 2) column the records must fit.
    create_table_command = ('CREATE EXTERNAL TABLE IF NOT EXISTS {0} (id int, number decimal(4, 2))'
                            ' STORED AS AVRO LOCATION "/tmp/{1}"').format(_get_qualified_table_name(None, table_name), table_name)
    hive_cursor.execute(create_table_command)
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        logger.info('Validating table %s in Hive...', _get_qualified_table_name(None, table_name))
        hive_cursor.execute('RELOAD {0}'.format(_get_qualified_table_name(None, table_name)))
        hive_cursor.execute('SELECT * from {0}'.format(_get_qualified_table_name(None, table_name)))
        hive_values = [list(row) for row in hive_cursor.fetchall()]
        # Valid rows land in Hive with 'number' as a Decimal.
        assert hive_values == [[Decimal(str(v)) if k == 'number' else v for k, v in row.items()]
                               for row in valid_rows]
        # Rows exceeding the precision/scale must be routed to the error stream instead.
        error_values = [[fld for k, fld in error_record.field.items()] for error_record in wiretap.error_records]
        assert error_values == [[Decimal(str(v)) if k == 'number' else v for k, v in row.items()]
                                for row in invalid_rows]
    finally:
        if not keep_data:
            logger.info('Dropping table %s in Hive...', table_name)
            hive_cursor.execute('DROP TABLE `{0}`'.format(table_name))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
def test_partial_input(sdc_builder, sdc_executor, cluster):
    """Verify records carrying only a subset of the table's columns. The pipeline looks like:
        dev_raw_data_source >> hive_metadata
        hive_metadata >> hadoop_fs
        hive_metadata >> hive_metastore

    Columns absent from an input record are expected to read back from Hive as NULL.
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')

    table_name = get_random_string(string.ascii_lowercase, 20)
    # Every combination of present/absent optional columns around a mandatory 'idx'.
    raw_data = [dict(idx=0), dict(idx=1),
                dict(idx=2, id='id2'), dict(idx=3, id='id3'),
                dict(idx=4, name='name4'), dict(idx=5, name='id4'),
                dict(idx=6, value='value6'), dict(idx=7, value='value7'),
                dict(idx=8, value='value8', id='id8'), dict(idx=9, value='value9', id='id9'),
                dict(idx=10, name='name10', id='id10'), dict(idx=11, name='name11', id='id11')]
    cols = ['idx', 'id', 'name', 'value']
    input_payload = ''.join(json.dumps(record) for record in raw_data)

    builder = sdc_builder.get_pipeline_builder()

    origin = builder.add_stage('Dev Raw Data Source')
    origin.set_attributes(data_format='JSON',
                          raw_data=input_payload,
                          stop_after_first_batch=True)

    drift_processor = builder.add_stage('Hive Metadata')
    drift_processor.set_attributes(data_format='AVRO',
                                   database_expression="default",
                                   external_table=False,
                                   partition_configuration=[],
                                   decimal_scale_expression='2',
                                   decimal_precision_expression='4',
                                   table_name=table_name)

    data_target = builder.add_stage('Hadoop FS', type='destination')
    data_target.set_attributes(avro_schema_location='HEADER',
                               data_format='AVRO',
                               directory_in_header=True,
                               use_roll_attribute=True)

    metadata_target = builder.add_stage('Hive Metastore', type='destination')

    origin >> drift_processor
    drift_processor >> data_target
    drift_processor >> metadata_target

    pipeline = builder.build(title='Hive drift test - Partial Input').configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)

    cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        qualified_name = _get_qualified_table_name(None, table_name)
        logger.info('Validating table %s in Hive...', qualified_name)
        cursor.execute('RELOAD {0}'.format(qualified_name))
        cursor.execute('SELECT * from {0}'.format(qualified_name))
        actual_rows = [list(row) for row in cursor.fetchall()]
        # Columns missing from a record should surface as NULL (None).
        expected_rows = [[record.get(col) for col in cols] for record in raw_data]
        assert sorted(actual_rows) == sorted(expected_rows)
    finally:
        logger.info('Dropping table %s in Hive...', table_name)
        cursor.execute('DROP TABLE `{0}`'.format(table_name))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
@pytest.mark.parametrize('external_table', [True, False])
def test_column_drift(sdc_builder, sdc_executor, cluster, external_table):
    """Validate Column Drift in inputs. The pipeline looks like:
    dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
    hive_metadata >> [hadoop_fs, wiretap.destination]
    hive_metadata >> hive_metastore

    Every record carries a 'table' field that routes it (via a header
    attribute) to one of several Hive tables, each exercising one drift
    scenario: column rename/add, column reorder, column removal, column-name
    case difference and column type change.  The type-change record that
    sends a string into an existing int column is expected to become an
    error record.
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    # Random suffix keeps the per-scenario table names unique across test runs.
    table_name_suffix = get_random_string(string.ascii_lowercase, 20)
    # OrderedDict preserves field order so the rename/reorder scenarios are meaningful.
    raw_data = [OrderedDict([('id', 0), ('table', f'column_rename_add{table_name_suffix}')]),
                OrderedDict([('id', 1), ('col1', 'col11'), ('table', f'column_rename_add{table_name_suffix}')]),
                OrderedDict([('id', 2), ('col2', 'col22'), ('table', f'column_rename_add{table_name_suffix}')]),
                OrderedDict([('id', 3), ('col1', 'col31'), ('middle', 'middle'),
                             ('col2', 'col32'), ('table', f'column_rename_add{table_name_suffix}')]),
                OrderedDict([('first', 0), ('second', 0), ('third', 0), ('table', f'reorder{table_name_suffix}')]),
                OrderedDict([('second', 1), ('third', 1), ('first', 1), ('table', f'reorder{table_name_suffix}')]),
                OrderedDict([('id', 4), ('removed', 'removed'), ('table', f'removed{table_name_suffix}')]),
                OrderedDict([('id', 5), ('table', f'removed{table_name_suffix}')]),
                OrderedDict([('id', 8), ('col', 'col_lower_case'), ('table', f'column_case{table_name_suffix}')]),
                OrderedDict([('id', 9), ('COL', 'col_upper_case'), ('table', f'column_case{table_name_suffix}')]),
                OrderedDict([('id', 10), ('col', 1), ('table', f'column_type_change{table_name_suffix}')]),
                OrderedDict([('id', 11), ('col', 'col'), ('table', f'column_type_change{table_name_suffix}')])]
    # External tables get an explicit path under /tmp; managed tables use Hive's default location.
    table_path_prefix = '/tmp/sdc/hive/warehouse/default/' if external_table else ''
    table_path_template = table_path_prefix + "${record:attribute('table')}"
    # The only record that cannot be applied to its table: a string value for a column
    # that was already created as int by the earlier record.
    table_to_invalid_rows = {f'column_type_change{table_name_suffix}': [OrderedDict(
        [('id', 11), ('col', 'col'), ('table', f'column_type_change{table_name_suffix}')])]}
    # Group the input per target table, dropping the routing 'table' field and
    # lower-casing column names (Hive column names are case-insensitive).
    table_to_raw_data = {k: [{col.lower(): val for col, val in v.items() if col != 'table'} for v in list(g)]
                         for k, g in groupby(sorted(raw_data, key=itemgetter('table')), key=itemgetter('table'))}
    # Expected final column set (in first-seen order) per table.
    table_to_cols = {k: OrderedDict({col.lower(): col.lower() for v in list(g)
                                     for col, val in v.items() if col != 'table'})
                     for k, g in groupby(sorted(raw_data, key=itemgetter('table')), key=itemgetter('table'))}
    dev_raw_data_source_data = ''.join(json.dumps(d) for d in raw_data)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    # Copy /table into a header attribute so Hive Metadata can route by it ...
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.header_attribute_expressions = [{'attributeToSet': 'table',
                                                          'headerAttributeExpression': "${record:value('/table')}"}]
    # ... and drop the field itself so it does not become a Hive column.
    field_remover = pipeline_builder.add_stage('Field Remover')
    field_remover.set_attributes(fields=['/table'])
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression='default',
                                 external_table=external_table,
                                 partition_configuration=[],
                                 decimal_scale_expression='2',
                                 decimal_precision_expression='4',
                                 table_path_template=table_path_template,
                                 table_name='${record:attribute("table")}')
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    wiretap = pipeline_builder.add_wiretap()
    dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
    hive_metadata >> [hadoop_fs, wiretap.destination]
    hive_metadata >> hive_metastore
    pipeline = pipeline_builder.build().configure_for_environment(cluster)
    # Keep error records in their original (pre-stage) shape for the assertions below.
    pipeline.configuration['errorRecordPolicy'] = 'ORIGINAL_RECORD'
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        assert len(wiretap.error_records) == 1  # column_type_change
        for table_name in table_to_raw_data.keys():
            logger.info('Validating table %s in Hive...', _get_qualified_table_name(None, table_name))
            hive_cursor.execute('RELOAD {0}'.format(_get_qualified_table_name(None, table_name)))
            # Column names (and their order) must match what drift produced.
            table_columns_and_type = _get_table_columns_and_type(hive_cursor, None, table_name)
            hive_table_columns = list(table_columns_and_type.keys())
            data_table_cols = table_to_cols[table_name]
            assert hive_table_columns == list(data_table_cols.keys())
            hive_cursor.execute('SELECT * from {0}'.format(_get_qualified_table_name(None, table_name)))
            hive_values = sorted([list(row) for row in hive_cursor.fetchall()], key=itemgetter(0))
            # Rows that were sent to error must not show up in the table data.
            invalid_rows = ([[v for k, v in row.items() if k != 'table'] for row in table_to_invalid_rows[table_name]]
                            if table_name in table_to_invalid_rows else [])
            expected_data = sorted([[data[col] if col in data else None for col in hive_table_columns]
                                    for data in table_to_raw_data[table_name]
                                    if list(data.values()) not in invalid_rows],
                                   key=itemgetter(0))
            assert hive_values == expected_data
        # The error records must be exactly the invalid rows, with all original fields.
        error_values = [[fld for k, fld in error_record.field.items()] for error_record in wiretap.error_records]
        assert error_values == [[v for k, v in row.items()]
                                for table_invalid_rows in table_to_invalid_rows.values()
                                for row in table_invalid_rows]
    finally:
        for table_name in table_to_raw_data.keys():
            logger.info('Dropping table %s in Hive...', table_name)
            hive_cursor.execute('DROP TABLE `{0}`'.format(table_name))
            if external_table:
                # DROP TABLE on an external table leaves the files behind; remove them too.
                logger.info('Deleting Hadoop FS directory %s ...', table_path_prefix + table_name)
                cluster.hdfs.client.delete(table_path_prefix + table_name, recursive=True)
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
@pytest.mark.parametrize('data_format', ['AVRO', 'PARQUET'])
def test_unsupported_table_data_formats(sdc_builder, sdc_executor, cluster, data_format):
    """Validate Unsupported Data Formats. The pipeline looks like:
    dev_raw_data_source >> hive_metadata
    hive_metadata >> [hadoop_fs, wiretap.destination]
    hive_metadata >> hive_metastore

    The target table is pre-created as TEXTFILE, which does not match the
    stage's AVRO/PARQUET data format, so the record must be sent to error
    and the table must remain empty.
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = get_random_string(string.ascii_lowercase, 20)
    raw_data = dict(id=str(uuid.uuid4()), name=get_random_string(string.ascii_lowercase, 20))
    # The input is a single JSON document; the original ''.join(...) around it was a no-op.
    dev_raw_data_source_data = json.dumps(raw_data)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format=data_format,
                                 database_expression="default",
                                 external_table=False,
                                 partition_configuration=[],
                                 decimal_scale_expression='2',
                                 decimal_precision_expression='4',
                                 table_name=table_name)
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    wiretap = pipeline_builder.add_wiretap()
    dev_raw_data_source >> hive_metadata
    hive_metadata >> [hadoop_fs, wiretap.destination]
    hive_metadata >> hive_metastore
    pipeline = (pipeline_builder.build(title='Hive drift test - Unsupported Table Data Format Test')
                .configure_for_environment(cluster))
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        # Pre-create the table with a storage format the pipeline cannot write to.
        hive_cursor.execute(
            f'CREATE TABLE `{table_name}` (id int, value string) partitioned by (dt String) ROW FORMAT DELIMITED FIELDS TERMINATED BY \',\' STORED AS TEXTFILE')
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        # The single input record must have been routed to error ...
        assert len(wiretap.error_records) == 1
        logger.info('Validating table %s in Hive...', _get_qualified_table_name(None, table_name))
        hive_cursor.execute('RELOAD {0}'.format(_get_qualified_table_name(None, table_name)))
        hive_cursor.execute('SELECT * from {0}'.format(_get_qualified_table_name(None, table_name)))
        # ... and nothing must have been written to the table.
        assert len([list(row) for row in hive_cursor.fetchall()]) == 0
    finally:
        logger.info('Dropping table %s in Hive...', table_name)
        hive_cursor.execute('DROP TABLE `{0}`'.format(table_name))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
def test_drift_multiple_open_partitions(sdc_builder, sdc_executor, cluster):
    """Validate Multiple open partitions for the table. The pipeline looks like:
    dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore

    Records alternate between six partitions and some introduce a new column
    mid-stream, so several partitions are open while the schema drifts.
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')

    table_name = get_random_string(string.ascii_lowercase, 20)
    raw_data = [dict(id=str(uuid.uuid4()), part='part1'),
                dict(id=str(uuid.uuid4()), part='part2'),
                dict(id=str(uuid.uuid4()), part='part3', new_col='new_col31'),
                dict(id=str(uuid.uuid4()), part='part4'),
                dict(id=str(uuid.uuid4()), part='part5'),
                dict(id=str(uuid.uuid4()), part='part6', new_col='new_col61'),
                dict(id=str(uuid.uuid4()), part='part2', new_col='new_col21'),
                dict(id=str(uuid.uuid4()), part='part6'),
                dict(id=str(uuid.uuid4()), part='part4', new_col='new_col41'),
                dict(id=str(uuid.uuid4()), part='part2', new_col='new_col22')]

    builder = sdc_builder.get_pipeline_builder()

    origin = builder.add_stage('Dev Raw Data Source')
    origin.set_attributes(data_format='JSON',
                          raw_data=''.join(json.dumps(record) for record in raw_data),
                          stop_after_first_batch=True)

    # Copy /part into a header attribute so it can drive the partition EL below.
    evaluator = builder.add_stage('Expression Evaluator')
    evaluator.header_attribute_expressions = [{'attributeToSet': 'part',
                                               'headerAttributeExpression': "${record:value('/part')}"}]

    # Drop the routing field so it is not materialized as a data column.
    remover = builder.add_stage('Field Remover')
    remover.set_attributes(fields=['/part'])

    hive_metadata = builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression='default',
                                 external_table=False,
                                 partition_configuration=[{'name': 'part',
                                                           'valueType': 'STRING',
                                                           'valueEL': '${record:attribute("part")}'}],
                                 decimal_scale_expression='2',
                                 decimal_precision_expression='4',
                                 table_name=table_name)

    hadoop_fs = builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)

    hive_metastore = builder.add_stage('Hive Metastore', type='destination')

    origin >> evaluator >> remover >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore

    pipeline = (builder.build(title='Hive drift test - Multiple open partitions')
                .configure_for_environment(cluster))
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        logger.info('Validating table %s in Hive...', table_name)
        hive_cursor.execute(f'RELOAD {table_name}')
        table_columns = ['id', 'new_col', 'part']
        hive_cursor.execute(f'SELECT * from {table_name}')
        actual_rows = sorted((list(row) for row in hive_cursor.fetchall()), key=itemgetter(0))
        # Records missing 'new_col' are expected to read back as NULL in that column.
        expected_rows = sorted(([record.get(column) for column in table_columns] for record in raw_data),
                               key=itemgetter(0))
        assert actual_rows == expected_rows
    finally:
        logger.info('Dropping table %s in Hive...', table_name)
        hive_cursor.execute(f'DROP TABLE {table_name}')
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
def test_sub_partitions(sdc_builder, sdc_executor, cluster):
    """Validate Sub partitions. The pipeline looks like:
    dev_raw_data_source >> expression_evaluator >> field_type_convertor >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore

    The table is partitioned six levels deep (year/month/day/hour/minute/
    seconds), all derived from each record's own timestamp via the stage's
    time basis.
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = get_random_string(string.ascii_lowercase, 20)
    raw_data = [dict(id=1, name='abc', timestamp=datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
                dict(id=2, name='def', timestamp=datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
                dict(id=3, name='ghi', timestamp=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))]
    dev_raw_data_source_data = ''.join(json.dumps(d) for d in raw_data)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.set_attributes(header_attribute_expressions=[{'attributeToSet': 'table_name',
                                                                       'headerAttributeExpression': table_name}])
    # Convert the string timestamp to DATETIME so it can serve as the time basis below.
    field_type_converter = pipeline_builder.add_stage('Field Type Converter')
    field_type_converter.conversion_method = 'BY_FIELD'
    field_type_converter.set_attributes(field_type_converter_configs=[{'fields': ['/timestamp'],
                                                                       'targetType': 'DATETIME',
                                                                       'dateFormat': 'YYYY_MM_DD_HH_MM_SS'}])
    # Six nested partition levels, each evaluated against the record's time basis.
    partition_configuration = [{'name': 'year', 'valueType': 'STRING', 'valueEL': '${YYYY()}'},
                               {'name': 'month', 'valueType': 'STRING', 'valueEL': '${MM()}'},
                               {'name': 'day', 'valueType': 'STRING', 'valueEL': '${DD()}'},
                               {'name': 'hour', 'valueType': 'STRING', 'valueEL': '${hh()}'},
                               {'name': 'minute', 'valueType': 'STRING', 'valueEL': '${mm()}'},
                               {'name': 'seconds', 'valueType': 'STRING', 'valueEL': '${ss()}'}]
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="${record:attribute('db')}",
                                 external_table=False,
                                 partition_configuration=partition_configuration,
                                 decimal_scale_expression='5',
                                 decimal_precision_expression='10',
                                 table_name="${record:attribute('table_name')}",
                                 time_basis="${record:value('/timestamp')}")
    if Version(sdc_builder.version) >= Version('3.9.0'):
        # Convert Timestamps To String has to be set like below as it was not working with the set_attributes()
        # method.  Probably it was not working due to having the stage defined in more than one lib, but this has
        # not been confirmed.  Additionally it has to be set only for SDC versions >= 3.9.0 as that is when this
        # new property was added.
        hive_metadata.configuration.update({"convertTimesToString": True})
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    dev_raw_data_source >> expression_evaluator >> field_type_converter >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    pipeline = pipeline_builder.build(title='Hive drift test - Cold Start').configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        hive_cursor.execute('RELOAD {0}'.format(_get_qualified_table_name(None, table_name)))
        hive_cursor.execute('SELECT * from {0}'.format(_get_qualified_table_name(None, table_name)))
        hive_values = [list(row) for row in hive_cursor.fetchall()]

        def split_date_time_string(datetime_str):
            # Expand a 'YYYY-mm-dd HH:MM:SS' string into the timestamp column
            # followed by the six partition-column values appended to each row.
            v = datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
            return [datetime_str, v.strftime('%Y'), v.strftime('%m'),
                    v.strftime('%d'), v.strftime('%H'), v.strftime('%M'),
                    v.strftime('%S')]

        # Partition columns come back as extra trailing columns in SELECT *.
        raw_values = [list(chain.from_iterable([[v] if k != 'timestamp' else split_date_time_string(v)
                                                for k, v in row.items()])) for row in raw_data]
        assert sorted(hive_values) == sorted(raw_values)
    finally:
        logger.info('Dropping table %s in Hive...', _get_qualified_table_name(None, table_name))
        hive_cursor.execute('DROP TABLE {0}'.format(_get_qualified_table_name(None, table_name)))
@sdc_min_version('3.9.0')
@cluster('cdh')
def test_native_parquet_timestamps(sdc_builder, sdc_executor, cluster):
    """Validate native timestamps can be written in a Hive Parquet table. The pipeline looks like:
    dev_raw_data_source >> expression_evaluator >> field_type_convertor >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    hadoop_fs >= mapreduce

    Hadoop FS writes Avro files (one record per file); each closed-file event
    triggers a MapReduce job that converts the file to Parquet.
    """
    table_name = get_random_string(string.ascii_lowercase, 20)
    raw_data = [dict(id=1, name='abc', timestamp=datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
                dict(id=2, name='def', timestamp=datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
                dict(id=3, name='ghi', timestamp=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))]
    dev_raw_data_source_data = ''.join(json.dumps(d) for d in raw_data)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.set_attributes(header_attribute_expressions=[{'attributeToSet': 'table_name',
                                                                       'headerAttributeExpression': table_name}])
    # Convert the string timestamp to DATETIME so it is written as a native timestamp.
    field_type_converter = pipeline_builder.add_stage('Field Type Converter')
    field_type_converter.conversion_method = 'BY_FIELD'
    field_type_converter.set_attributes(field_type_converter_configs=[{'fields': ['/timestamp'],
                                                                       'targetType': 'DATETIME',
                                                                       'dateFormat': 'YYYY_MM_DD_HH_MM_SS'}])
    # Six nested partition levels derived from each record's timestamp (the time basis).
    partition_configuration = [{'name': 'year', 'valueType': 'STRING', 'valueEL': '${YYYY()}'},
                               {'name': 'month', 'valueType': 'STRING', 'valueEL': '${MM()}'},
                               {'name': 'day', 'valueType': 'STRING', 'valueEL': '${DD()}'},
                               {'name': 'hour', 'valueType': 'STRING', 'valueEL': '${hh()}'},
                               {'name': 'minute', 'valueType': 'STRING', 'valueEL': '${mm()}'},
                               {'name': 'seconds', 'valueType': 'STRING', 'valueEL': '${ss()}'}]
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='PARQUET',
                                 database_expression="${record:attribute('db')}",
                                 external_table=True,
                                 partition_configuration=partition_configuration,
                                 decimal_scale_expression='5',
                                 decimal_precision_expression='10',
                                 table_name="${record:attribute('table_name')}",
                                 time_basis="${record:value('/timestamp')}",
                                 table_path_template=f"/tmp/{table_name}")
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    # max_records_in_file=1 rolls a file per record, so every record produces a
    # closed-file event (and thus its own MapReduce conversion job).
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True,
                             max_records_in_file=1)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    mapreduce = pipeline_builder.add_stage('MapReduce', type='executor')
    # Convert each closed Avro file to Parquet, writing the output two directory
    # levels up from the temporary file path carried in the event record.
    mapreduce.set_attributes(job_type='AVRO_PARQUET',
                             output_directory="${file:parentPath(file:parentPath(record:value('/filepath')))}")
    dev_raw_data_source >> expression_evaluator >> field_type_converter >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    hadoop_fs >= mapreduce
    pipeline = pipeline_builder.build().configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        # Fixed wait for the asynchronous MapReduce conversion jobs to finish.
        # NOTE(review): the sleep is 120s (an earlier comment said 45s); consider
        # polling the job status instead of sleeping a fixed amount.
        time.sleep(120)
        hive_cursor.execute('RELOAD {0}'.format(_get_qualified_table_name(None, table_name)))
        hive_cursor.execute('SELECT * from {0}'.format(_get_qualified_table_name(None, table_name)))
        hive_values = [list(row) for row in hive_cursor.fetchall()]

        def split_date_time_string(datetime_str):
            # Expand the timestamp into the expected row values: the native
            # datetime object followed by the six partition-column strings.
            v = datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
            return [v, v.strftime('%Y'), v.strftime('%m'),
                    v.strftime('%d'), v.strftime('%H'), v.strftime('%M'),
                    v.strftime('%S')]

        # Partition columns come back as extra trailing columns in SELECT *.
        raw_values = [list(chain.from_iterable([[v] if k != 'timestamp' else split_date_time_string(v)
                                                for k, v in row.items()])) for row in raw_data]
        assert sorted(hive_values) == sorted(raw_values)
    finally:
        logger.info('Dropping table %s in Hive...', _get_qualified_table_name(None, table_name))
        hive_cursor.execute('DROP TABLE {0}'.format(_get_qualified_table_name(None, table_name)))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
def test_events(sdc_builder, sdc_executor, cluster):
    """Validate Events from hive_metadata. The pipeline looks like:
    dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    hive_metastore >= wiretap.destination

    The input is crafted so Hive Metastore emits, in order: one new-table
    event, then alternating new-partition and new-columns events as new
    partitions and columns appear in the stream.
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = get_random_string(string.ascii_lowercase, 20)
    # Each record either introduces a new column (col1, col2) or a new partition (part1..part3).
    raw_data = [dict(id=str(uuid.uuid4()), name='abc', part='part1'),
                dict(id=str(uuid.uuid4()), name='def', col1='col1', part='part1'),
                dict(id=str(uuid.uuid4()), name='ghi', col1='col1', part='part2'),
                dict(id=str(uuid.uuid4()), name='jkl', col1='col1', col2='col2', part='part3')]
    cols = ['id', 'name', 'col1', 'col2', 'part']
    dev_raw_data_source_data = ''.join(json.dumps(d) for d in raw_data)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    # Copy /part into a header attribute to drive partitioning, then drop the field itself.
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.header_attribute_expressions = [{'attributeToSet': 'part',
                                                          'headerAttributeExpression': "${record:value('/part')}"}]
    field_remover = pipeline_builder.add_stage('Field Remover')
    field_remover.set_attributes(fields=['/part'])
    partition_configuration = [{'name': 'part', 'valueType': 'STRING', 'valueEL': '${record:attribute("part")}'}]
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression='default',
                                 external_table=False,
                                 partition_configuration=partition_configuration,
                                 decimal_scale_expression='2',
                                 decimal_precision_expression='4',
                                 table_name=table_name)
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    wiretap = pipeline_builder.add_wiretap()
    dev_raw_data_source >> expression_evaluator >> field_remover >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    # Capture Hive Metastore's event stream (>=), not its data output.
    hive_metastore >= wiretap.destination
    pipeline = (pipeline_builder.build(title='Hive drift test - Events Test')
                .configure_for_environment(cluster))
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    # Expected (event type, event payload) pairs, in emission order.
    expected_event_type_values = [('new-table', dict(table=_get_qualified_table_name('default', table_name),
                                                     columns=OrderedDict(id='STRING', name='STRING'),
                                                     partitions=OrderedDict(part='STRING'))),
                                  ('new-partition', dict(table=_get_qualified_table_name('default', table_name),
                                                         partition=OrderedDict(part='part1'))),
                                  ('new-columns', dict(table=_get_qualified_table_name('default', table_name),
                                                       columns=OrderedDict(col1='STRING'))),
                                  ('new-partition', dict(table=_get_qualified_table_name('default', table_name),
                                                         partition=OrderedDict(part='part2'))),
                                  ('new-columns', dict(table=_get_qualified_table_name('default', table_name),
                                                       columns=OrderedDict(col2='STRING'))),
                                  ('new-partition', dict(table=_get_qualified_table_name('default', table_name),
                                                         partition=OrderedDict(part='part3')))]
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        assert len(wiretap.output_records) == 6
        # Events must arrive in the exact expected order with matching payloads.
        for event_idx in range(len(wiretap.output_records)):
            event_record = wiretap.output_records[event_idx]
            event_type, expected_event_values = expected_event_type_values[event_idx]
            assert event_type == event_record.header['values']['sdc.event.type']
            assert expected_event_values == event_record.field
        logger.info('Validating table %s in Hive...', _get_qualified_table_name(None, table_name))
        hive_cursor.execute('RELOAD {0}'.format(_get_qualified_table_name(None, table_name)))
        hive_cursor.execute('SELECT * from {0}'.format(_get_qualified_table_name(None, table_name)))
        hive_values = sorted([list(row) for row in hive_cursor.fetchall()], key=itemgetter(0))
        # Records missing a column are expected to read back as NULL in it.
        expected_values = sorted([[row[col] if col in row else None for col in cols] for row in raw_data],
                                 key=itemgetter(0))
        assert hive_values == expected_values
    finally:
        logger.info('Dropping table %s in Hive...', table_name)
        hive_cursor.execute('DROP TABLE `{0}`'.format(table_name))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
@pytest.mark.parametrize('stop_on_query_failure', [True, False])
def test_hive_query_executor(sdc_builder, sdc_executor, cluster, stop_on_query_failure):
    """Validate Hive Query Executor. The pipeline looks like:
    dev_raw_data_source >> hive_query_executor >= wiretap.destination

    Each record triggers three queries; the second record's middle query
    fails.  When stop_on_query_failure is set, the remaining query of that
    record is skipped and reported in the failed-query event; otherwise it
    is still executed.
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    raw_data = [dict(name='multiple_queries_all_success',
                     query1='select 11',
                     query2='select 12',
                     query3='select 13'),
                dict(name='multiple_queries_failure_in_middle',
                     query1='select 21',
                     query2='select 22 from invalid_table',
                     query3='select 23')]
    dev_raw_data_source_data = ''.join(json.dumps(d) for d in raw_data)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    hive_query_executor = pipeline_builder.add_stage('Hive Query', type='executor')
    hive_query_executor.sql_queries = ['${record:value("/query1")}',
                                       '${record:value("/query2")}',
                                       '${record:value("/query3")}']
    hive_query_executor.stop_on_query_failure = stop_on_query_failure
    wiretap = pipeline_builder.add_wiretap()
    dev_raw_data_source >> hive_query_executor >= wiretap.destination
    pipeline = pipeline_builder.build(title='Hive Query Executor Test').configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    if not stop_on_query_failure:
        # All queries are attempted, so the failing record still executes query3.
        event_type_expected_query_values = [('successful-query', dict(query='select 11')),
                                            ('successful-query', dict(query='select 12')),
                                            ('successful-query', dict(query='select 13')),
                                            ('successful-query', dict(query='select 21')),
                                            ('failed-query', dict(query='select 22 from invalid_table')),
                                            ('successful-query', dict(query='select 23'))]
    else:
        # query3 of the failing record is skipped and listed as unexecuted.
        event_type_expected_query_values = [('successful-query', dict(query='select 11')),
                                            ('successful-query', dict(query='select 12')),
                                            ('successful-query', dict(query='select 13')),
                                            ('successful-query', dict(query='select 21')),
                                            ('failed-query', dict([('query', 'select 22 from invalid_table'),
                                                                   ('unexecuted-queries', ['select 23'])]))]
    sdc_executor.start_pipeline(pipeline).wait_for_finished()
    assert len(wiretap.output_records) == len(event_type_expected_query_values)
    # Events must arrive in query-execution order with the expected type and payload.
    # (Rewrote the index-based range(len(...)) loop as a zip over both sequences.)
    for event_record, (expected_event_type, expected_event_values) in zip(wiretap.output_records,
                                                                          event_type_expected_query_values):
        assert expected_event_type == event_record.header['values']['sdc.event.type']
        assert expected_event_values == event_record.field
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
def test_hive_avro_schema_contains_only_columninfo(sdc_builder, sdc_executor, cluster):
    """Validate that the avro Schema in records contains strictly hive table column information
    and not partition information.

    Previously the "DESC <tableName>" query used to gather table information
    returned partition names twice in the resultset - once with column info and
    once with partition info - so they were counted twice.  This behavior was
    fixed as part of SDC-13898.

    dev_raw_data_source >> hive_metadata
    hive_metadata >> [hadoop_fs, wiretap.destination]
    hive_metadata >> hive_metastore
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = get_random_string(string.ascii_lowercase, 20)
    # Renamed from `id`/`username`: the old `id` local shadowed the builtin.
    id_column, username_column = 'id', 'username'
    raw_data = json.dumps({id_column: 1, username_column: 'abc'})
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=raw_data,
                                       stop_after_first_batch=True)
    partition_name = 'partition1'
    partition_configuration = [
        {'name': partition_name, 'valueType': 'STRING', 'valueEL': '${record:value("/username")}'}
    ]
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 partition_configuration=partition_configuration,
                                 external_table=True,
                                 table_name=table_name,
                                 table_path_template=f"/tmp/{table_name}")
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    wiretap = pipeline_builder.add_wiretap()
    dev_raw_data_source >> hive_metadata
    hive_metadata >> [hadoop_fs, wiretap.destination]
    hive_metadata >> hive_metastore
    pipeline = pipeline_builder.build().configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        assert len(wiretap.output_records) == 1
        avro_schema = json.loads(wiretap.output_records[0].header['values']['avroSchema'])
        column_list = [field['name'] for field in avro_schema['fields']]
        # Bug fix: the original `assert id and username in column_list and ...` only
        # tested the truthiness of the `id` string, never its membership in the schema.
        assert id_column in column_list
        assert username_column in column_list
        # Partition columns must not leak into the record's Avro schema (SDC-13898).
        assert partition_name not in column_list
    finally:
        logger.info('Dropping table %s in Hive...', table_name)
        # NOTE(review): the external table's HDFS directory /tmp/<table_name> is not
        # removed here - confirm whether cleanup is needed as in other external-table tests.
        hive_cursor.execute('DROP TABLE {}'.format(table_name))
@sdc_min_version('3.0.0.0')
@cluster('cdh', 'hdp')
def test_hdfs_avro_update_schema_stored_externally(sdc_builder, sdc_executor, cluster):
    """Validate that Hive schema stored as AVRO at external location is correctly updated,
    when there is a column update/addition to the Hive table.

    The test populates/creates a table with 2 columns: 'name' and 'id'.
    Then the pipeline is stopped, and a new field 'age' is added.
    Verify that the Hive table is updated with the new column and that a new AVRO schema
    file is generated and stored at the external location.

    Pipeline topology:
        dev_raw_data_source >> hive_metadata
        hive_metadata >> hadoop_fs
        hive_metadata >> hive_metastore
    """
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = get_random_string(string.ascii_lowercase, 20)
    # External HDFS directory where the Hive Metastore stage will write the AVRO schema files.
    avro_dir = f'/tmp/{get_random_string(string.ascii_lowercase, 20)}'
    raw_data = dict(id=1, name="testuser")
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source', type='origin')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=json.dumps(raw_data),
                                       stop_after_first_batch=True)
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="default",
                                 external_table=True,
                                 partition_configuration=[],
                                 decimal_scale_expression='2',
                                 decimal_precision_expression='4',
                                 table_name=table_name,
                                 table_path_template=f"/tmp/{table_name}")
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    # stored_as_avro=False makes the stage write the schema file to schema_folder_location.
    hive_metastore.set_attributes(stored_as_avro=False, schema_folder_location=avro_dir)
    dev_raw_data_source >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    pipeline = pipeline_builder.build().configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        # First run: the external schema directory must exist and contain exactly one schema file.
        status = cluster.hdfs.client.status(avro_dir)
        assert status is not None and status['type'] == 'DIRECTORY'
        avro_dir_filelist = cluster.hdfs.client.list(avro_dir)
        assert len(avro_dir_filelist) == 1
        # Stop and restart the pipeline after adding another element in dev_raw_data_source to create a new column
        # Because of a bug in update_pipeline, we need to re-get a reference to the stage from pipeline,
        # before we can call update_pipeline
        dev_raw_data_source = pipeline.stages.get(label=dev_raw_data_source.label)
        raw_data['age'] = 25  # Add a new element to create a new column in table
        dev_raw_data_source.set_attributes(data_format='JSON',
                                           raw_data=json.dumps(raw_data),
                                           stop_after_first_batch=True)
        sdc_executor.update_pipeline(pipeline)
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        # Second run: a second (updated) schema file appears and the new column reaches the table.
        avro_dir_filelist = cluster.hdfs.client.list(avro_dir)
        assert len(avro_dir_filelist) == 2
        table_column_list = list(_get_table_columns_and_type(hive_cursor, None, table_name).keys())
        assert 'age' in table_column_list
    finally:
        # Delete schema location
        logger.info('Deleting Schema Directory %s in Hadoop FS...', avro_dir)
        cluster.hdfs.client.delete(avro_dir, recursive=True)
        logger.info('Dropping table %s in Hive...', table_name)
        hive_cursor.execute('DROP TABLE `{0}`'.format(table_name))
def _get_qualified_table_name(db, table_name):
return f'`{db}`.`{table_name}`' if db else f'`{table_name}`'
def _get_table_location(hive_cursor, db, table_name):
    """Return the filesystem path under which Hive stores the given table.

    Runs ``DESC FORMATTED`` on the (optionally schema-qualified) table and extracts
    the value of the first row whose first column contains ``Location:``; the URI
    (e.g. ``hdfs://host:8020/path``) is reduced to its path component.

    :raises IndexError: if no ``Location:`` row is present in the output.
    """
    hive_cursor.execute('DESC FORMATTED {0}'.format(_get_qualified_table_name(db, table_name)))
    # fetchall() already returns a sequence; the previous copy via a list
    # comprehension was redundant.
    rows = hive_cursor.fetchall()
    url_result = urlparse([r[1] for r in rows if 'Location:' in r[0]][0])
    return url_result.path
def _get_table_columns_and_type(hive_cursor, db, table_name):
    """Return an ``OrderedDict`` mapping column name -> Hive type for the table.

    Runs ``DESC`` on the (optionally schema-qualified) table; each result row is
    expected to be a ``(name, type, comment)`` triple.
    """
    hive_cursor.execute(f'DESC {_get_qualified_table_name(db, table_name)}')
    # Feed (name, type) pairs straight into OrderedDict instead of building an
    # intermediate dict first - one less allocation, same resulting order.
    return OrderedDict((col_name, col_type) for col_name, col_type, _comment in hive_cursor.fetchall())
def _get_hive_warehouse_dir(hive_cursor):
"""Return the directory path used by default to store databases in Hive."""
hive_cursor.execute("SET hive.metastore.warehouse.dir")
return hive_cursor.fetchone()[0].split('=')[1]
def _get_hive_warehouse_external_dir(hive_cursor):
"""Return the directory path used by default to store external databases in Hive."""
hive_cursor.execute("SET hive.metastore.warehouse.external.dir")
return hive_cursor.fetchone()[0].split('=')[1]
| 52.916821
| 179
| 0.632868
| 13,353
| 114,512
| 5.139594
| 0.052498
| 0.037375
| 0.020254
| 0.032406
| 0.794883
| 0.762243
| 0.739206
| 0.71821
| 0.701642
| 0.687013
| 0
| 0.012311
| 0.254454
| 114,512
| 2,163
| 180
| 52.941285
| 0.791552
| 0.080795
| 0
| 0.664747
| 0
| 0.016635
| 0.18296
| 0.039163
| 0
| 0
| 0
| 0.000462
| 0.033269
| 1
| 0.020473
| false
| 0
| 0.010877
| 0.00064
| 0.044786
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed8865b3d63978652696b18b00965d7c0d05d8f4
| 152
|
py
|
Python
|
CURSOEMVIDEO/ex007.py
|
souzasnp/PrimeiroProjeto
|
e88bde45df14d9c584b24caabb5186cb98141291
|
[
"MIT"
] | null | null | null |
CURSOEMVIDEO/ex007.py
|
souzasnp/PrimeiroProjeto
|
e88bde45df14d9c584b24caabb5186cb98141291
|
[
"MIT"
] | null | null | null |
CURSOEMVIDEO/ex007.py
|
souzasnp/PrimeiroProjeto
|
e88bde45df14d9c584b24caabb5186cb98141291
|
[
"MIT"
] | null | null | null |
# Read two grades from the user and print the student's average to one decimal place.
primeira_nota = float(input('Entre com a 1º nota: '))
segunda_nota = float(input('Entre com a 2º nota: '))
media = (primeira_nota + segunda_nota) / 2
print(f'A média do Aluno é: {media:.1f}')
| 30.4
| 46
| 0.578947
| 29
| 152
| 3.034483
| 0.655172
| 0.227273
| 0.340909
| 0.409091
| 0.431818
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066116
| 0.203947
| 152
| 4
| 47
| 38
| 0.661157
| 0
| 0
| 0
| 0
| 0
| 0.447368
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed8ae68777594c3f89e69435060dd4a4f965fa5a
| 59
|
py
|
Python
|
boa3_test/test_sc/logical_test/MismatchedOperandLogicOr.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3_test/test_sc/logical_test/MismatchedOperandLogicOr.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3_test/test_sc/logical_test/MismatchedOperandLogicOr.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
def Main(a: bool, b: Tuple[str]) -> bool:
    # NOTE(review): this applies the binary `|` operator to mismatched operand
    # types (bool vs Tuple[str]); given the file's name (MismatchedOperandLogicOr),
    # the mismatch appears intentional - the compiler test suite presumably
    # expects this source to be rejected. Do not "fix" the operand types.
    return a | b
| 19.666667
| 41
| 0.559322
| 11
| 59
| 3
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.254237
| 59
| 2
| 42
| 29.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
71f3e86fb18db96f34287d7fc4d71d9c09788c68
| 12,644
|
py
|
Python
|
u-boot-2019.01+gitAUTOINC+333c3e72d3-g333c3e72d3/test/py/tests/test_fs/test_basic.py
|
rlourette/TI_SDK_u-boot-2019.01
|
3000a07c021e84d717e6792a74efcf895a7d7188
|
[
"MIT"
] | 1
|
2021-11-21T19:56:29.000Z
|
2021-11-21T19:56:29.000Z
|
u-boot-2019.01+gitAUTOINC+333c3e72d3-g333c3e72d3/test/py/tests/test_fs/test_basic.py
|
rlourette/TI_SDK_u-boot-2019.01
|
3000a07c021e84d717e6792a74efcf895a7d7188
|
[
"MIT"
] | null | null | null |
u-boot-2019.01+gitAUTOINC+333c3e72d3-g333c3e72d3/test/py/tests/test_fs/test_basic.py
|
rlourette/TI_SDK_u-boot-2019.01
|
3000a07c021e84d717e6792a74efcf895a7d7188
|
[
"MIT"
] | 1
|
2021-12-15T09:54:37.000Z
|
2021-12-15T09:54:37.000Z
|
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2018, Linaro Limited
# Author: Takahiro Akashi <takahiro.akashi@linaro.org>
#
# U-Boot File System:Basic Test
"""
This test verifies basic read/write operation on file system.
"""
import pytest
import re
from fstest_defs import *
from fstest_helpers import assert_fs_integrity
@pytest.mark.boardspec('sandbox')
@pytest.mark.slow
class TestFsBasic(object):
    """Basic read/write file-system tests run through the U-Boot console.

    Every test method receives the ``fs_obj_basic`` fixture as a
    ``(fs_type, fs_img, md5val)`` tuple: the filesystem type under test,
    the path of a prepared host image file, and a sequence of expected
    md5 sums (indexed 0-5) for the various read ranges.
    """

    def test_fs1(self, u_boot_console, fs_obj_basic):
        """
        Test Case 1 - ls command, listing a root directory and invalid directory
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 1a - ls'):
            # Test Case 1 - ls
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%sls host 0:0' % fs_type])
            assert(re.search('2621440000 *%s' % BIG_FILE, ''.join(output)))
            assert(re.search('1048576 *%s' % SMALL_FILE, ''.join(output)))
        with u_boot_console.log.section('Test Case 1b - ls (invalid dir)'):
            # In addition, test with a nonexistent directory to see if we crash.
            output = u_boot_console.run_command(
                '%sls host 0:0 invalid_d' % fs_type)
            if fs_type == 'ext4':
                assert('Can not find directory' in output)
            else:
                assert('' == output)

    def test_fs2(self, u_boot_console, fs_obj_basic):
        """
        Test Case 2 - size command for a small file
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 2a - size (small)'):
            # 1MB is 0x0010 0000
            # Test Case 2a - size of small file
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%ssize host 0:0 /%s' % (fs_type, SMALL_FILE),
                'printenv filesize',
                'setenv filesize'])
            assert('filesize=100000' in ''.join(output))
        with u_boot_console.log.section('Test Case 2b - size (/../<file>)'):
            # Test Case 2b - size of small file via a path using '..'
            output = u_boot_console.run_command_list([
                '%ssize host 0:0 /SUBDIR/../%s' % (fs_type, SMALL_FILE),
                'printenv filesize',
                'setenv filesize'])
            assert('filesize=100000' in ''.join(output))

    def test_fs3(self, u_boot_console, fs_obj_basic):
        """
        Test Case 3 - size command for a large file
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 3 - size (large)'):
            # 2.5GB (1024*1024*2500) is 0x9C40 0000
            # Test Case 3 - size of big file
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%ssize host 0:0 /%s' % (fs_type, BIG_FILE),
                'printenv filesize',
                'setenv filesize'])
            assert('filesize=9c400000' in ''.join(output))

    def test_fs4(self, u_boot_console, fs_obj_basic):
        """
        Test Case 4 - load a small file, 1MB
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 4 - load (small)'):
            # Test Case 4a - Read full 1MB of small file
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE),
                'printenv filesize'])
            assert('filesize=100000' in ''.join(output))
            # Test Case 4b - Read full 1MB of small file
            output = u_boot_console.run_command_list([
                'md5sum %x $filesize' % ADDR,
                'setenv filesize'])
            assert(md5val[0] in ''.join(output))

    def test_fs5(self, u_boot_console, fs_obj_basic):
        """
        Test Case 5 - load, reading first 1MB of 3GB file
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 5 - load (first 1MB)'):
            # Test Case 5a - First 1MB of big file
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%sload host 0:0 %x /%s %x 0x0' % (fs_type, ADDR, BIG_FILE, LENGTH),
                'printenv filesize'])
            assert('filesize=100000' in ''.join(output))
            # Test Case 5b - First 1MB of big file
            output = u_boot_console.run_command_list([
                'md5sum %x $filesize' % ADDR,
                'setenv filesize'])
            assert(md5val[1] in ''.join(output))

    def test_fs6(self, u_boot_console, fs_obj_basic):
        """
        Test Case 6 - load, reading last 1MB of 3GB file
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 6 - load (last 1MB)'):
            # fails for ext as no offset support
            # Test Case 6a - Last 1MB of big file
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%sload host 0:0 %x /%s %x 0x9c300000'
                % (fs_type, ADDR, BIG_FILE, LENGTH),
                'printenv filesize'])
            assert('filesize=100000' in ''.join(output))
            # Test Case 6b - Last 1MB of big file
            output = u_boot_console.run_command_list([
                'md5sum %x $filesize' % ADDR,
                'setenv filesize'])
            assert(md5val[2] in ''.join(output))

    def test_fs7(self, u_boot_console, fs_obj_basic):
        """
        Test Case 7 - load, 1MB from the last 1MB in 2GB
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 7 - load (last 1MB in 2GB)'):
            # fails for ext as no offset support
            # Test Case 7a - One from the last 1MB chunk of 2GB
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%sload host 0:0 %x /%s %x 0x7ff00000'
                % (fs_type, ADDR, BIG_FILE, LENGTH),
                'printenv filesize'])
            assert('filesize=100000' in ''.join(output))
            # Test Case 7b - One from the last 1MB chunk of 2GB
            output = u_boot_console.run_command_list([
                'md5sum %x $filesize' % ADDR,
                'setenv filesize'])
            assert(md5val[3] in ''.join(output))

    def test_fs8(self, u_boot_console, fs_obj_basic):
        """
        Test Case 8 - load, reading first 1MB in 2GB
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 8 - load (first 1MB in 2GB)'):
            # fails for ext as no offset support
            # Test Case 8a - One from the start 1MB chunk from 2GB
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%sload host 0:0 %x /%s %x 0x80000000'
                % (fs_type, ADDR, BIG_FILE, LENGTH),
                'printenv filesize'])
            assert('filesize=100000' in ''.join(output))
            # Test Case 8b - One from the start 1MB chunk from 2GB
            output = u_boot_console.run_command_list([
                'md5sum %x $filesize' % ADDR,
                'setenv filesize'])
            assert(md5val[4] in ''.join(output))

    def test_fs9(self, u_boot_console, fs_obj_basic):
        """
        Test Case 9 - load, 1MB crossing 2GB boundary
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 9 - load (crossing 2GB boundary)'):
            # fails for ext as no offset support
            # Test Case 9a - One 1MB chunk crossing the 2GB boundary
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%sload host 0:0 %x /%s %x 0x7ff80000'
                % (fs_type, ADDR, BIG_FILE, LENGTH),
                'printenv filesize'])
            assert('filesize=100000' in ''.join(output))
            # Test Case 9b - One 1MB chunk crossing the 2GB boundary
            output = u_boot_console.run_command_list([
                'md5sum %x $filesize' % ADDR,
                'setenv filesize'])
            assert(md5val[5] in ''.join(output))

    def test_fs10(self, u_boot_console, fs_obj_basic):
        """
        Test Case 10 - load, reading beyond file end
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 10 - load (beyond file end)'):
            # Generic failure case
            # Test Case 10 - 2MB chunk from the last 1MB of big file
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%sload host 0:0 %x /%s 0x00200000 0x9c300000'
                % (fs_type, ADDR, BIG_FILE),
                'printenv filesize',
                'md5sum %x $filesize' % ADDR,
                'setenv filesize'])
            assert('filesize=100000' in ''.join(output))

    def test_fs11(self, u_boot_console, fs_obj_basic):
        """
        Test Case 11 - write
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 11 - write'):
            # Read 1MB from small file
            # Write it back to test the writes
            # Test Case 11a - Check that the write succeeded
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE),
                '%swrite host 0:0 %x /%s.w $filesize'
                % (fs_type, ADDR, SMALL_FILE)])
            assert('1048576 bytes written' in ''.join(output))
            # Test Case 11b - Check md5 of written to is same
            # as the one read from
            output = u_boot_console.run_command_list([
                '%sload host 0:0 %x /%s.w' % (fs_type, ADDR, SMALL_FILE),
                'md5sum %x $filesize' % ADDR,
                'setenv filesize'])
            assert(md5val[0] in ''.join(output))
            assert_fs_integrity(fs_type, fs_img)

    def test_fs12(self, u_boot_console, fs_obj_basic):
        """
        Test Case 12 - write to "." directory
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 12 - write (".")'):
            # Next test case checks writing a file whose dirent
            # is the first in the block, which is always true for "."
            # The write should fail, but the lookup should work
            # Test Case 12 - Check directory traversal
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%swrite host 0:0 %x /. 0x10' % (fs_type, ADDR)])
            assert('Unable to write' in ''.join(output))
            assert_fs_integrity(fs_type, fs_img)

    def test_fs13(self, u_boot_console, fs_obj_basic):
        """
        Test Case 13 - write to a file with "/./<filename>"
        """
        fs_type,fs_img,md5val = fs_obj_basic
        with u_boot_console.log.section('Test Case 13 - write ("./<file>")'):
            # Read 1MB from small file
            # Write it via "same directory", i.e. "." dirent
            # Test Case 13a - Check directory traversal
            output = u_boot_console.run_command_list([
                'host bind 0 %s' % fs_img,
                '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE),
                '%swrite host 0:0 %x /./%s2 $filesize'
                % (fs_type, ADDR, SMALL_FILE)])
            assert('1048576 bytes written' in ''.join(output))
            # Test Case 13b - Check md5 of written to is same
            # as the one read from
            output = u_boot_console.run_command_list([
                'mw.b %x 00 100' % ADDR,
                '%sload host 0:0 %x /./%s2' % (fs_type, ADDR, SMALL_FILE),
                'md5sum %x $filesize' % ADDR,
                'setenv filesize'])
            assert(md5val[0] in ''.join(output))
            # Test Case 13c - Check md5 of written to is same
            # as the one read from
            output = u_boot_console.run_command_list([
                'mw.b %x 00 100' % ADDR,
                '%sload host 0:0 %x /%s2' % (fs_type, ADDR, SMALL_FILE),
                'md5sum %x $filesize' % ADDR,
                'setenv filesize'])
            assert(md5val[0] in ''.join(output))
            assert_fs_integrity(fs_type, fs_img)
| 43.153584
| 86
| 0.548481
| 1,667
| 12,644
| 3.973005
| 0.138572
| 0.040012
| 0.094217
| 0.065227
| 0.760079
| 0.738336
| 0.726106
| 0.703005
| 0.703005
| 0.625396
| 0
| 0.050392
| 0.343958
| 12,644
| 292
| 87
| 43.30137
| 0.748041
| 0.194954
| 0
| 0.623596
| 0
| 0
| 0.207388
| 0
| 0
| 0
| 0.006818
| 0
| 0.168539
| 1
| 0.073034
| false
| 0
| 0.022472
| 0
| 0.101124
| 0.05618
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9c02137b6baeb9ee5bfef9eeca56974ababf35c0
| 1,119
|
py
|
Python
|
rotkehlchen/tests/fixtures/__init__.py
|
VoR0220/rotki
|
0ba3073c3d71ff8bf0800841c5e1097243f46b66
|
[
"BSD-3-Clause"
] | 1
|
2020-12-12T01:41:19.000Z
|
2020-12-12T01:41:19.000Z
|
rotkehlchen/tests/fixtures/__init__.py
|
VoR0220/rotki
|
0ba3073c3d71ff8bf0800841c5e1097243f46b66
|
[
"BSD-3-Clause"
] | null | null | null |
rotkehlchen/tests/fixtures/__init__.py
|
VoR0220/rotki
|
0ba3073c3d71ff8bf0800841c5e1097243f46b66
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# flake8: noqa
from rotkehlchen.tests.fixtures.aave import *
from rotkehlchen.tests.fixtures.accounting import *
from rotkehlchen.tests.fixtures.assets import *
from rotkehlchen.tests.fixtures.blockchain import *
from rotkehlchen.tests.fixtures.db import *
from rotkehlchen.tests.fixtures.exchanges.binance import *
from rotkehlchen.tests.fixtures.exchanges.bitmex import *
from rotkehlchen.tests.fixtures.exchanges.bitstamp import *
from rotkehlchen.tests.fixtures.exchanges.bittrex import *
from rotkehlchen.tests.fixtures.exchanges.coinbase import *
from rotkehlchen.tests.fixtures.exchanges.coinbasepro import *
from rotkehlchen.tests.fixtures.exchanges.gemini import *
from rotkehlchen.tests.fixtures.exchanges.kraken import *
from rotkehlchen.tests.fixtures.exchanges.poloniex import *
from rotkehlchen.tests.fixtures.greenlets import *
from rotkehlchen.tests.fixtures.history import *
from rotkehlchen.tests.fixtures.messages import *
from rotkehlchen.tests.fixtures.pylint import *
from rotkehlchen.tests.fixtures.rotkehlchen import *
from rotkehlchen.tests.fixtures.variables import *
| 46.625
| 62
| 0.831099
| 134
| 1,119
| 6.940299
| 0.223881
| 0.322581
| 0.430108
| 0.602151
| 0.78172
| 0.416129
| 0
| 0
| 0
| 0
| 0
| 0.001942
| 0.079535
| 1,119
| 23
| 63
| 48.652174
| 0.900971
| 0.030384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9c07619fad93c0332dad6919b302f7c3b1fed04b
| 46
|
py
|
Python
|
unsorted/pythonsnippets_0022.py
|
fiddlerwoaroof/sandbox
|
652acaf710a8b60f005769bde317e7bbf548cc2b
|
[
"BSD-3-Clause"
] | null | null | null |
unsorted/pythonsnippets_0022.py
|
fiddlerwoaroof/sandbox
|
652acaf710a8b60f005769bde317e7bbf548cc2b
|
[
"BSD-3-Clause"
] | null | null | null |
unsorted/pythonsnippets_0022.py
|
fiddlerwoaroof/sandbox
|
652acaf710a8b60f005769bde317e7bbf548cc2b
|
[
"BSD-3-Clause"
] | null | null | null |
genetics.py -- python/ga
python -i genetics.py
| 23
| 24
| 0.76087
| 8
| 46
| 4.375
| 0.625
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 46
| 2
| 25
| 23
| 0.853659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9c0c211f51578e12d01d7512d736653e4f46839c
| 138
|
py
|
Python
|
example/plugins/testplugin/testplugin.py
|
zx013/pyvoxel
|
4d886b25a3430dbd54863bf433c2eebbd030d0f4
|
[
"MIT"
] | null | null | null |
example/plugins/testplugin/testplugin.py
|
zx013/pyvoxel
|
4d886b25a3430dbd54863bf433c2eebbd030d0f4
|
[
"MIT"
] | null | null | null |
example/plugins/testplugin/testplugin.py
|
zx013/pyvoxel
|
4d886b25a3430dbd54863bf433c2eebbd030d0f4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from pyvoxel.plugin import Plugin
class TestPlugin(Plugin):
    """Minimal example plugin built on the pyvoxel ``Plugin`` base class."""

    def test(self):
        """Return a fixed sample string."""
        return 'test user'
| 19.714286
| 34
| 0.615942
| 17
| 138
| 5
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009709
| 0.253623
| 138
| 6
| 35
| 23
| 0.815534
| 0.152174
| 0
| 0
| 0
| 0
| 0.082569
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
9c15c1f0b047da812f9ebfa893ac69dd7ae813a5
| 760
|
py
|
Python
|
tests/core/eth-module/test_gas_pricing.py
|
bhardwajRahul/web3.py
|
efecadcdea64f9481fcace558a8ea103462e2923
|
[
"MIT"
] | null | null | null |
tests/core/eth-module/test_gas_pricing.py
|
bhardwajRahul/web3.py
|
efecadcdea64f9481fcace558a8ea103462e2923
|
[
"MIT"
] | 1
|
2022-02-17T20:28:58.000Z
|
2022-02-17T20:28:58.000Z
|
tests/core/eth-module/test_gas_pricing.py
|
bhardwajRahul/web3.py
|
efecadcdea64f9481fcace558a8ea103462e2923
|
[
"MIT"
] | null | null | null |
from unittest.mock import (
Mock,
)
def test_get_set_gas_price(w3):
assert w3.eth.gas_price > 0
def test_no_gas_price_strategy_returns_none(w3):
assert w3.eth.generate_gas_price() is None
def test_set_gas_price_strategy(w3):
def my_gas_price_strategy(w3, transaction_params):
return 5
w3.eth.set_gas_price_strategy(my_gas_price_strategy)
assert w3.eth.generate_gas_price() == 5
def test_gas_price_strategy_calls(w3):
transaction = {
'to': '0x0',
'value': 1000000000
}
my_gas_price_strategy = Mock(return_value=5)
w3.eth.set_gas_price_strategy(my_gas_price_strategy)
assert w3.eth.generate_gas_price(transaction) == 5
my_gas_price_strategy.assert_called_once_with(w3, transaction)
| 25.333333
| 66
| 0.740789
| 117
| 760
| 4.376068
| 0.282051
| 0.234375
| 0.3125
| 0.175781
| 0.373047
| 0.326172
| 0.273438
| 0.273438
| 0.273438
| 0.273438
| 0
| 0.046032
| 0.171053
| 760
| 29
| 67
| 26.206897
| 0.766667
| 0
| 0
| 0.095238
| 1
| 0
| 0.013158
| 0
| 0
| 0
| 0.003947
| 0
| 0.238095
| 1
| 0.238095
| false
| 0
| 0.047619
| 0.047619
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9c1aeca5ed1dd36903dba1e615397cafebd1c0db
| 94
|
py
|
Python
|
__init__.py
|
hanscje/PySearch
|
22d551f5fa5c41cc82e9e8205f0dcea78ede9338
|
[
"MIT"
] | null | null | null |
__init__.py
|
hanscje/PySearch
|
22d551f5fa5c41cc82e9e8205f0dcea78ede9338
|
[
"MIT"
] | null | null | null |
__init__.py
|
hanscje/PySearch
|
22d551f5fa5c41cc82e9e8205f0dcea78ede9338
|
[
"MIT"
] | null | null | null |
"""
TODO: Shall check that all the needed packages are available before running the program
"""
| 31.333333
| 86
| 0.765957
| 14
| 94
| 5.142857
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159574
| 94
| 3
| 87
| 31.333333
| 0.911392
| 0.914894
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.333333
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9c80598b71fdb4da231003c90b85f9b59722298e
| 54
|
py
|
Python
|
tests/core/__init__.py
|
ShepardZhao/rancher
|
a747ac408ca34fb0bf465276f07557ec43bf9c89
|
[
"Apache-2.0"
] | 1
|
2020-12-02T09:52:28.000Z
|
2020-12-02T09:52:28.000Z
|
tests/core/__init__.py
|
ShepardZhao/rancher
|
a747ac408ca34fb0bf465276f07557ec43bf9c89
|
[
"Apache-2.0"
] | 1
|
2019-05-14T04:08:43.000Z
|
2019-05-14T04:08:43.000Z
|
tests/core/__init__.py
|
ShepardZhao/rancher
|
a747ac408ca34fb0bf465276f07557ec43bf9c89
|
[
"Apache-2.0"
] | 2
|
2019-11-14T15:46:01.000Z
|
2020-05-06T15:31:37.000Z
|
import pytest

# Enable pytest's assertion rewriting for the 'core' package so plain `assert`
# statements inside it report detailed introspection on failure.
pytest.register_assert_rewrite('core')
| 13.5
| 38
| 0.833333
| 7
| 54
| 6.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 54
| 3
| 39
| 18
| 0.86
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
92bc06e00a7adc7104969a186aacc3e55402a196
| 265
|
py
|
Python
|
weibo_base/__init__.py
|
guangfeizhao/weibo-scraper
|
f971182fc00754b36c4bab1d8584daa825f80817
|
[
"MIT"
] | 1
|
2020-12-14T05:36:58.000Z
|
2020-12-14T05:36:58.000Z
|
weibo_base/__init__.py
|
billy3321/weibo-scraper
|
8d1ffc9060ae7ddba217093c23ab8111f9b351c8
|
[
"MIT"
] | null | null | null |
weibo_base/__init__.py
|
billy3321/weibo-scraper
|
8d1ffc9060ae7ddba217093c23ab8111f9b351c8
|
[
"MIT"
] | 1
|
2020-02-08T20:14:20.000Z
|
2020-02-08T20:14:20.000Z
|
# -*- coding:utf-8 -*-
"""
Author: Helixcs
Site: https://iliangqunru.bitcron.com/
File: __init__.py.py
Time: 5/19/18
"""
from .weibo_typing import *
from .weibo_api import *
from .weibo_component import *
from .weibo_util import *
from .weibo_parser import *
| 17.666667
| 39
| 0.701887
| 38
| 265
| 4.657895
| 0.657895
| 0.254237
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026667
| 0.150943
| 265
| 14
| 40
| 18.928571
| 0.76
| 0.418868
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
92c7c76b91bced85ad2f08a1980e1d2afc806d83
| 144
|
py
|
Python
|
edza_req.py
|
Edward-Zion-Saji/Edza-3
|
2d0169a50c60088d953c470e15707a9f3c0965aa
|
[
"Apache-2.0"
] | null | null | null |
edza_req.py
|
Edward-Zion-Saji/Edza-3
|
2d0169a50c60088d953c470e15707a9f3c0965aa
|
[
"Apache-2.0"
] | null | null | null |
edza_req.py
|
Edward-Zion-Saji/Edza-3
|
2d0169a50c60088d953c470e15707a9f3c0965aa
|
[
"Apache-2.0"
] | null | null | null |
import os
import pyttsx3
import speech_recognition as sr
import wikipedia
import aiml
import wx.adv
import wolframalpha
import winsound
| 16
| 32
| 0.8125
| 20
| 144
| 5.8
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008475
| 0.180556
| 144
| 9
| 33
| 16
| 0.974576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
92fd7e8b278eea84275d03cc6b62aba61a60c4dd
| 173
|
py
|
Python
|
botshop/pytorch/__init__.py
|
nuhame/botshop
|
e942ff41c0c5f22bb6232afa5975c51a0238ab9f
|
[
"Apache-2.0"
] | null | null | null |
botshop/pytorch/__init__.py
|
nuhame/botshop
|
e942ff41c0c5f22bb6232afa5975c51a0238ab9f
|
[
"Apache-2.0"
] | null | null | null |
botshop/pytorch/__init__.py
|
nuhame/botshop
|
e942ff41c0c5f22bb6232afa5975c51a0238ab9f
|
[
"Apache-2.0"
] | null | null | null |
from botshop import *
from .conversation_engine import BasicConversationEngine
from .sample_and_rank import SampleAndRankConversationEngine
import botshop.pytorch.utils
| 19.222222
| 60
| 0.867052
| 18
| 173
| 8.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104046
| 173
| 8
| 61
| 21.625
| 0.948387
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
134bf62abb9dda4119951ce36a1104460f764cd8
| 201
|
py
|
Python
|
python/speech/cena.py
|
imjoseangel/100DaysOfCode
|
bff90569033e2b02a56e893bd45727125962aeb3
|
[
"MIT"
] | 1
|
2022-03-30T12:59:44.000Z
|
2022-03-30T12:59:44.000Z
|
python/speech/cena.py
|
imjoseangel/100DaysOfCode
|
bff90569033e2b02a56e893bd45727125962aeb3
|
[
"MIT"
] | null | null | null |
python/speech/cena.py
|
imjoseangel/100DaysOfCode
|
bff90569033e2b02a56e893bd45727125962aeb3
|
[
"MIT"
] | 3
|
2019-08-13T11:33:36.000Z
|
2022-03-08T22:00:09.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Speech Recognition placeholder.

No recognition logic is present yet; the script only prints a fixed message.
"""
from __future__ import (division, absolute_import, print_function,
                        unicode_literals)

print('Háztela Tú')
| 25.125
| 66
| 0.641791
| 22
| 201
| 5.545455
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006329
| 0.21393
| 201
| 7
| 67
| 28.714286
| 0.765823
| 0.303483
| 0
| 0
| 0
| 0
| 0.075188
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
135f83261e9af7e11be4384a033f1025aeed9ab5
| 111
|
py
|
Python
|
tvguide/__init__.py
|
Crinibus/tvguide
|
869354bfa6ca574696b7d3da966af769459c42ab
|
[
"MIT"
] | null | null | null |
tvguide/__init__.py
|
Crinibus/tvguide
|
869354bfa6ca574696b7d3da966af769459c42ab
|
[
"MIT"
] | null | null | null |
tvguide/__init__.py
|
Crinibus/tvguide
|
869354bfa6ca574696b7d3da966af769459c42ab
|
[
"MIT"
] | null | null | null |
from .format import Format
from .argument import argparse_setup
from .API import API
from .Config import Config
| 27.75
| 36
| 0.828829
| 17
| 111
| 5.352941
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 111
| 4
| 37
| 27.75
| 0.947917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
13c2ad209690581366f7010427eff2edb3815d42
| 438
|
py
|
Python
|
NoteBooks/Curso de Flask/Ex_Files_Full_Stack_Dev_Flask/Exercise Files/Section 3/3.3 Start/application/routes.py
|
Alejandro-sin/Learning_Notebooks
|
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
|
[
"MIT"
] | 1
|
2021-02-26T13:12:22.000Z
|
2021-02-26T13:12:22.000Z
|
NoteBooks/Curso de Flask/Ex_Files_Full_Stack_Dev_Flask/Exercise Files/Section 3/3.3 Start/application/routes.py
|
Alejandro-sin/Learning_Notebooks
|
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
|
[
"MIT"
] | null | null | null |
NoteBooks/Curso de Flask/Ex_Files_Full_Stack_Dev_Flask/Exercise Files/Section 3/3.3 Start/application/routes.py
|
Alejandro-sin/Learning_Notebooks
|
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
|
[
"MIT"
] | null | null | null |
from application import app
from flask import render_template
@app.route("/")
@app.route("/index")
@app.route("/home")
def index():
    """Serve the landing page for /, /index and /home."""
    page = "index.html"
    return render_template(page)
@app.route("/login")
def login():
    """Serve the login page."""
    page = "login.html"
    return render_template(page)
@app.route("/courses")
def courses():
    """Serve the course-listing page."""
    page = "courses.html"
    return render_template(page)
@app.route("/register")
def register():
    """Serve the account-registration page."""
    page = "register.html"
    return render_template(page)
| 21.9
| 44
| 0.66895
| 54
| 438
| 5.333333
| 0.296296
| 0.166667
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1621
| 438
| 20
| 45
| 21.9
| 0.784741
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.125
| 0.25
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b94d37923b5d8a093aef73cd31115cdfdbf231cc
| 4,340
|
py
|
Python
|
python_modules/libraries/dagster-celery-docker/dagster_celery_docker_tests/test_execute_docker.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-celery-docker/dagster_celery_docker_tests/test_execute_docker.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-celery-docker/dagster_celery_docker_tests/test_execute_docker.py
|
asamoal/dagster
|
08fad28e4b608608ce090ce2e8a52c2cf9dd1b64
|
[
"Apache-2.0"
] | null | null | null |
# pylint doesn't know about pytest fixtures
# pylint: disable=unused-argument
import os
from contextlib import contextmanager
from dagster_test.test_project import (
find_local_test_image,
get_buildkite_registry_config,
get_test_project_docker_image,
get_test_project_environments_path,
get_test_project_recon_pipeline,
)
from dagster import execute_pipeline
from dagster.utils import merge_dicts
from dagster.utils.test.postgres_instance import postgres_instance_for_test
from dagster.utils.yaml_utils import merge_yamls
# True when running under the Buildkite CI environment.
IS_BUILDKITE = os.getenv("BUILDKITE") is not None


@contextmanager
def celery_docker_postgres_instance(overrides=None):
    """Yield a Dagster instance backed by the test Postgres container.

    ``overrides`` is forwarded to ``postgres_instance_for_test`` to tweak
    the instance configuration.
    """
    with postgres_instance_for_test(
        __file__, "test-postgres-db-celery-docker", overrides=overrides
    ) as dagster_instance:
        yield dagster_instance
def test_execute_celery_docker_image_on_executor_config(aws_creds):
    """Run docker_celery_pipeline with the docker image set in the executor config.

    Confirms the run succeeds and that environment variables passed through
    ``container_kwargs`` are visible inside the container.
    """
    docker_image = get_test_project_docker_image()
    docker_config = {
        # Image is supplied here, on the executor config (cf. the sibling test
        # which passes it on the pipeline instead).
        "image": docker_image,
        "network": "container:test-postgres-db-celery-docker",
        "container_kwargs": {
            "environment": {
                "FIND_ME": "here!",
                "AWS_ACCESS_KEY_ID": aws_creds["aws_access_key_id"],
                "AWS_SECRET_ACCESS_KEY": aws_creds["aws_secret_access_key"],
            },
            # "auto_remove": False # uncomment when debugging to view container logs after execution
        },
    }
    # On Buildkite pull the image from the registry; locally, require a
    # locally built image.
    if IS_BUILDKITE:
        docker_config["registry"] = get_buildkite_registry_config()
    else:
        find_local_test_image(docker_image)
    run_config = merge_dicts(
        merge_yamls(
            [
                os.path.join(get_test_project_environments_path(), "env.yaml"),
                os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
                os.path.join(get_test_project_environments_path(), "env_environment_vars.yaml"),
            ]
        ),
        {
            "execution": {
                "celery-docker": {
                    "config": {
                        "docker": docker_config,
                        # task_always_eager runs celery tasks in-process.
                        "config_source": {"task_always_eager": True},
                    }
                }
            },
        },
    )
    with celery_docker_postgres_instance() as instance:
        result = execute_pipeline(
            get_test_project_recon_pipeline("docker_celery_pipeline"),
            run_config=run_config,
            instance=instance,
        )
        assert result.success
        assert result.result_for_solid("get_environment_solid").output_value("result") == "here!"
def test_execute_celery_docker_image_on_pipeline_config(aws_creds):
    """Run docker_celery_pipeline with the docker image set on the pipeline.

    Same scenario as the executor-config test, except the image is passed to
    ``get_test_project_recon_pipeline`` instead of ``docker_config["image"]``,
    and the container environment uses the list ("KEY=value") form.
    """
    docker_image = get_test_project_docker_image()
    docker_config = {
        "network": "container:test-postgres-db-celery-docker",
        "container_kwargs": {
            "environment": [
                "FIND_ME=here!",
                f"AWS_ACCESS_KEY_ID={aws_creds['aws_access_key_id']}",
                f"AWS_SECRET_ACCESS_KEY={aws_creds['aws_secret_access_key']}",
            ],
            # "auto_remove": False # uncomment when debugging to view container logs after execution
        },
    }
    # On Buildkite pull the image from the registry; locally, require a
    # locally built image.
    if IS_BUILDKITE:
        docker_config["registry"] = get_buildkite_registry_config()
    else:
        find_local_test_image(docker_image)
    run_config = merge_dicts(
        merge_yamls(
            [
                os.path.join(get_test_project_environments_path(), "env.yaml"),
                os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
                os.path.join(get_test_project_environments_path(), "env_environment_vars.yaml"),
            ]
        ),
        {
            "execution": {
                "celery-docker": {
                    "config": {
                        "docker": docker_config,
                        # task_always_eager runs celery tasks in-process.
                        "config_source": {"task_always_eager": True},
                    }
                }
            },
        },
    )
    with celery_docker_postgres_instance() as instance:
        result = execute_pipeline(
            # Image is attached to the reconstructed pipeline here.
            get_test_project_recon_pipeline("docker_celery_pipeline", docker_image),
            run_config=run_config,
            instance=instance,
        )
        assert result.success
        assert result.result_for_solid("get_environment_solid").output_value("result") == "here!"
| 33.384615
| 100
| 0.618203
| 462
| 4,340
| 5.376623
| 0.205628
| 0.061997
| 0.073269
| 0.073269
| 0.773752
| 0.730274
| 0.730274
| 0.703704
| 0.703704
| 0.703704
| 0
| 0.000648
| 0.289171
| 4,340
| 129
| 101
| 33.643411
| 0.804538
| 0.056452
| 0
| 0.46729
| 0
| 0
| 0.181262
| 0.096869
| 0
| 0
| 0
| 0
| 0.037383
| 1
| 0.028037
| false
| 0
| 0.065421
| 0
| 0.093458
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b9835fdbdb90e73b51a7a06b1f73966fa2ffea6c
| 169
|
py
|
Python
|
test/test_del_group.py
|
ozerovadiana/python_training
|
a5f9e0795a64326f04bad961bf10e6829e25b957
|
[
"Apache-2.0"
] | null | null | null |
test/test_del_group.py
|
ozerovadiana/python_training
|
a5f9e0795a64326f04bad961bf10e6829e25b957
|
[
"Apache-2.0"
] | null | null | null |
test/test_del_group.py
|
ozerovadiana/python_training
|
a5f9e0795a64326f04bad961bf10e6829e25b957
|
[
"Apache-2.0"
] | null | null | null |
def test_delete_first__group(app):
app.session.open_home_page()
app.session.login("admin", "secret")
app.group.delete_first_group()
app.session.logout()
| 28.166667
| 40
| 0.721893
| 24
| 169
| 4.75
| 0.583333
| 0.263158
| 0.280702
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136095
| 169
| 5
| 41
| 33.8
| 0.780822
| 0
| 0
| 0
| 0
| 0
| 0.065089
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b9b83f30748ac4976035442107646aee680f0bd5
| 6,867
|
py
|
Python
|
tests/api/v1/challenges/requirements/test_requirements.py
|
kleprevost/CTFd
|
673e61c418cedfe28f5b32701e650fd89d54f737
|
[
"Apache-2.0"
] | 3,592
|
2017-03-12T19:44:07.000Z
|
2022-03-30T16:03:33.000Z
|
tests/api/v1/challenges/requirements/test_requirements.py
|
kleprevost/CTFd
|
673e61c418cedfe28f5b32701e650fd89d54f737
|
[
"Apache-2.0"
] | 1,648
|
2017-03-12T23:44:34.000Z
|
2022-03-31T15:28:38.000Z
|
tests/api/v1/challenges/requirements/test_requirements.py
|
kleprevost/CTFd
|
673e61c418cedfe28f5b32701e650fd89d54f737
|
[
"Apache-2.0"
] | 1,736
|
2017-03-13T14:01:28.000Z
|
2022-03-31T08:14:24.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from CTFd.models import Users
from CTFd.utils import set_config
from tests.helpers import (
create_ctfd,
destroy_ctfd,
gen_challenge,
gen_solve,
login_as_user,
register_user,
)
def test_api_challenges_admins_can_bypass_requirements():
    """Test that admins can bypass requirements checks with admin capabilities and view-admin"""
    app = create_ctfd()
    with app.app_context():
        # Create challenges: challenge 2 is gated behind challenge 1.
        prereq_id = gen_challenge(app.db).id
        chal_obj = gen_challenge(app.db)
        chal_obj.requirements = {"prerequisites": [prereq_id]}
        register_user(app)
        # Confirm that regular users cannot see prerequisites
        with login_as_user(app) as client:
            # Locked challenges aren't shown
            r = client.get("/api/v1/challenges")
            assert r.status_code == 200
            data = r.get_json()["data"]
            assert len(data) == 1
            assert data[0]["id"] == 1
            # Not even with tricks (?view=admin is ignored for non-admins)
            r = client.get("/api/v1/challenges?view=admin")
            assert r.status_code == 200
            data = r.get_json()["data"]
            assert len(data) == 1
            assert data[0]["id"] == 1
            # Not even with forced browsing to the gated challenge's ID
            r = client.get("/api/v1/challenges/2")
            assert r.status_code == 403
        # Confirm that admins can bypass the requirement checks
        with login_as_user(app, name="admin") as admin:
            # Admins see as regular users by default
            r = admin.get("/api/v1/challenges")
            assert r.status_code == 200
            data = r.get_json()["data"]
            assert len(data) == 1
            assert data[0]["id"] == 1
            # With ?view=admin, admins can see all challenges
            r = admin.get("/api/v1/challenges?view=admin")
            assert r.status_code == 200
            data = r.get_json()["data"]
            assert len(data) == 2
            assert data[0]["id"] == 1
            assert data[1]["id"] == 2
            # Admins can force browse to challenges
            r = admin.get("/api/v1/challenges/2")
            assert r.status_code == 200
            assert r.get_json()["data"]
    destroy_ctfd(app)
def test_api_challenges_challenge_with_requirements():
    """Does the challenge list API show challenges with requirements met?"""
    app = create_ctfd()
    with app.app_context():
        # Challenge with chal_id is gated behind the prereq challenge.
        prereq_id = gen_challenge(app.db).id
        chal_obj = gen_challenge(app.db)
        chal_obj.requirements = {"prerequisites": [prereq_id]}
        chal_id = chal_obj.id
        # Create a new user which will solve the prerequisite
        register_user(app)
        # Confirm that only the prerequisite challenge is listed initially
        with login_as_user(app) as client:
            r = client.get("/api/v1/challenges")
            assert r.status_code == 200
            (chal_data,) = r.get_json()["data"]
            assert chal_data["id"] == prereq_id
        # Generate a solve and then confirm the second challenge is visible
        # (user_id=2 is presumably the account registered above — verify).
        gen_solve(app.db, user_id=2, challenge_id=prereq_id)
        with login_as_user(app) as client:
            r = client.get("/api/v1/challenges")
            assert r.status_code == 200
            data = r.get_json()["data"]
            assert len(data) == 2
            chal_ids = {c["id"] for c in r.get_json()["data"]}
            assert chal_ids == {prereq_id, chal_id}
    destroy_ctfd(app)
def test_api_challenges_challenge_with_requirements_hidden_user():
    """Does the challenge list API show gated challenges to a hidden user?"""
    app = create_ctfd()
    with app.app_context():
        # Challenge with chal_id is gated behind the prereq challenge.
        prereq_id = gen_challenge(app.db).id
        chal_obj = gen_challenge(app.db)
        chal_obj.requirements = {"prerequisites": [prereq_id]}
        chal_id = chal_obj.id
        # Create a new user which will solve the prerequisite and hide them
        register_user(app)
        Users.query.get(2).hidden = True
        app.db.session.commit()
        # Confirm that only the prerequisite challenge is listed initially
        with login_as_user(app) as client:
            r = client.get("/api/v1/challenges")
            assert r.status_code == 200
            (chal_data,) = r.get_json()["data"]
            assert chal_data["id"] == prereq_id
        # Generate a solve and then confirm the second challenge is visible
        # even though the solving user is hidden.
        gen_solve(app.db, user_id=2, challenge_id=prereq_id)
        with login_as_user(app) as client:
            r = client.get("/api/v1/challenges")
            assert r.status_code == 200
            data = r.get_json()["data"]
            assert len(data) == 2
            chal_ids = {c["id"] for c in r.get_json()["data"]}
            assert chal_ids == {prereq_id, chal_id}
    destroy_ctfd(app)
def test_api_challenges_challenge_with_requirements_banned_user():
    """Does the challenge list API show gated challenges to a banned user?"""
    app = create_ctfd()
    with app.app_context():
        prereq_id = gen_challenge(app.db).id
        chal_obj = gen_challenge(app.db)
        chal_obj.requirements = {"prerequisites": [prereq_id]}
        # Create a new user which will solve the prerequisite and ban them
        register_user(app)
        Users.query.get(2).banned = True
        app.db.session.commit()
        # Generate a solve just in case and confirm the API 403s
        # (banned users are denied outright, solve or not).
        gen_solve(app.db, user_id=2, challenge_id=prereq_id)
        with login_as_user(app) as client:
            assert client.get("/api/v1/challenges").status_code == 403
    destroy_ctfd(app)
def test_api_challenges_challenge_with_requirements_no_user():
    """Does the challenge list API show gated challenges to the public?"""
    app = create_ctfd()
    with app.app_context():
        # Make the challenge list visible without authentication.
        set_config("challenge_visibility", "public")
        prereq_id = gen_challenge(app.db).id
        chal_obj = gen_challenge(app.db)
        chal_obj.requirements = {"prerequisites": [prereq_id]}
        # Create a new user which will solve the prerequisite
        register_user(app)
        # Confirm that only the prerequisite challenge is listed publicly
        with app.test_client() as client:
            r = client.get("/api/v1/challenges")
            assert r.status_code == 200
            initial_data = r.get_json()["data"]
            (chal_data,) = initial_data
            assert chal_data["id"] == prereq_id
        # Fix up the solve count for later comparison with `initial_data`
        chal_data["solves"] += 1
        # Generate a solve and then confirm the response is unchanged
        # (an anonymous viewer never satisfies prerequisites).
        gen_solve(app.db, user_id=2, challenge_id=prereq_id)
        with app.test_client() as client:
            r = client.get("/api/v1/challenges")
            assert r.status_code == 200
            assert r.get_json()["data"] == initial_data
    destroy_ctfd(app)
| 39.924419
| 96
| 0.611621
| 921
| 6,867
| 4.369164
| 0.142237
| 0.037773
| 0.025845
| 0.058151
| 0.797714
| 0.762425
| 0.750746
| 0.706759
| 0.690358
| 0.67669
| 0
| 0.016453
| 0.283093
| 6,867
| 171
| 97
| 40.157895
| 0.800934
| 0.205184
| 0
| 0.746032
| 0
| 0
| 0.080222
| 0.010721
| 0
| 0
| 0
| 0
| 0.246032
| 1
| 0.039683
| false
| 0.007937
| 0.02381
| 0
| 0.063492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b9cd70e6030a27a2bfca8cc0a446d07f4879f62c
| 54
|
py
|
Python
|
enthought/traits/ui/wx/tuple_editor.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/traits/ui/wx/tuple_editor.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/traits/ui/wx/tuple_editor.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from traitsui.wx.tuple_editor import *
| 18
| 38
| 0.796296
| 8
| 54
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12963
| 54
| 2
| 39
| 27
| 0.893617
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a11d47852db5edccc2dece46e3d79f0849a6572
| 104
|
py
|
Python
|
python_programming/basics/module.py
|
JoshuaTPritchett/30DaysCoding
|
b361a4cf38dea66b43231fabf43252d202440811
|
[
"Unlicense"
] | null | null | null |
python_programming/basics/module.py
|
JoshuaTPritchett/30DaysCoding
|
b361a4cf38dea66b43231fabf43252d202440811
|
[
"Unlicense"
] | null | null | null |
python_programming/basics/module.py
|
JoshuaTPritchett/30DaysCoding
|
b361a4cf38dea66b43231fabf43252d202440811
|
[
"Unlicense"
] | null | null | null |
def print_test():
    """Print a greeting confirming the module was imported and called.

    Note: the original used the Python 2 ``print`` statement, which is a
    SyntaxError on Python 3. The ``print()`` function form below produces
    identical output on both Python 2 and 3 for a single argument.
    """
    print('yo you made your first module')


# Module-level constant demonstrating that values can be imported from a module.
test_string = 'YO THIS IS A TEST STRING'
| 14.857143
| 41
| 0.692308
| 18
| 104
| 3.888889
| 0.722222
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 104
| 6
| 42
| 17.333333
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0.514563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
6a1203fdbc642a6d08478d47e681fb5f7147f985
| 141
|
py
|
Python
|
src/examples/b_input_proc_output/input_process_output.py
|
MSGP117/acc-cosc-1336-spring-2022-MSGP117
|
46fdfa5da8f8eb887d2c79fe205b8a0064d6903d
|
[
"MIT"
] | null | null | null |
src/examples/b_input_proc_output/input_process_output.py
|
MSGP117/acc-cosc-1336-spring-2022-MSGP117
|
46fdfa5da8f8eb887d2c79fe205b8a0064d6903d
|
[
"MIT"
] | null | null | null |
src/examples/b_input_proc_output/input_process_output.py
|
MSGP117/acc-cosc-1336-spring-2022-MSGP117
|
46fdfa5da8f8eb887d2c79fe205b8a0064d6903d
|
[
"MIT"
] | 1
|
2022-02-12T03:50:32.000Z
|
2022-02-12T03:50:32.000Z
|
#output comments variables input calculations output constants
def display_output():
    """Write a fixed greeting to standard output."""
    greeting = 'hello'
    print(greeting)
def test_config():
    """Trivial check used to verify the test setup; always succeeds."""
    is_ok = True
    return is_ok
| 17.625
| 62
| 0.751773
| 17
| 141
| 6.117647
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 141
| 7
| 63
| 20.142857
| 0.888889
| 0.432624
| 0
| 0
| 0
| 0
| 0.064103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.25
| 0.75
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
6a23580847499a9eb038e3a7e90147a9540ff931
| 188
|
py
|
Python
|
App_Blog/admin.py
|
nitinkumar388/Django-Blog-Web-Project
|
6c3d09b342645701063b1e66523e77f62bed9db3
|
[
"MIT"
] | null | null | null |
App_Blog/admin.py
|
nitinkumar388/Django-Blog-Web-Project
|
6c3d09b342645701063b1e66523e77f62bed9db3
|
[
"MIT"
] | null | null | null |
App_Blog/admin.py
|
nitinkumar388/Django-Blog-Web-Project
|
6c3d09b342645701063b1e66523e77f62bed9db3
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from . models import Blog, Comment, Likes
# Register your models here.
# Expose each blog model in the Django admin, in the original order.
for _model in (Blog, Comment, Likes):
    admin.site.register(_model)
| 20.888889
| 41
| 0.792553
| 27
| 188
| 5.518519
| 0.481481
| 0.181208
| 0.342282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111702
| 188
| 8
| 42
| 23.5
| 0.892216
| 0.138298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6a34d0bfcfffc4e8bc4393987008b63a98f2f380
| 75
|
py
|
Python
|
vk/bot_framework/addons/caching/__init__.py
|
Inzilkin/vk.py
|
969f01e666c877c1761c3629a100768f93de27eb
|
[
"MIT"
] | 24
|
2019-09-13T15:30:09.000Z
|
2022-03-09T06:35:59.000Z
|
vk/bot_framework/addons/caching/__init__.py
|
Inzilkin/vk.py
|
969f01e666c877c1761c3629a100768f93de27eb
|
[
"MIT"
] | null | null | null |
vk/bot_framework/addons/caching/__init__.py
|
Inzilkin/vk.py
|
969f01e666c877c1761c3629a100768f93de27eb
|
[
"MIT"
] | 12
|
2019-09-13T15:30:31.000Z
|
2022-03-01T10:13:32.000Z
|
from .cached_object import CachedResponse
from .core import cached_handler
| 25
| 41
| 0.866667
| 10
| 75
| 6.3
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 75
| 2
| 42
| 37.5
| 0.940299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
dbe78957586ae5ab09fb4416c50b690db7cc080c
| 7,050
|
py
|
Python
|
tests/components/xiaomi_miio/test_config_flow.py
|
miccico/core
|
14c205384171dee59c1a908f8449f9864778b2dc
|
[
"Apache-2.0"
] | 6
|
2017-08-02T19:26:39.000Z
|
2020-03-14T22:47:41.000Z
|
tests/components/xiaomi_miio/test_config_flow.py
|
miccico/core
|
14c205384171dee59c1a908f8449f9864778b2dc
|
[
"Apache-2.0"
] | 57
|
2020-10-15T06:47:00.000Z
|
2022-03-31T06:11:18.000Z
|
tests/components/xiaomi_miio/test_config_flow.py
|
miccico/core
|
14c205384171dee59c1a908f8449f9864778b2dc
|
[
"Apache-2.0"
] | 14
|
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""Test the Xiaomi Miio config flow."""
from unittest.mock import Mock, patch
from miio import DeviceException
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.components.xiaomi_miio import config_flow, const
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
# Keys used in zeroconf discovery payloads.
ZEROCONF_NAME = "name"
ZEROCONF_PROP = "properties"
ZEROCONF_MAC = "mac"

# Fixture values shared by the config-flow tests below.
TEST_HOST = "1.2.3.4"
TEST_TOKEN = "12345678901234567890123456789012"
TEST_NAME = "Test_Gateway"
TEST_MODEL = "model5"
TEST_MAC = "ab:cd:ef:gh:ij:kl"
TEST_GATEWAY_ID = TEST_MAC
TEST_HARDWARE_VERSION = "AB123"
TEST_FIRMWARE_VERSION = "1.2.3_456"
TEST_ZEROCONF_NAME = "lumi-gateway-v3_miio12345678._miio._udp.local."
TEST_SUB_DEVICE_LIST = []


def get_mock_info(
    model=TEST_MODEL,
    mac_address=TEST_MAC,
    hardware_version=TEST_HARDWARE_VERSION,
    firmware_version=TEST_FIRMWARE_VERSION,
):
    """Return a mock gateway info instance."""
    info = Mock()
    info.model = model
    info.mac_address = mac_address
    info.hardware_version = hardware_version
    info.firmware_version = firmware_version
    return info
async def test_config_flow_step_user_no_device(hass):
    """Test config flow, user step with no device selected."""
    # Start the flow from the user-initiated source.
    result = await hass.config_entries.flow.async_init(
        const.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert result["errors"] == {}
    # Submitting an empty form should re-show the user step with an error.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert result["errors"] == {"base": "no_device_selected"}
async def test_config_flow_step_gateway_connect_error(hass):
    """Test config flow, gateway connection error."""
    result = await hass.config_entries.flow.async_init(
        const.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert result["errors"] == {}
    # Select the gateway device type to reach the gateway step.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {config_flow.CONF_GATEWAY: True},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "gateway"
    assert result["errors"] == {}
    # Simulate the gateway being unreachable when its info is requested.
    with patch(
        "homeassistant.components.xiaomi_miio.gateway.gateway.Gateway.info",
        side_effect=DeviceException({}),
    ):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_HOST: TEST_HOST, CONF_NAME: TEST_NAME, CONF_TOKEN: TEST_TOKEN},
        )
    # Flow stays on the gateway step and reports the connection failure.
    assert result["type"] == "form"
    assert result["step_id"] == "gateway"
    assert result["errors"] == {"base": "cannot_connect"}
async def test_config_flow_gateway_success(hass):
    """Test a successful config flow."""
    result = await hass.config_entries.flow.async_init(
        const.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert result["errors"] == {}
    # Select the gateway device type to reach the gateway step.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {config_flow.CONF_GATEWAY: True},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "gateway"
    assert result["errors"] == {}
    mock_info = get_mock_info()
    # Patch out device I/O so the flow completes without real hardware.
    with patch(
        "homeassistant.components.xiaomi_miio.gateway.gateway.Gateway.info",
        return_value=mock_info,
    ), patch(
        "homeassistant.components.xiaomi_miio.gateway.gateway.Gateway.discover_devices",
        return_value=TEST_SUB_DEVICE_LIST,
    ), patch(
        "homeassistant.components.xiaomi_miio.async_setup_entry", return_value=True
    ):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_HOST: TEST_HOST, CONF_NAME: TEST_NAME, CONF_TOKEN: TEST_TOKEN},
        )
    # A config entry is created with the gateway's details.
    assert result["type"] == "create_entry"
    assert result["title"] == TEST_NAME
    assert result["data"] == {
        config_flow.CONF_FLOW_TYPE: config_flow.CONF_GATEWAY,
        CONF_HOST: TEST_HOST,
        CONF_TOKEN: TEST_TOKEN,
        "model": TEST_MODEL,
        "mac": TEST_MAC,
    }
async def test_zeroconf_gateway_success(hass):
    """Test a successful zeroconf discovery of a gateway."""
    # Start the flow from a zeroconf discovery payload.
    result = await hass.config_entries.flow.async_init(
        const.DOMAIN,
        context={"source": config_entries.SOURCE_ZEROCONF},
        data={
            zeroconf.ATTR_HOST: TEST_HOST,
            ZEROCONF_NAME: TEST_ZEROCONF_NAME,
            ZEROCONF_PROP: {ZEROCONF_MAC: TEST_MAC},
        },
    )
    assert result["type"] == "form"
    assert result["step_id"] == "gateway"
    assert result["errors"] == {}
    mock_info = get_mock_info()
    # Patch out device I/O so the flow completes without real hardware.
    with patch(
        "homeassistant.components.xiaomi_miio.gateway.gateway.Gateway.info",
        return_value=mock_info,
    ), patch(
        "homeassistant.components.xiaomi_miio.gateway.gateway.Gateway.discover_devices",
        return_value=TEST_SUB_DEVICE_LIST,
    ), patch(
        "homeassistant.components.xiaomi_miio.async_setup_entry", return_value=True
    ):
        # Host comes from discovery, so only name and token are submitted.
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_NAME: TEST_NAME, CONF_TOKEN: TEST_TOKEN},
        )
    assert result["type"] == "create_entry"
    assert result["title"] == TEST_NAME
    assert result["data"] == {
        config_flow.CONF_FLOW_TYPE: config_flow.CONF_GATEWAY,
        CONF_HOST: TEST_HOST,
        CONF_TOKEN: TEST_TOKEN,
        "model": TEST_MODEL,
        "mac": TEST_MAC,
    }
async def test_zeroconf_unknown_device(hass):
    """Test a failed zeroconf discovery because of a unknown device."""
    # A zeroconf name that doesn't match the expected device pattern aborts.
    result = await hass.config_entries.flow.async_init(
        const.DOMAIN,
        context={"source": config_entries.SOURCE_ZEROCONF},
        data={
            zeroconf.ATTR_HOST: TEST_HOST,
            ZEROCONF_NAME: "not-a-xiaomi-miio-device",
            ZEROCONF_PROP: {ZEROCONF_MAC: TEST_MAC},
        },
    )
    assert result["type"] == "abort"
    assert result["reason"] == "not_xiaomi_miio"
async def test_zeroconf_no_data(hass):
    """Test a failed zeroconf discovery because of no data."""
    # An empty discovery payload must abort the flow, not crash it.
    result = await hass.config_entries.flow.async_init(
        const.DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}, data={}
    )
    assert result["type"] == "abort"
    assert result["reason"] == "not_xiaomi_miio"
async def test_zeroconf_missing_data(hass):
    """Test a failed zeroconf discovery because of missing data."""
    # Payload lacks the properties/mac entry, so the flow must abort.
    result = await hass.config_entries.flow.async_init(
        const.DOMAIN,
        context={"source": config_entries.SOURCE_ZEROCONF},
        data={zeroconf.ATTR_HOST: TEST_HOST, ZEROCONF_NAME: TEST_ZEROCONF_NAME},
    )
    assert result["type"] == "abort"
    assert result["reason"] == "not_xiaomi_miio"
| 31.900452
| 88
| 0.673475
| 853
| 7,050
| 5.280188
| 0.123095
| 0.095915
| 0.043295
| 0.060613
| 0.742007
| 0.737123
| 0.710924
| 0.710924
| 0.701821
| 0.668517
| 0
| 0.009823
| 0.205816
| 7,050
| 220
| 89
| 32.045455
| 0.794606
| 0.009929
| 0
| 0.627219
| 0
| 0
| 0.170769
| 0.084929
| 0
| 0
| 0
| 0
| 0.213018
| 1
| 0.005917
| false
| 0
| 0.035503
| 0
| 0.047337
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e0197a3c0ebbf95901f953695fc181d52df6f4d2
| 222
|
py
|
Python
|
Configuration/Geometry/python/GeometrySimTracker_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Configuration/Geometry/python/GeometrySimTracker_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Configuration/Geometry/python/GeometrySimTracker_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
# Ideal geometry, needed for simulation
from Geometry.CMSCommonData.trackerOnlyGeometryXML_cfi import *
from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *
| 37
| 75
| 0.869369
| 23
| 222
| 8.304348
| 0.73913
| 0.125654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085586
| 222
| 5
| 76
| 44.4
| 0.940887
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e0208da88932e790e06146f6491bc63b820d9c7c
| 134
|
py
|
Python
|
CityProject/CityProjectApp/admin.py
|
cs-fullstack-2019-spring/django-fields-widgets-cw-clyde5649
|
5150f3aa4ed8c8fe99e30d23974f357c56a5705a
|
[
"Apache-2.0"
] | null | null | null |
CityProject/CityProjectApp/admin.py
|
cs-fullstack-2019-spring/django-fields-widgets-cw-clyde5649
|
5150f3aa4ed8c8fe99e30d23974f357c56a5705a
|
[
"Apache-2.0"
] | null | null | null |
CityProject/CityProjectApp/admin.py
|
cs-fullstack-2019-spring/django-fields-widgets-cw-clyde5649
|
5150f3aa4ed8c8fe99e30d23974f357c56a5705a
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import CityApplication
# Register your models here.
# Makes CityApplication manageable through the Django admin site.
admin.site.register(CityApplication)
| 33.5
| 36
| 0.843284
| 17
| 134
| 6.647059
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097015
| 134
| 4
| 36
| 33.5
| 0.933884
| 0.19403
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0ec6d6bd2d1889a6708eff6b20bbb06042f1b764
| 209
|
py
|
Python
|
sindy_bvp/differentiators/__init__.py
|
sheadan/SINDy-BVP
|
ac5b2bb4854bb311e4f6f26b180dde87cc10c13d
|
[
"MIT"
] | 8
|
2020-05-19T23:56:39.000Z
|
2022-03-04T19:22:56.000Z
|
sindy_bvp/differentiators/__init__.py
|
sheadan/SINDy-BVP
|
ac5b2bb4854bb311e4f6f26b180dde87cc10c13d
|
[
"MIT"
] | null | null | null |
sindy_bvp/differentiators/__init__.py
|
sheadan/SINDy-BVP
|
ac5b2bb4854bb311e4f6f26b180dde87cc10c13d
|
[
"MIT"
] | 3
|
2020-08-07T17:57:02.000Z
|
2021-03-19T23:44:44.000Z
|
from .base_differentiator import BaseDifferentiator
from .finite_differences import FiniteDifferences
from .poly_interp import PolyInterp
# NOTE(review): __all__ lists the *submodule* names rather than the class
# names imported above, so `from sindy_bvp.differentiators import *` exports
# modules, not classes — confirm this is intentional.
__all__ = ["base_differentiator", "finite_differences", "poly_interp"]
| 41.8
| 70
| 0.851675
| 22
| 209
| 7.636364
| 0.545455
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08134
| 209
| 5
| 70
| 41.8
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0.228571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0ee1b0021f210f5332c8eed34b44e32e59c82f7d
| 4,687
|
py
|
Python
|
tests/tests.py
|
LLNL/spdlayers
|
27e0dc2ac16ed89c559b0ac78fb9cb2784f1e7ca
|
[
"MIT"
] | null | null | null |
tests/tests.py
|
LLNL/spdlayers
|
27e0dc2ac16ed89c559b0ac78fb9cb2784f1e7ca
|
[
"MIT"
] | 2
|
2021-12-01T21:02:46.000Z
|
2022-02-06T23:05:51.000Z
|
tests/tests.py
|
LLNL/spdlayers
|
27e0dc2ac16ed89c559b0ac78fb9cb2784f1e7ca
|
[
"MIT"
] | null | null | null |
# Copyright 2021, Lawrence Livermore National Security, LLC and spdlayer
# contributors
# SPDX-License-Identifier: MIT
import unittest
import numpy as np
import torch
import torch.nn as nn
from spdlayers import Eigen, Cholesky
from spdlayers import in_shape_from
positive_funs = ['Abs', 'Square', 'Softplus', 'ReLU', 'ReLU6', '4', 'Exp']
positive_funs_chol = positive_funs + ['None']
batch_size = 1000
class TestSPD(unittest.TestCase):
    """Tests for spdlayers' Eigen and Cholesky SPD output layers."""

    def test_input_shape(self):
        # A symmetric 6x6 matrix has 6*(6+1)/2 = 21 independent entries.
        in_shape = in_shape_from(6)
        self.assertTrue(in_shape == 21)
        self.assertIsInstance(in_shape, int)

    def test_Eigen_anisotropic(self):
        # Every positivity function must yield strictly positive eigenvalues
        # for the anisotropic (full 21-parameter) case.
        x = torch.rand(batch_size, 21).double()
        for pos in positive_funs:
            myEigen = Eigen(output_shape=6, positive=pos).double()
            out = myEigen(x)
            u = torch.real(torch.linalg.eigvals(out))
            min_eig_val = torch.min(u).item()
            self.assertTrue(min_eig_val > 0.0)

    def test_Cholesky_anisotropic(self):
        # Cholesky output may be only positive *semi*-definite, so clamp
        # numerically-zero eigenvalues before the >= 0 check.
        x = torch.rand(batch_size, 21).double()
        for pos in positive_funs_chol:
            myCholesky = Cholesky(output_shape=6, positive=pos).double()
            out = myCholesky(x)
            u = torch.real(torch.linalg.eigvals(out))
            min_eig_val = torch.min(u).item()
            if np.isclose(min_eig_val, 0.0, rtol=1e-10, atol=1e-10):
                min_eig_val = 0.0
            self.assertTrue(min_eig_val >= 0.0)

    def test_Eigen_orthotropic(self):
        # Orthotropic symmetry uses only 9 input features.
        x = torch.rand(batch_size, 9).double()
        for pos in positive_funs:
            myEigen = Eigen(output_shape=6,
                            symmetry='orthotropic',
                            positive=pos).double()
            out = myEigen(x)
            u = torch.real(torch.linalg.eigvals(out))
            min_eig_val = torch.min(u).item()
            self.assertTrue(min_eig_val > 0.0)

    def test_Cholesky_orthotropic(self):
        # Orthotropic Cholesky: same semi-definite tolerance as above.
        x = torch.rand(batch_size, 9).double()
        for pos in positive_funs:
            myCholesky = Cholesky(output_shape=6,
                                  symmetry='orthotropic',
                                  positive=pos).double()
            out = myCholesky(x)
            u = torch.real(torch.linalg.eigvals(out))
            min_eig_val = torch.min(u).item()
            if np.isclose(min_eig_val, 0.0, rtol=1e-10, atol=1e-10):
                min_eig_val = 0.0
            self.assertTrue(min_eig_val >= 0.0)

    def test_value_errors_eig(self):
        # Invalid symmetry, incompatible output_shape, and unknown positivity
        # function should each raise ValueError.
        try:
            _ = Eigen(symmetry='bob')
            self.assertTrue(False)
        except ValueError:
            self.assertTrue(True)
        try:
            _ = Eigen(output_shape=7, symmetry='orthotropic')
            self.assertTrue(False)
        except ValueError:
            self.assertTrue(True)
        try:
            _ = Eigen(positive='hello')
            self.assertTrue(False)
        except ValueError:
            self.assertTrue(True)

    def test_value_errors_chol(self):
        # Same validation checks as above, for the Cholesky layer.
        try:
            _ = Cholesky(symmetry='bob')
            self.assertTrue(False)
        except ValueError:
            self.assertTrue(True)
        try:
            _ = Cholesky(output_shape=7, symmetry='orthotropic')
            self.assertTrue(False)
        except ValueError:
            self.assertTrue(True)
        try:
            _ = Cholesky(positive='hello')
            self.assertTrue(False)
        except ValueError:
            self.assertTrue(True)

    def test_example_one(self):
        # Smoke test: Cholesky layer as the last stage of an nn.Sequential.
        hidden_size = 100
        n_features = 2
        out_shape = 6
        in_shape = in_shape_from(out_shape)
        model = nn.Sequential(
            nn.Linear(n_features, hidden_size),
            nn.Linear(hidden_size, in_shape),
            Cholesky(output_shape=out_shape)
        )
        x = torch.rand((10, n_features))
        model(x)

    def test_example_two(self):
        # Smoke test: Eigen layer as the last stage of an nn.Sequential.
        hidden_size = 100
        n_features = 2
        out_shape = 6
        in_shape = in_shape_from(out_shape)
        model = nn.Sequential(
            nn.Linear(n_features, hidden_size),
            nn.Linear(hidden_size, in_shape),
            Eigen(output_shape=out_shape)
        )
        x = torch.rand((10, n_features))
        model(x)

    def test_different_output(self):
        # Smoke test: a larger (21x21) output shape also works.
        hidden_size = 100
        n_features = 2
        out_shape = 21
        in_shape = in_shape_from(out_shape)
        model = nn.Sequential(
            nn.Linear(n_features, hidden_size),
            nn.Linear(hidden_size, in_shape),
            Eigen(output_shape=out_shape)
        )
        x = torch.rand((10, n_features))
        model(x)
if __name__ == '__main__':
    # Discover and run all tests in this module when executed as a script.
    unittest.main()
| 31.884354
| 74
| 0.574781
| 562
| 4,687
| 4.564057
| 0.186833
| 0.092788
| 0.042105
| 0.031189
| 0.785965
| 0.764912
| 0.764912
| 0.760234
| 0.760234
| 0.727096
| 0
| 0.023629
| 0.322808
| 4,687
| 146
| 75
| 32.10274
| 0.784499
| 0.023896
| 0
| 0.669355
| 0
| 0
| 0.022315
| 0
| 0
| 0
| 0
| 0
| 0.145161
| 1
| 0.080645
| false
| 0
| 0.048387
| 0
| 0.137097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0ef058555d5e6fbc64092ed9c182475e2cc548ab
| 140
|
py
|
Python
|
hack/nmapportscan/index.py
|
MisterZhouZhou/pythonLearn
|
8933c7a6d444d3d86a173984e6cf4c08dbf84039
|
[
"Apache-2.0"
] | 1
|
2019-07-09T09:59:39.000Z
|
2019-07-09T09:59:39.000Z
|
hack/nmapportscan/index.py
|
MisterZhouZhou/pythonLearn
|
8933c7a6d444d3d86a173984e6cf4c08dbf84039
|
[
"Apache-2.0"
] | null | null | null |
hack/nmapportscan/index.py
|
MisterZhouZhou/pythonLearn
|
8933c7a6d444d3d86a173984e6cf4c08dbf84039
|
[
"Apache-2.0"
] | null | null | null |
import subprocess

if __name__ == '__main__':
    # Run the port-scan helper script. Pass the command as an argument list
    # with shell=False (the default) instead of a shell string with
    # shell=True, which avoids shell-injection risks and quoting pitfalls.
    subprocess.call(
        ["python3", "nmap.py",
         "-H", "www.baidu.com",
         "-p", "21,22,25,80,143,145,443"]
    )
| 35
| 94
| 0.714286
| 24
| 140
| 3.833333
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 0.121429
| 140
| 4
| 94
| 35
| 0.601626
| 0
| 0
| 0
| 0
| 0.333333
| 0.475177
| 0.163121
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1622a6a6c5d6abb5f0578d05e5b5784c58372929
| 373
|
py
|
Python
|
atest/testdata/standard_libraries/builtin/RegisteringLibrary.py
|
phil-davis/robotframework
|
4d4ce686cbe01e293bb86ea6ff34330e8c45fc43
|
[
"ECL-2.0",
"Apache-2.0"
] | 7,073
|
2015-01-01T17:19:16.000Z
|
2022-03-31T22:01:29.000Z
|
atest/testdata/standard_libraries/builtin/RegisteringLibrary.py
|
phil-davis/robotframework
|
4d4ce686cbe01e293bb86ea6ff34330e8c45fc43
|
[
"ECL-2.0",
"Apache-2.0"
] | 2,412
|
2015-01-02T09:29:05.000Z
|
2022-03-31T13:10:46.000Z
|
atest/testdata/standard_libraries/builtin/RegisteringLibrary.py
|
phil-davis/robotframework
|
4d4ce686cbe01e293bb86ea6ff34330e8c45fc43
|
[
"ECL-2.0",
"Apache-2.0"
] | 2,298
|
2015-01-03T02:47:15.000Z
|
2022-03-31T02:00:16.000Z
|
from robot.libraries.BuiltIn import BuiltIn, register_run_keyword


def run_keyword_function(name, *args):
    """Run the keyword ``name`` with ``args`` via BuiltIn.run_keyword."""
    return BuiltIn().run_keyword(name, *args)


# One argument (the keyword name) is consumed by this wrapper itself.
register_run_keyword(__name__, 'run_keyword_function', 1)


def run_keyword_without_keyword(*args):
    """Run the hard-coded '\\Log Many' keyword with the given arguments."""
    # Raw string: '\L' is an invalid escape sequence (SyntaxWarning on
    # Python 3.12+). r'\Log Many' has the exact same runtime value.
    return BuiltIn().run_keyword(r'\Log Many', *args)


# No arguments are consumed by this wrapper (keyword name is hard-coded).
register_run_keyword(__name__, 'run_keyword_without_keyword', 0)
| 26.642857
| 65
| 0.790885
| 51
| 373
| 5.27451
| 0.352941
| 0.334572
| 0.200743
| 0.148699
| 0.468401
| 0.267658
| 0.267658
| 0
| 0
| 0
| 0
| 0.005952
| 0.099196
| 373
| 13
| 66
| 28.692308
| 0.794643
| 0
| 0
| 0
| 0
| 0
| 0.150134
| 0.072386
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.285714
| 0.714286
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
162617b4649a84f41a26ecbf6666cbe9336a999c
| 113
|
py
|
Python
|
msbd/modello_lineare/__init__.py
|
mnslarcher/metodi-statistici-big-data
|
4587b4e4104557e50d09d028259d6c42c44d2814
|
[
"MIT"
] | 1
|
2019-02-17T09:28:04.000Z
|
2019-02-17T09:28:04.000Z
|
msbd/modello_lineare/__init__.py
|
mnslarcher/metodi-statistici-big-data
|
4587b4e4104557e50d09d028259d6c42c44d2814
|
[
"MIT"
] | null | null | null |
msbd/modello_lineare/__init__.py
|
mnslarcher/metodi-statistici-big-data
|
4587b4e4104557e50d09d028259d6c42c44d2814
|
[
"MIT"
] | null | null | null |
from .modello_lineare_dinamico import ModelloLineareDinamico
from .regressione_lineare import RegressioneLineare
| 37.666667
| 60
| 0.911504
| 11
| 113
| 9.090909
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070796
| 113
| 2
| 61
| 56.5
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
162e0ccb9cee7a5df9964472aa620f9d9218f291
| 157
|
py
|
Python
|
elrte/tests.py
|
joonas/django-elrte
|
6ec0db03d30d0c3dd0314ef48eecdb23280ec8df
|
[
"Apache-2.0"
] | 1
|
2016-03-22T08:15:14.000Z
|
2016-03-22T08:15:14.000Z
|
elrte/tests.py
|
joonas/django-elrte
|
6ec0db03d30d0c3dd0314ef48eecdb23280ec8df
|
[
"Apache-2.0"
] | null | null | null |
elrte/tests.py
|
joonas/django-elrte
|
6ec0db03d30d0c3dd0314ef48eecdb23280ec8df
|
[
"Apache-2.0"
] | null | null | null |
from django.test import TestCase


class DjangoElrteTest(TestCase):
    """
    Tests for django-elrte.
    """
    # NOTE: the original declared `class django-elrteTest` and
    # `def test_django-elrte`, both of which are syntax errors (hyphens are
    # not valid in Python identifiers); renamed to valid PEP 8 names.

    def test_django_elrte(self):
        # Placeholder until real tests are written.
        pass
| 17.444444
| 33
| 0.649682
| 19
| 157
| 5.315789
| 0.684211
| 0.217822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.248408
| 157
| 9
| 34
| 17.444444
| 0.855932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.25
| 0.25
| null | null | 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
16359ad8ccd9071e20bd8e57d23fdafdf0f7a4ee
| 442
|
py
|
Python
|
120-Raise/Script.py
|
dikyindrah/Python-Pemrograman-Dasar-02
|
7b60391d348504eea8ebc36896857e3e92fda3f7
|
[
"MIT"
] | null | null | null |
120-Raise/Script.py
|
dikyindrah/Python-Pemrograman-Dasar-02
|
7b60391d348504eea8ebc36896857e3e92fda3f7
|
[
"MIT"
] | null | null | null |
120-Raise/Script.py
|
dikyindrah/Python-Pemrograman-Dasar-02
|
7b60391d348504eea8ebc36896857e3e92fda3f7
|
[
"MIT"
] | null | null | null |
# Example input: -1 (a non-positive value triggers the exception below).
n = int(input('input nilai: '))
if n <= 0:
    # Raise the exception with the message text to be displayed.
    raise ValueError('nilai n harus bilangan positif')
try:
    n = int(input('input nilai: '))
    if n <= 0:
        # Raise the exception with the message text to be displayed.
        raise ValueError('nilai n harus bilangan positif')
except ValueError as ve:
    # Print the message text from the exception that occurred.
    print(ve)
| 29.466667
| 63
| 0.669683
| 59
| 442
| 5.016949
| 0.457627
| 0.027027
| 0.060811
| 0.094595
| 0.736486
| 0.736486
| 0.736486
| 0.736486
| 0.736486
| 0.736486
| 0
| 0.008902
| 0.237557
| 442
| 15
| 64
| 29.466667
| 0.869436
| 0.371041
| 0
| 0.666667
| 0
| 0
| 0.313869
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
163cae4a826b44ed65d6e531e900e084681bfa6c
| 2,306
|
py
|
Python
|
tests/test_catz2nsd_config.py
|
kirei/catz
|
419629bc3089ae22b94e9568668bcbc0ddd9243c
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_catz2nsd_config.py
|
kirei/catz
|
419629bc3089ae22b94e9568668bcbc0ddd9243c
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_catz2nsd_config.py
|
kirei/catz
|
419629bc3089ae22b94e9568668bcbc0ddd9243c
|
[
"BSD-2-Clause"
] | null | null | null |
import os.path
from pathlib import Path
import pytest
from dnscatz import catz2nsd
from dnscatz.catz2nsd import CatalogZoneError, InvalidConfigurationError
DATADIR = Path(os.path.abspath(os.path.dirname(__file__))) / "data"
CONFIG_GOOD = """
catalog-zone:
name: test.catz
zonefile: good.zone
pattern: ns1
key:
name: key1
algorithm: hmac-sha256
secret: "b90O5awgKh8zY9Tkc3Yc9lmREgRi0S5JJGJ5JaGF3fw="
"""
CONFIG_BAD_ZONE1 = """
catalog-zone:
name: test.catz
zonefile: bad_non_unique_id.zone
pattern: ns1
key:
name: key1
algorithm: hmac-sha256
secret: "b90O5awgKh8zY9Tkc3Yc9lmREgRi0S5JJGJ5JaGF3fw="
"""
CONFIG_BAD_ZONE2 = """
catalog-zone:
name: test.catz
zonefile: invalid_version.zone
pattern: ns1
key:
name: key1
algorithm: hmac-sha256
secret: "b90O5awgKh8zY9Tkc3Yc9lmREgRi0S5JJGJ5JaGF3fw="
"""
CONFIG_BAD_KEY_DUPE = """
catalog-zone:
name: test.catz
zonefile: good.zone
pattern: ns1
key:
name: key1
algorithm: hmac-sha256
secret: "b90O5awgKh8zY9Tkc3Yc9lmREgRi0S5JJGJ5JaGF3fw="
key:
name: key1
algorithm: hmac-sha256
secret: "b90O5awgKh8zY9Tkc3Yc9lmREgRi0S5JJGJ5JaGF3fw="
"""
CONFIG_BAD_ZONE_DUPE = """
catalog-zone:
name: test.catz
zonefile: good.zone
pattern: ns1
catalog-zone:
name: test.catz
zonefile: good.zone
pattern: ns1
key:
name: key1
algorithm: hmac-sha256
secret: "b90O5awgKh8zY9Tkc3Yc9lmREgRi0S5JJGJ5JaGF3fw="
"""
def test_config_good():
    """A well-formed configuration parses without raising."""
    parsed = catz2nsd.parse_multidicts(CONFIG_GOOD)
    catz2nsd.parse_config(parsed, cwd=DATADIR)
def test_config_bad_1():
    """Config referencing bad_non_unique_id.zone raises CatalogZoneError."""
    parsed = catz2nsd.parse_multidicts(CONFIG_BAD_ZONE1)
    with pytest.raises(CatalogZoneError):
        catz2nsd.parse_config(parsed, cwd=DATADIR)
def test_config_bad_2():
    """Config referencing invalid_version.zone raises CatalogZoneError."""
    parsed = catz2nsd.parse_multidicts(CONFIG_BAD_ZONE2)
    with pytest.raises(CatalogZoneError):
        catz2nsd.parse_config(parsed, cwd=DATADIR)
def test_config_bad_key_dup():
    """A duplicated key section raises InvalidConfigurationError."""
    parsed = catz2nsd.parse_multidicts(CONFIG_BAD_KEY_DUPE)
    with pytest.raises(InvalidConfigurationError):
        catz2nsd.parse_config(parsed, cwd=DATADIR)
def test_config_bad_zone_dup():
    """A duplicated catalog-zone section raises InvalidConfigurationError."""
    parsed = catz2nsd.parse_multidicts(CONFIG_BAD_ZONE_DUPE)
    with pytest.raises(InvalidConfigurationError):
        catz2nsd.parse_config(parsed, cwd=DATADIR)
| 20.963636
| 72
| 0.751951
| 265
| 2,306
| 6.313208
| 0.188679
| 0.064555
| 0.053796
| 0.068141
| 0.830843
| 0.809922
| 0.727436
| 0.678422
| 0.678422
| 0.678422
| 0
| 0.058252
| 0.151344
| 2,306
| 109
| 73
| 21.155963
| 0.796627
| 0
| 0
| 0.722892
| 0
| 0
| 0.457069
| 0.129228
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060241
| false
| 0
| 0.060241
| 0
| 0.120482
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
16cd6bc757c70661ee483d392cc5fa510608682f
| 6,540
|
py
|
Python
|
zephyrus_sc2_parser/gamedata/16939/ability_data.py
|
dareonion/zephyrus-sc2-parser
|
0e31ac86e4546466eba44f37ed13ab2880fe5d39
|
[
"MIT"
] | 28
|
2020-03-11T14:07:12.000Z
|
2022-03-24T23:53:08.000Z
|
zephyrus_sc2_parser/gamedata/16939/ability_data.py
|
dareonion/zephyrus-sc2-parser
|
0e31ac86e4546466eba44f37ed13ab2880fe5d39
|
[
"MIT"
] | 19
|
2020-06-25T11:59:43.000Z
|
2022-03-27T23:11:00.000Z
|
zephyrus_sc2_parser/gamedata/16939/ability_data.py
|
dareonion/zephyrus-sc2-parser
|
0e31ac86e4546466eba44f37ed13ab2880fe5d39
|
[
"MIT"
] | 4
|
2020-06-24T21:12:15.000Z
|
2021-11-19T15:27:35.000Z
|
# Version 16939
abilities = {36: {'ability_name': 'stop'}, 38: {'ability_name': 'move'}, 45: {'ability_name': 'attack'}, 60: {'ability_name': 'GhostHoldFire'}, 61: {'ability_name': 'GhostWeaponsFree'}, 63: {'ability_name': 'Explode'}, 65: {'ability_name': 'FungalGrowth', 'energy_cost': 75}, 66: {'ability_name': 'GuardianShield', 'energy_cost': 75}, 70: {'ability_name': 'Feedback', 'energy_cost': 50}, 73: {'ability_name': 'HallucinationArchon', 'energy_cost': 75}, 74: {'ability_name': 'HallucinationColossus', 'energy_cost': 75}, 75: {'ability_name': 'HallucinationHighTemplar', 'energy_cost': 75}, 76: {'ability_name': 'HallucinationImmortal', 'energy_cost': 75}, 77: {'ability_name': 'HallucinationPhoenix', 'energy_cost': 75}, 78: {'ability_name': 'HallucinationProbe', 'energy_cost': 75}, 79: {'ability_name': 'HallucinationStalker', 'energy_cost': 75}, 80: {'ability_name': 'HallucinationVoidRay', 'energy_cost': 75}, 81: {'ability_name': 'HallucinationWarpPrism', 'energy_cost': 75}, 82: {'ability_name': 'HallucinationZealot', 'energy_cost': 75}, 85: {'ability_name': 'CalldownMULE', 'energy_cost': 50}, 86: {'ability_name': 'GravitonBeam', 'energy_cost': 50}, 90: {'ability_name': 'SpawnChangeling', 'energy_cost': 50}, 97: {'ability_name': 'Rally'}, 99: {'ability_name': 'RallyCommand'}, 101: {'ability_name': 'RallyHatchery'}, 105: {'ability_name': 'NeuralParasite', 'energy_cost': 100}, 106: {'ability_name': 'SpawnLarva', 'energy_cost': 25}, 107: {'ability_name': 'StimpackMarauder'}, 108: {'ability_name': 'SupplyDrop', 'energy_cost': 50}, 126: {'ability_name': 'Stimpack'}, 127: {'ability_name': 'GhostCloak', 'energy_cost': 25}, 130: {'ability_name': 'SiegeMode'}, 131: {'ability_name': 'Unsiege'}, 132: {'ability_name': 'BansheeCloak', 'energy_cost': 25}, 133: {'ability_name': 'MedivacTransport'}, 134: {'ability_name': 'ScannerSweep', 'energy_cost': 50}, 135: {'ability_name': 'Yamato'}, 136: {'ability_name': 'AssaultMode'}, 137: {'ability_name': 'FighterMode'}, 139: {'ability_name': 
'CommandCenterTransport'}, 140: {'ability_name': 'CommandCenterLiftOff'}, 141: {'ability_name': 'CommandCenterLand'}, 143: {'ability_name': 'BarracksLiftOff'}, 145: {'ability_name': 'FactoryLiftOff'}, 147: {'ability_name': 'StarportLiftOff'}, 148: {'ability_name': 'FactoryLand'}, 149: {'ability_name': 'StarportLand'}, 151: {'ability_name': 'BarracksLand'}, 152: {'ability_name': 'SupplyDepotLower'}, 153: {'ability_name': 'SupplyDepotRaise'}, 166: {'ability_name': 'WarpPrismTransport'}, 171: {'ability_name': 'PsiStorm', 'energy_cost': 75}, 181: {'ability_name': 'UpgradeToLair'}, 182: {'ability_name': 'UpgradeToHive'}, 183: {'ability_name': 'UpgradeToGreaterSpire'}, 189: {'ability_name': 'MorphToBroodLord'}, 190: {'ability_name': 'BurrowBanelingDown'}, 191: {'ability_name': 'BurrowBanelingUp'}, 192: {'ability_name': 'BurrowDroneDown'}, 193: {'ability_name': 'BurrowDroneUp'}, 194: {'ability_name': 'BurrowHydraliskDown'}, 195: {'ability_name': 'BurrowHydraliskUp'}, 196: {'ability_name': 'BurrowRoachDown'}, 197: {'ability_name': 'BurrowRoachUp'}, 198: {'ability_name': 'BurrowZerglingDown'}, 199: {'ability_name': 'BurrowZerglingUp'}, 206: {'ability_name': 'OverlordTransport'}, 210: {'ability_name': 'BurrowQueenDown'}, 211: {'ability_name': 'BurrowQueenUp'}, 212: {'ability_name': 'NydusCanalTransport'}, 213: {'ability_name': 'Blink'}, 214: {'ability_name': 'BurrowInfestorDown'}, 215: {'ability_name': 'BurrowInfestorUp'}, 217: {'ability_name': 'UpgradeToPlanetaryFortress'}, 220: {'ability_name': 'BurrowUltraliskDown'}, 221: {'ability_name': 'BurrowUltraliskUp'}, 222: {'ability_name': 'UpgradeToOrbital'}, 225: {'ability_name': 'OrbitalLiftOff'}, 226: {'ability_name': 'OrbitalCommandLand'}, 227: {'ability_name': 'ForceField', 'energy_cost': 50}, 228: {'ability_name': 'PhasingMode'}, 229: {'ability_name': 'TransportMode'}, 233: {'ability_name': 'TacNukeStrike'}, 236: {'ability_name': 'EMP', 'energy_cost': 75}, 240: {'ability_name': 'Transfusion', 'energy_cost': 50}, 249: 
{'ability_name': 'AttackRedirect'}, 250: {'ability_name': 'StimpackRedirect'}, 251: {'ability_name': 'StimpackMarauderRedirect'}, 253: {'ability_name': 'StopRedirect'}, 254: {'ability_name': 'GenerateCreep', 'energy_cost': 25}, 256: {'ability_name': 'SpineCrawlerUproot'}, 257: {'ability_name': 'SporeCrawlerUproot'}, 258: {'ability_name': 'SpineCrawlerRoot'}, 259: {'ability_name': 'SporeCrawlerRoot'}, 261: {'ability_name': 'BuildAutoTurret'}, 265: {'ability_name': 'Charge'}, 269: {'ability_name': 'Contaminate', 'energy_cost': 125}, 341: {'ability_name': 'MorphToHellion'}, 351: {'ability_name': 'MorphToHellionTank'}, 365: {'ability_name': 'BlindingCloud', 'energy_cost': 100}, 367: {'ability_name': 'Yoink', 'energy_cost': 75}, 370: {'ability_name': 'ViperConsumeStructure'}, 374: {'ability_name': 'VolatileBurstBuilding'}, 381: {'ability_name': 'WidowMineBurrow'}, 382: {'ability_name': 'WidowMineUnburrow'}, 383: {'ability_name': 'WidowMineAttack'}, 384: {'ability_name': 'TornadoMissile'}, 388: {'ability_name': 'BurrowLurkerMPDown'}, 389: {'ability_name': 'BurrowLurkerMPUp'}, 391: {'ability_name': 'HallucinationOracle', 'energy_cost': 75}, 392: {'ability_name': 'MedivacSpeedBoost'}, 407: {'ability_name': 'OracleRevelation', 'energy_cost': 50}, 456: {'ability_name': 'TemporalField', 'energy_cost': 100}, 500: {'ability_name': 'MorphToRavager'}, 501: {'ability_name': 'MorphToLurker'}, 504: {'ability_name': 'RavagerCorrosiveBile'}, 505: {'ability_name': 'BurrowRavagerDown'}, 506: {'ability_name': 'BurrowRavagerUp'}, 508: {'ability_name': 'PurificationNovaTargeted'}, 510: {'ability_name': 'LockOn'}, 514: {'ability_name': 'Hyperjump'}, 516: {'ability_name': 'ThorAPMode'}, 517: {'ability_name': 'ThorNormalMode'}, 520: {'ability_name': 'NydusWormTransport'}, 521: {'ability_name': 'OracleWeapon', 'energy_cost': 25}, 528: {'ability_name': 'HallucinationDisruptor', 'energy_cost': 75}, 529: {'ability_name': 'HallucinationAdept', 'energy_cost': 75}, 530: {'ability_name': 
'VoidRaySwarmDamageBoost'}, 590: {'ability_name': 'ParasiticBomb', 'energy_cost': 125}, 591: {'ability_name': 'AdeptPhaseShift'}, 594: {'ability_name': 'LurkerHoldFire'}, 595: {'ability_name': 'LurkerRemoveHoldFire'}, 598: {'ability_name': 'LiberatorAGTarget'}, 599: {'ability_name': 'LiberatorAATarget'}, 613: {'ability_name': 'KD8Charge'}, 616: {'ability_name': 'AdeptPhaseShiftCancel'}, 617: {'ability_name': 'AdeptShadePhaseShiftCancel'}, 625: {'ability_name': 'MorphToTransportOverlord'}, 628: {'ability_name': 'ChannelSnipe', 'energy_cost': 50}}
| 3,270
| 6,524
| 0.709021
| 673
| 6,540
| 6.627043
| 0.429421
| 0.342825
| 0.04843
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079431
| 0.075994
| 6,540
| 2
| 6,524
| 3,270
| 0.658613
| 0.001988
| 0
| 0
| 0
| 0
| 0.634692
| 0.055624
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
16d641b8ece693de878165602cafdd15a18edefb
| 548
|
py
|
Python
|
grains/shortname.py
|
beornf/salt-contrib
|
062355938ad1cced273056e9c23dc344c6a2c858
|
[
"Apache-2.0"
] | 111
|
2015-01-16T02:48:12.000Z
|
2022-02-08T10:24:56.000Z
|
grains/shortname.py
|
beornf/salt-contrib
|
062355938ad1cced273056e9c23dc344c6a2c858
|
[
"Apache-2.0"
] | 60
|
2015-01-06T12:28:44.000Z
|
2020-12-01T21:30:38.000Z
|
grains/shortname.py
|
beornf/salt-contrib
|
062355938ad1cced273056e9c23dc344c6a2c858
|
[
"Apache-2.0"
] | 163
|
2015-01-06T09:40:31.000Z
|
2022-02-03T11:41:23.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Nick Soracco
:copyright: © 2015 by Nick Soracco
:license: BSD
salt.grains.shortname
~~~~~~~~~~~~~~~~~~~~~~~
Returns a string of the shortname of the machine, courtesy of
os.uname()[1].split('.')[0]
FIXME: Only works in Linux, requires uname() system call.
'''
from __future__ import absolute_import
import os
def shortname():
    '''
    Return the first characters of a nodename prior to the first period.
    '''
    # os.uname()[1] is the nodename; take everything before the first dot.
    nodename = os.uname()[1]
    short, _, _ = nodename.partition('.')
    return {'shortname': short}
| 21.92
| 72
| 0.609489
| 70
| 548
| 4.714286
| 0.671429
| 0.066667
| 0.048485
| 0.078788
| 0.084848
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021028
| 0.218978
| 548
| 24
| 73
| 22.833333
| 0.747664
| 0.662409
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0.041667
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
16e8bb424790f32f99be45b8ec717db153a65007
| 5,678
|
py
|
Python
|
tests/open_alchemy/schemas/foreign_key/test_process.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 40
|
2019-11-05T06:50:35.000Z
|
2022-03-09T01:34:57.000Z
|
tests/open_alchemy/schemas/foreign_key/test_process.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 178
|
2019-11-03T04:10:38.000Z
|
2022-03-31T00:07:17.000Z
|
tests/open_alchemy/schemas/foreign_key/test_process.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 17
|
2019-11-04T07:22:46.000Z
|
2022-03-23T05:29:49.000Z
|
"""Tests for foreign key schemas processing."""
import pytest
from open_alchemy.schemas import foreign_key
PROCESS_TESTS = [
pytest.param(
{},
{},
id="empty",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {"prop_1": {"type": "integer"}},
}
},
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {"prop_1": {"type": "integer"}},
}
},
id="single no foreign key",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"ref_schema_1": {"$ref": "#/components/schemas/RefSchema1"}
},
},
"RefSchema1": {
"x-tablename": "ref_schema_1",
"x-foreign-key-column": "prop_1",
"type": "object",
"properties": {"prop_1": {"type": "integer"}},
},
},
{
"Schema1": {
"allOf": [
{
"x-tablename": "schema_1",
"properties": {
"ref_schema_1": {"$ref": "#/components/schemas/RefSchema1"}
},
},
{
"type": "object",
"properties": {
"ref_schema_1_prop_1": {
"type": "integer",
"x-foreign-key": "ref_schema_1.prop_1",
"x-dict-ignore": True,
"nullable": True,
}
},
},
]
},
"RefSchema1": {
"x-tablename": "ref_schema_1",
"x-foreign-key-column": "prop_1",
"type": "object",
"properties": {"prop_1": {"type": "integer"}},
},
},
id="single foreign key",
),
pytest.param(
{
"Schema1": {
"x-tablename": "schema_1",
"properties": {
"ref_schema_1": {"$ref": "#/components/schemas/RefSchema1"}
},
},
"RefSchema1": {
"x-tablename": "ref_schema_1",
"x-foreign-key-column": "prop_1",
"type": "object",
"properties": {"prop_1": {"type": "integer"}},
},
"Schema2": {
"x-tablename": "schema_2",
"properties": {
"ref_schema_2": {"$ref": "#/components/schemas/RefSchema2"}
},
},
"RefSchema2": {
"x-tablename": "ref_schema_2",
"x-foreign-key-column": "prop_2",
"type": "object",
"properties": {"prop_2": {"type": "integer"}},
},
},
{
"Schema1": {
"allOf": [
{
"x-tablename": "schema_1",
"properties": {
"ref_schema_1": {"$ref": "#/components/schemas/RefSchema1"}
},
},
{
"type": "object",
"properties": {
"ref_schema_1_prop_1": {
"type": "integer",
"x-foreign-key": "ref_schema_1.prop_1",
"x-dict-ignore": True,
"nullable": True,
}
},
},
]
},
"RefSchema1": {
"x-tablename": "ref_schema_1",
"x-foreign-key-column": "prop_1",
"type": "object",
"properties": {"prop_1": {"type": "integer"}},
},
"Schema2": {
"allOf": [
{
"x-tablename": "schema_2",
"properties": {
"ref_schema_2": {"$ref": "#/components/schemas/RefSchema2"}
},
},
{
"type": "object",
"properties": {
"ref_schema_2_prop_2": {
"type": "integer",
"x-foreign-key": "ref_schema_2.prop_2",
"x-dict-ignore": True,
"nullable": True,
}
},
},
]
},
"RefSchema2": {
"x-tablename": "ref_schema_2",
"x-foreign-key-column": "prop_2",
"type": "object",
"properties": {"prop_2": {"type": "integer"}},
},
},
id="multiple foreign keys",
),
]
@pytest.mark.parametrize("schemas, expected_schemas", PROCESS_TESTS)
@pytest.mark.schemas
def test_process(schemas, expected_schemas):
    """
    GIVEN schemas and expected schemas
    WHEN process is called with the schemas
    THEN the schemas are modified in place so that they are equal to the
        expected schemas.
    """
    # process mutates the schemas dict rather than returning a new one.
    foreign_key.process(schemas=schemas)
    assert schemas == expected_schemas
| 32.632184
| 87
| 0.346601
| 380
| 5,678
| 4.978947
| 0.163158
| 0.066596
| 0.057082
| 0.067653
| 0.765328
| 0.743658
| 0.729387
| 0.704545
| 0.698732
| 0.698732
| 0
| 0.024026
| 0.516203
| 5,678
| 173
| 88
| 32.820809
| 0.664725
| 0.036457
| 0
| 0.575
| 0
| 0
| 0.293122
| 0.034204
| 0
| 0
| 0
| 0
| 0.00625
| 1
| 0.00625
| false
| 0
| 0.0125
| 0
| 0.01875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bc6d04fc828dab50345d4be44bb7f48039896224
| 47
|
py
|
Python
|
run_generator.py
|
Syed2135/StyleGAN2
|
34e74aee7b72c9337b58c8e8c0446941fddbd1de
|
[
"BSD-Source-Code"
] | null | null | null |
run_generator.py
|
Syed2135/StyleGAN2
|
34e74aee7b72c9337b58c8e8c0446941fddbd1de
|
[
"BSD-Source-Code"
] | null | null | null |
run_generator.py
|
Syed2135/StyleGAN2
|
34e74aee7b72c9337b58c8e8c0446941fddbd1de
|
[
"BSD-Source-Code"
] | null | null | null |
# Print GPU status by invoking the NVIDIA system management interface.
import os

os.system("nvidia-smi")
| 15.666667
| 26
| 0.744681
| 8
| 47
| 4.375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 47
| 2
| 27
| 23.5
| 0.853659
| 0
| 0
| 0
| 0
| 0
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bc7b460fcb85b72557ba141c95d687dbc76ab34d
| 48
|
py
|
Python
|
pyns2/siml/error.py
|
terassyi/netns-siml
|
6019a73e58e8defaed00111598e5fcca31208a5d
|
[
"MIT"
] | 2
|
2020-12-22T16:30:18.000Z
|
2021-01-23T08:32:01.000Z
|
pyns2/siml/error.py
|
terassyi/netns-siml
|
6019a73e58e8defaed00111598e5fcca31208a5d
|
[
"MIT"
] | 1
|
2020-12-20T12:16:27.000Z
|
2020-12-20T12:16:27.000Z
|
pyns2/siml/error.py
|
terassyi/pyns2
|
6019a73e58e8defaed00111598e5fcca31208a5d
|
[
"MIT"
] | null | null | null |
class SimlCreateException(Exception):
    """Exception raised on siml creation failure."""
| 12
| 37
| 0.770833
| 4
| 48
| 9.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 48
| 3
| 38
| 16
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
bca94085f378a1a20aefc8bfadc55026742264c4
| 160
|
py
|
Python
|
luna/gateware/test/__init__.py
|
gregdavill/luna
|
f42e64d5ce0b3cf840e985d4427deac5b050e31b
|
[
"BSD-3-Clause"
] | 4
|
2020-02-11T18:40:02.000Z
|
2020-04-03T13:07:38.000Z
|
luna/gateware/test/__init__.py
|
ktemkin/luna
|
661dc89f7f60ba8a51165f7f8037ad2d5854cf34
|
[
"BSD-3-Clause"
] | null | null | null |
luna/gateware/test/__init__.py
|
ktemkin/luna
|
661dc89f7f60ba8a51165f7f8037ad2d5854cf34
|
[
"BSD-3-Clause"
] | 1
|
2021-01-16T00:40:49.000Z
|
2021-01-16T00:40:49.000Z
|
#
# This file is part of LUNA.
#
from .utils import \
LunaGatewareTestCase, \
sync_test_case, \
ulpi_domain_test_case, \
fast_domain_test_case
| 16
| 28
| 0.69375
| 21
| 160
| 4.904762
| 0.761905
| 0.23301
| 0.271845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.23125
| 160
| 9
| 29
| 17.777778
| 0.837398
| 0.1625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.