hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7cf36d73a694dbe84a9d53b6e232436f71c8212a
| 652
|
py
|
Python
|
_2019/__init__.py
|
JHowell45/advent-of-code
|
c7bdd3881573f259af2cb826d0c77bbef0b3417e
|
[
"MIT"
] | null | null | null |
_2019/__init__.py
|
JHowell45/advent-of-code
|
c7bdd3881573f259af2cb826d0c77bbef0b3417e
|
[
"MIT"
] | null | null | null |
_2019/__init__.py
|
JHowell45/advent-of-code
|
c7bdd3881573f259af2cb826d0c77bbef0b3417e
|
[
"MIT"
] | null | null | null |
try:
from _2019.src.day_1 import day_1_puzzle_1_solution, day_1_puzzle_2_solution
except ImportError:
from src.day_1 import day_1_puzzle_1_solution, day_1_puzzle_2_solution
try:
from _2019.src.day_2 import day_2_puzzle_1_solution, day_2_puzzle_2_solution
except ImportError:
from src.day_2 import day_2_puzzle_1_solution, day_2_puzzle_2_solution
def main():
    """Print the solution of every Advent of Code 2019 puzzle solved so far."""
    solutions = [
        ("Day 1, Puzzle 1", day_1_puzzle_1_solution),
        ("Day 1, Puzzle 2", day_1_puzzle_2_solution),
        ("Day 2, Puzzle 1", day_2_puzzle_1_solution),
        ("Day 2, Puzzle 2", day_2_puzzle_2_solution),
    ]
    for label, solve in solutions:
        print(f"{label}: {solve()}")


if __name__ == "__main__":
    main()
| 32.6
| 80
| 0.759202
| 118
| 652
| 3.669492
| 0.152542
| 0.092379
| 0.184758
| 0.101617
| 0.942263
| 0.750577
| 0.568129
| 0.568129
| 0.471132
| 0.471132
| 0
| 0.078712
| 0.142638
| 652
| 19
| 81
| 34.315789
| 0.695886
| 0
| 0
| 0.266667
| 0
| 0
| 0.282209
| 0.165644
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| true
| 0
| 0.4
| 0
| 0.466667
| 0.266667
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6b03cd8ddefb73e8cf4168d0f4355fe3ec89e67b
| 764
|
py
|
Python
|
tests/snippet/is_type.py
|
dem4ply/chibi
|
1f13db8200e8e60bbb839436d4c995d6b6220957
|
[
"WTFPL"
] | null | null | null |
tests/snippet/is_type.py
|
dem4ply/chibi
|
1f13db8200e8e60bbb839436d4c995d6b6220957
|
[
"WTFPL"
] | null | null | null |
tests/snippet/is_type.py
|
dem4ply/chibi
|
1f13db8200e8e60bbb839436d4c995d6b6220957
|
[
"WTFPL"
] | null | null | null |
from unittest import TestCase
from chibi.snippet import is_type
class Test_is_type(TestCase):
    """Verify that ``is_type.is_iter`` accepts the builtin iterable types."""

    def setUp(self):
        pass

    def test_is_iter_list(self):
        # Populated, literal-empty and constructor-empty lists all qualify.
        for candidate in ([1, 2, 3], [], list()):
            self.assertTrue(is_type.is_iter(candidate))

    def test_is_iter_tuple(self):
        for candidate in ((1, 2, 3), tuple()):
            self.assertTrue(is_type.is_iter(candidate))

    def test_is_iter_dict(self):
        for candidate in ({'foo': 1}, dict()):
            self.assertTrue(is_type.is_iter(candidate))

    def test_is_iter_string(self):
        # Strings count as iterables too, including the empty string.
        for candidate in ("foo", ""):
            self.assertTrue(is_type.is_iter(candidate))
| 30.56
| 58
| 0.61911
| 109
| 764
| 4.036697
| 0.220183
| 0.177273
| 0.327273
| 0.409091
| 0.595455
| 0.595455
| 0.418182
| 0.418182
| 0.268182
| 0.268182
| 0
| 0.012259
| 0.252618
| 764
| 24
| 59
| 31.833333
| 0.758319
| 0
| 0
| 0
| 0
| 0
| 0.007853
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.277778
| false
| 0.055556
| 0.111111
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
6b191347a4d5af477c627d6715ce9c91bed23c17
| 237
|
py
|
Python
|
aws_iam_policies/aws_root_account_mfa.py
|
glerb/panther-analysis
|
bc8518644e0a8fc7735576a700aa7269b3257546
|
[
"Apache-2.0"
] | null | null | null |
aws_iam_policies/aws_root_account_mfa.py
|
glerb/panther-analysis
|
bc8518644e0a8fc7735576a700aa7269b3257546
|
[
"Apache-2.0"
] | null | null | null |
aws_iam_policies/aws_root_account_mfa.py
|
glerb/panther-analysis
|
bc8518644e0a8fc7735576a700aa7269b3257546
|
[
"Apache-2.0"
] | null | null | null |
from panther_base_helpers import deep_get
def policy(resource):
    """Return True only when the root account's credential report shows MFA active.

    ``deep_get`` may yield None when the key path is absent, so the result is
    compared with ``is True`` to guarantee a bool (never a NoneType) is returned.
    """
    mfa_active = deep_get(resource, 'CredentialReport', 'MfaActive')
    return mfa_active is True
| 33.857143
| 99
| 0.763713
| 38
| 237
| 4.657895
| 0.842105
| 0.079096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181435
| 237
| 6
| 100
| 39.5
| 0.912371
| 0.392405
| 0
| 0
| 0
| 0
| 0.176056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
861d7d3b723e42e9df0e81da6e2c51322c93c626
| 637
|
py
|
Python
|
cupyx/scipy/fft/__init__.py
|
pri1311/cupy
|
415be9536582ba86dbbb3e98bc14db4877a242c6
|
[
"MIT"
] | null | null | null |
cupyx/scipy/fft/__init__.py
|
pri1311/cupy
|
415be9536582ba86dbbb3e98bc14db4877a242c6
|
[
"MIT"
] | null | null | null |
cupyx/scipy/fft/__init__.py
|
pri1311/cupy
|
415be9536582ba86dbbb3e98bc14db4877a242c6
|
[
"MIT"
] | null | null | null |
# flake8: NOQA
from cupyx.scipy.fft._fft import (
fft, ifft, fft2, ifft2, fftn, ifftn,
rfft, irfft, rfft2, irfft2, rfftn, irfftn,
hfft, ihfft, hfft2, ihfft2, hfftn, ihfftn
)
from cupyx.scipy.fft._fft import (
__ua_domain__, __ua_convert__, __ua_function__)
from cupyx.scipy.fft._fft import _scipy_150, _scipy_160
from cupyx.scipy.fft._fftlog import fht, ifht
from cupyx.scipy.fft._helper import next_fast_len # NOQA
from cupy.fft import fftshift, ifftshift, fftfreq, rfftfreq
from cupyx.scipy.fftpack import get_fft_plan
from cupyx.scipy.fft._realtransforms import (
dct, dctn, dst, dstn, idct, idctn, idst, idstn
)
| 37.470588
| 59
| 0.756672
| 96
| 637
| 4.71875
| 0.5625
| 0.139073
| 0.216336
| 0.225166
| 0.172185
| 0.172185
| 0
| 0
| 0
| 0
| 0
| 0.023941
| 0.147567
| 637
| 16
| 60
| 39.8125
| 0.810313
| 0.026688
| 0
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.533333
| 0
| 0.533333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8653eebfd170968f45726eaf56569813c4ac4801
| 17
|
py
|
Python
|
tests/__init__.py
|
MyWebIntelligence/MyWebIntelligencePython
|
23075e41140cf3ef841b20d8670a845060e24cac
|
[
"MIT"
] | 6
|
2018-10-15T14:01:10.000Z
|
2021-04-20T10:45:58.000Z
|
tests/__init__.py
|
MyWebIntelligence/MyWebIntelligencePython
|
23075e41140cf3ef841b20d8670a845060e24cac
|
[
"MIT"
] | 9
|
2019-04-25T15:14:40.000Z
|
2020-06-03T11:22:43.000Z
|
tests/__init__.py
|
MyWebIntelligence/MyWebIntelligencePython
|
23075e41140cf3ef841b20d8670a845060e24cac
|
[
"MIT"
] | 2
|
2019-07-12T09:48:20.000Z
|
2019-10-24T08:54:20.000Z
|
from mwi import *
| 17
| 17
| 0.764706
| 3
| 17
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 17
| 1
| 17
| 17
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
866f2c4793bf87953f39c0c646cf3b6d4e5cf81a
| 4,596
|
py
|
Python
|
curiosity_mask/util.py
|
machineteaching-io/stable-baselines
|
58c218de12d61d313bc5e9877a505668ff0bf661
|
[
"MIT"
] | 1
|
2020-07-03T19:40:10.000Z
|
2020-07-03T19:40:10.000Z
|
curiosity_mask/util.py
|
machineteaching-io/stable-baselines
|
58c218de12d61d313bc5e9877a505668ff0bf661
|
[
"MIT"
] | null | null | null |
curiosity_mask/util.py
|
machineteaching-io/stable-baselines
|
58c218de12d61d313bc5e9877a505668ff0bf661
|
[
"MIT"
] | 1
|
2020-04-22T21:41:22.000Z
|
2020-04-22T21:41:22.000Z
|
from gym.spaces import MultiDiscrete
def create_dummy_action_mask(ac_spaces: MultiDiscrete):
    """Build an all-zero nested action mask for a MultiDiscrete action space.

    Entry ``i`` of the returned list is a zero-filled structure nested once per
    sub-space, built from dimension ``nvec[i]`` back down to ``nvec[0]``.
    """
    masks = []
    for index in range(len(ac_spaces.nvec)):
        level = None
        # Innermost layer is a row of zeros; each earlier dimension wraps the
        # structure built so far.
        for depth, dim in enumerate(ac_spaces.nvec[index::-1]):
            level = [0] * dim if depth == 0 else [level] * dim
        masks.append(level)
    return masks
def create_negative_action_mask(ac_spaces: MultiDiscrete):
    # NOTE(review): this body is identical to create_dummy_action_mask and
    # therefore also produces an all-ZERO mask; the "negative" name suggests a
    # different fill value may have been intended — confirm against callers.
    action_mask = []
    for i, space_size in enumerate(ac_spaces.nvec):  # space_size is unused
        mask = None
        # Build the nested mask from dimension i back down to dimension 0.
        for j, size in enumerate(ac_spaces.nvec[i::-1]):
            if j == 0:
                mask = [0] * size
            else:
                mask = [mask] * size
        action_mask.append(mask)
    return action_mask
#action_space = MultiDiscrete([3, 2, 3, 2, 2])
#print(create_dummy_action_mask(action_space)[2])
# 0 - Gait [G1, G2, G3]
# 1 - Left Hip for each gait [[G1xLH1, G1xLH2], [G2xLH1, G2xLH2], [G3xLH1, G3xLH2]]
# 2 - Left Knee for each gait x Left Hip [[[G1xLH1xLK1, G1xLH1xLK2], [G1xLH2xLK1, G1xLH2xLK2]], [[G2xLH1xLK1, G2xLH1xLK2], [G2xLH2xLK1, G2xLH2xLK2]], [[G3xLH1xLK1, G3xLH1xLK2], [G3xLH2xLK1, G3xLH2xLK2]]]
# 3 - Right Hip for each gait x Left Hip x Left Knee
# 4 - Right Knee for each gait x Left Hip x Left Knee x Right Hip
# Index ranges for each joint's discretised positions (21 bins apiece).
left_hip = left_knee = right_hip = right_knee = range(21)
def set_action_mask_gait(gait, ranges, mask):
    """Enable (set to 1) every mask entry whose joint indices fall inside the
    per-joint [min, max] windows given in ``ranges``, for the given gait.

    NOTE(review): indentation reconstructed — the four loops are assumed to be
    nested, which the indexing (each level reuses the outer loop variables)
    requires; confirm against the original layout.
    """
    for left_hip_index in list(filter(lambda x: x >= ranges['left_hip']['min'] and x <= ranges['left_hip']['max'], left_hip)):
        mask[1][gait][left_hip_index] = 1
        for left_knee_index in list(filter(lambda x: x >= ranges['left_knee']['min'] and x <= ranges['left_knee']['max'], left_knee)):
            mask[2][gait][left_hip_index][left_knee_index] = 1
            for right_hip_index in list(filter(lambda x: x >= ranges['right_hip']['min'] and x <= ranges['right_hip']['max'], right_hip)):
                mask[3][gait][left_hip_index][left_knee_index][right_hip_index] = 1
                for right_knee_index in list(filter(lambda x: x >= ranges['right_knee']['min'] and x <= ranges['right_knee']['max'], right_knee)):
                    mask[4][gait][left_hip_index][left_knee_index][right_hip_index][right_knee_index] = 1
    return mask
def test_mask(action, state, teaching, swinging_leg):
    """Return True when ``action`` (a 4-tuple of joint indices) lies inside the
    teaching ranges for the gait implied by ``state`` and ``swinging_leg``.
    """
    # Map the walking state onto a gait index 0-2 ('start' behaves like 'lift_leg').
    if state == 'start' or state == 'lift_leg':
        gait = 0
    if state == 'plant_leg':
        gait = 1
    if state == 'switch_leg':
        gait = 2
    gait_ref = str(gait+1)
    # The swinging leg takes the 'swinging' min/max keys, the other leg the
    # 'planted' ones.
    if swinging_leg == 'left':
        ranges = {'left_hip': {'min': teaching['gait-' + gait_ref + '-swinging-hip-min'], 'max': teaching['gait-' + gait_ref + '-swinging-hip-max']},
                  'left_knee': {'min': teaching['gait-' + gait_ref + '-swinging-knee-min'], 'max': teaching['gait-' + gait_ref + '-swinging-knee-max']},
                  'right_hip': {'min': teaching['gait-' + gait_ref + '-planted-hip-min'], 'max': teaching['gait-' + gait_ref + '-planted-hip-max']},
                  'right_knee': {'min': teaching['gait-' + gait_ref + '-planted-knee-min'], 'max': teaching['gait-' + gait_ref + '-planted-knee-max']}
                  }
    if swinging_leg == 'right':
        ranges = {'left_hip': {'min': teaching['gait-' + gait_ref + '-planted-hip-min'], 'max': teaching['gait-' + gait_ref + '-planted-hip-max']},
                  'left_knee': {'min': teaching['gait-' + gait_ref + '-planted-knee-min'], 'max': teaching['gait-' + gait_ref + '-planted-knee-max']},
                  'right_hip': {'min': teaching['gait-' + gait_ref + '-swinging-hip-min'], 'max': teaching['gait-' + gait_ref + '-swinging-hip-max']},
                  'right_knee': {'min': teaching['gait-' + gait_ref + '-swinging-knee-min'], 'max': teaching['gait-' + gait_ref + '-swinging-knee-max']}
                  }
    action_list = []
    # Enumerate every admissible joint combination (indentation reconstructed:
    # the four loops nest, matching the 4-tuple appended below — confirm).
    for left_hip_index in list(filter(lambda x: x >= ranges['left_hip']['min'] and x <= ranges['left_hip']['max'], left_hip)):
        for left_knee_index in list(filter(lambda x: x >= ranges['left_knee']['min'] and x <= ranges['left_knee']['max'], left_knee)):
            for right_hip_index in list(filter(lambda x: x >= ranges['right_hip']['min'] and x <= ranges['right_hip']['max'], right_hip)):
                for right_knee_index in list(filter(lambda x: x >= ranges['right_knee']['min'] and x <= ranges['right_knee']['max'], right_knee)):
                    action_list.append((left_hip_index, left_knee_index, right_hip_index, right_knee_index))
    #print(action_list)
    return action in action_list
| 54.714286
| 203
| 0.602263
| 637
| 4,596
| 4.131868
| 0.130298
| 0.053191
| 0.097264
| 0.115502
| 0.735562
| 0.735562
| 0.735562
| 0.714286
| 0.706687
| 0.674772
| 0
| 0.023144
| 0.229112
| 4,596
| 83
| 204
| 55.373494
| 0.719729
| 0.115753
| 0
| 0.459016
| 0
| 0
| 0.173817
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0
| 0.016393
| 0
| 0.147541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8677b0541e8ceae8f14244824c43da21836f0602
| 72
|
py
|
Python
|
CodeWars/7 Kyu/All Star Code Challenge #16.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/All Star Code Challenge #16.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/All Star Code Challenge #16.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
def no_repeat(stg):
    """Return the first character of *stg* that occurs exactly once.

    Propagates StopIteration (as the original generator form does) when no
    character is unique.
    """
    singles = (char for char in stg if stg.count(char) == 1)
    return next(singles)
| 36
| 52
| 0.652778
| 16
| 72
| 2.875
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017544
| 0.208333
| 72
| 2
| 52
| 36
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
867ae8f2ed0c14a9f4efd26a5cfe51f30df83407
| 28,932
|
py
|
Python
|
astroquery/gaia/tests/test_gaiatap.py
|
ivvv/astroquery
|
35a6cd331722523d034265ca015acea4f4ba6f03
|
[
"BSD-3-Clause"
] | null | null | null |
astroquery/gaia/tests/test_gaiatap.py
|
ivvv/astroquery
|
35a6cd331722523d034265ca015acea4f4ba6f03
|
[
"BSD-3-Clause"
] | null | null | null |
astroquery/gaia/tests/test_gaiatap.py
|
ivvv/astroquery
|
35a6cd331722523d034265ca015acea4f4ba6f03
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=============
Gaia TAP plus
=============
@author: Juan Carlos Segovia
@contact: juan.carlos.segovia@sciops.esa.int
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
Created on 30 jun. 2016
"""
import os
from unittest.mock import patch
import pytest
from requests import HTTPError
from astroquery.gaia import conf
from astroquery.gaia.core import GaiaClass
from astroquery.gaia.tests.DummyTapHandler import DummyTapHandler
from astroquery.utils.tap.conn.tests.DummyConnHandler import DummyConnHandler
from astroquery.utils.tap.conn.tests.DummyResponse import DummyResponse
import astropy.units as u
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.units import Quantity
import numpy as np
from astroquery.utils.tap.xmlparser import utils
from astroquery.utils.tap.core import TapPlus, TAP_CLIENT_ID
from astroquery.utils.tap import taputils
def data_path(filename):
    """Return the absolute path of *filename* inside this test module's ``data`` directory."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
class TestTap:
def test_show_message(self):
    """Constructing GaiaClass with show_server_messages=True must fetch the
    server notification from the (mocked) endpoint without raising."""
    connHandler = DummyConnHandler()
    dummy_response = DummyResponse()
    dummy_response.set_status_code(200)
    dummy_response.set_message("OK")
    message_text = "1653401204784D[type: -100,-1]=Gaia dev is under maintenance"
    dummy_response.set_data(method='GET',
                            context=None,
                            body=message_text,
                            headers=None)
    connHandler.set_default_response(dummy_response)
    # show_server_messages
    tableRequest = 'notification?action=GetNotifications'
    connHandler.set_response(tableRequest, dummy_response)
    tapplus = TapPlus("http://test:1111/tap", connhandler=connHandler)
    tap = GaiaClass(connHandler, tapplus, show_server_messages=True)
def test_query_object(self):
    """query_object must reject missing width/height, then return the mocked
    3-row table (by box and by radius) with correctly typed columns."""
    conn_handler = DummyConnHandler()
    # Launch response: we use default response because the query contains
    # decimals
    dummy_response = DummyResponse()
    dummy_response.set_status_code(200)
    dummy_response.set_message("OK")
    message_text = "1653401204784D[type: -100,-1]=Gaia dev is under maintenance"
    dummy_response.set_data(method='GET',
                            context=None,
                            body=message_text,
                            headers=None)
    conn_handler.set_default_response(dummy_response)
    # show_server_messages
    tableRequest = 'notification?action=GetNotifications'
    conn_handler.set_response(tableRequest, dummy_response)
    tapplus = TapPlus("http://test:1111/tap", connhandler=conn_handler)
    tap = GaiaClass(conn_handler, tapplus, show_server_messages=True)
    # Launch response: we use default response because the query contains
    # decimals
    response_launch_job = DummyResponse()
    response_launch_job.set_status_code(200)
    response_launch_job.set_message("OK")
    job_data_file = data_path('job_1.vot')
    job_data = utils.read_file_content(job_data_file)
    response_launch_job.set_data(method='POST',
                                 context=None,
                                 body=job_data,
                                 headers=None)
    # The query contains decimals: force default response
    conn_handler.set_default_response(response_launch_job)
    sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree),
                  frame='icrs')
    # Width and height are both mandatory unless a radius is given.
    with pytest.raises(ValueError) as err:
        tap.query_object(sc)
    assert "Missing required argument: width" in err.value.args[0]
    width = Quantity(12, u.deg)
    with pytest.raises(ValueError) as err:
        tap.query_object(sc, width=width)
    assert "Missing required argument: height" in err.value.args[0]
    height = Quantity(10, u.deg)
    table = tap.query_object(sc, width=width, height=height)
    assert len(table) == 3, \
        "Wrong job results (num rows). Expected: %d, found %d" % \
        (3, len(table))
    self.__check_results_column(table,
                                'alpha',
                                'alpha',
                                None,
                                np.float64)
    self.__check_results_column(table,
                                'delta',
                                'delta',
                                None,
                                np.float64)
    self.__check_results_column(table,
                                'source_id',
                                'source_id',
                                None,
                                object)
    self.__check_results_column(table,
                                'table1_oid',
                                'table1_oid',
                                None,
                                np.int32)
    # by radius
    radius = Quantity(1, u.deg)
    table = tap.query_object(sc, radius=radius)
    assert len(table) == 3, \
        "Wrong job results (num rows). Expected: %d, found %d" % \
        (3, len(table))
    self.__check_results_column(table,
                                'alpha',
                                'alpha',
                                None,
                                np.float64)
    self.__check_results_column(table,
                                'delta',
                                'delta',
                                None,
                                np.float64)
    self.__check_results_column(table,
                                'source_id',
                                'source_id',
                                None,
                                object)
    self.__check_results_column(table,
                                'table1_oid',
                                'table1_oid',
                                None,
                                np.int32)
def test_query_object_async(self):
    """query_object_async must drive the async job through the mocked
    launch/phase/results endpoints and return the expected 3-row table
    (by box and by radius)."""
    conn_handler = DummyConnHandler()
    tapplus = TapPlus("http://test:1111/tap", connhandler=conn_handler)
    tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)
    jobid = '12345'
    # Launch response
    response_launch_job = DummyResponse()
    response_launch_job.set_status_code(303)
    response_launch_job.set_message("OK")
    # list of list (httplib implementation for headers in response)
    launch_response_headers = [
        ['location', 'http://test:1111/tap/async/' + jobid]
    ]
    response_launch_job.set_data(method='POST',
                                 context=None,
                                 body=None,
                                 headers=launch_response_headers)
    conn_handler.set_default_response(response_launch_job)
    # Phase response
    response_phase = DummyResponse()
    response_phase.set_status_code(200)
    response_phase.set_message("OK")
    response_phase.set_data(method='GET',
                            context=None,
                            body="COMPLETED",
                            headers=None)
    req = "async/" + jobid + "/phase"
    conn_handler.set_response(req, response_phase)
    # Results response
    response_results_job = DummyResponse()
    response_results_job.set_status_code(200)
    response_results_job.set_message("OK")
    job_data_file = data_path('job_1.vot')
    job_data = utils.read_file_content(job_data_file)
    response_results_job.set_data(method='GET',
                                  context=None,
                                  body=job_data,
                                  headers=None)
    req = "async/" + jobid + "/results/result"
    conn_handler.set_response(req, response_results_job)
    sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree),
                  frame='icrs')
    width = Quantity(12, u.deg)
    height = Quantity(10, u.deg)
    table = tap.query_object_async(sc, width=width, height=height)
    assert len(table) == 3, \
        "Wrong job results (num rows). Expected: %d, found %d" % \
        (3, len(table))
    self.__check_results_column(table,
                                'alpha',
                                'alpha',
                                None,
                                np.float64)
    self.__check_results_column(table,
                                'delta',
                                'delta',
                                None,
                                np.float64)
    self.__check_results_column(table,
                                'source_id',
                                'source_id',
                                None,
                                object)
    self.__check_results_column(table,
                                'table1_oid',
                                'table1_oid',
                                None,
                                np.int32)
    # by radius
    radius = Quantity(1, u.deg)
    table = tap.query_object_async(sc, radius=radius)
    assert len(table) == 3, \
        "Wrong job results (num rows). Expected: %d, found %d" % \
        (3, len(table))
    self.__check_results_column(table,
                                'alpha',
                                'alpha',
                                None,
                                np.float64)
    self.__check_results_column(table,
                                'delta',
                                'delta',
                                None,
                                np.float64)
    self.__check_results_column(table,
                                'source_id',
                                'source_id',
                                None,
                                object)
    self.__check_results_column(table,
                                'table1_oid',
                                'table1_oid',
                                None,
                                np.int32)
def test_cone_search_sync(self):
    """Synchronous cone_search must return a COMPLETED, non-failed job whose
    results table has the expected 3 rows and column types."""
    conn_handler = DummyConnHandler()
    tapplus = TapPlus("http://test:1111/tap", connhandler=conn_handler)
    tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)
    # Launch response: we use default response because the query contains
    # decimals
    response_launch_job = DummyResponse()
    response_launch_job.set_status_code(200)
    response_launch_job.set_message("OK")
    job_data_file = data_path('job_1.vot')
    job_data = utils.read_file_content(job_data_file)
    response_launch_job.set_data(method='POST',
                                 context=None,
                                 body=job_data,
                                 headers=None)
    ra = 19.0
    dec = 20.0
    sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')
    radius = Quantity(1.0, u.deg)
    conn_handler.set_default_response(response_launch_job)
    job = tap.cone_search(sc, radius)
    assert job is not None, "Expected a valid job"
    assert job.async_ is False, "Expected a synchronous job"
    assert job.get_phase() == 'COMPLETED', \
        "Wrong job phase. Expected: %s, found %s" % \
        ('COMPLETED', job.get_phase())
    assert job.failed is False, "Wrong job status (set Failed = True)"
    # results
    results = job.get_results()
    assert len(results) == 3, \
        "Wrong job results (num rows). Expected: %d, found %d" % \
        (3, len(results))
    self.__check_results_column(results,
                                'alpha',
                                'alpha',
                                None,
                                np.float64)
    self.__check_results_column(results,
                                'delta',
                                'delta',
                                None,
                                np.float64)
    self.__check_results_column(results,
                                'source_id',
                                'source_id',
                                None,
                                object)
    self.__check_results_column(results,
                                'table1_oid',
                                'table1_oid',
                                None,
                                np.int32)
def test_cone_search_async(self):
    """Asynchronous cone_search must complete through the mocked async
    endpoints, return the expected table, and honour MAIN_GAIA_TABLE
    overrides via conf and via the class attribute."""
    conn_handler = DummyConnHandler()
    tapplus = TapPlus("http://test:1111/tap", connhandler=conn_handler)
    tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)
    jobid = '12345'
    # Launch response
    response_launch_job = DummyResponse()
    response_launch_job.set_status_code(303)
    response_launch_job.set_message("OK")
    # list of list (httplib implementation for headers in response)
    launch_response_headers = [
        ['location', 'http://test:1111/tap/async/' + jobid]
    ]
    response_launch_job.set_data(method='POST',
                                 context=None,
                                 body=None,
                                 headers=launch_response_headers)
    ra = 19
    dec = 20
    sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')
    radius = Quantity(1.0, u.deg)
    conn_handler.set_default_response(response_launch_job)
    # Phase response
    response_phase = DummyResponse()
    response_phase.set_status_code(200)
    response_phase.set_message("OK")
    response_phase.set_data(method='GET',
                            context=None,
                            body="COMPLETED",
                            headers=None)
    req = "async/" + jobid + "/phase"
    conn_handler.set_response(req, response_phase)
    # Results response
    response_results_job = DummyResponse()
    response_results_job.set_status_code(200)
    response_results_job.set_message("OK")
    job_data_file = data_path('job_1.vot')
    job_data = utils.read_file_content(job_data_file)
    response_results_job.set_data(method='GET',
                                  context=None,
                                  body=job_data,
                                  headers=None)
    req = "async/" + jobid + "/results/result"
    conn_handler.set_response(req, response_results_job)
    job = tap.cone_search_async(sc, radius)
    assert job is not None, "Expected a valid job"
    assert job.async_ is True, "Expected an asynchronous job"
    assert job.get_phase() == 'COMPLETED', \
        "Wrong job phase. Expected: %s, found %s" % \
        ('COMPLETED', job.get_phase())
    assert job.failed is False, "Wrong job status (set Failed = True)"
    # results
    results = job.get_results()
    assert len(results) == 3, \
        "Wrong job results (num rows). Expected: %d, found %d" % \
        (3, len(results))
    self.__check_results_column(results,
                                'alpha',
                                'alpha',
                                None,
                                np.float64)
    self.__check_results_column(results,
                                'delta',
                                'delta',
                                None,
                                np.float64)
    self.__check_results_column(results,
                                'source_id',
                                'source_id',
                                None,
                                object)
    self.__check_results_column(results,
                                'table1_oid',
                                'table1_oid',
                                None,
                                np.int32)
    # Regression test for #2093 and #2099 - changing the MAIN_GAIA_TABLE
    # had no effect.
    # The preceding tests should have used the default value.
    assert 'gaiadr2.gaia_source' in job.parameters['query']
    # Test changing the table name through conf.
    conf.MAIN_GAIA_TABLE = 'name_from_conf'
    job = tap.cone_search_async(sc, radius)
    assert 'name_from_conf' in job.parameters['query']
    # Changing the value through the class should overrule conf.
    tap.MAIN_GAIA_TABLE = 'name_from_class'
    job = tap.cone_search_async(sc, radius)
    assert 'name_from_class' in job.parameters['query']
    # Cleanup.
    conf.reset('MAIN_GAIA_TABLE')
def __check_results_column(self, results, column_name, description, unit,
                           data_type):
    """Assert that column ``column_name`` of ``results`` carries the expected
    description, unit and dtype.

    Bug fix: the original failure messages chained ``%`` twice
    (``"... '%s'. " % "Expected ..." % (a, b, c)``); ``%`` is left-associative,
    so that formatted the second literal into the first and then applied a
    3-tuple to a 2-placeholder string, raising TypeError whenever an assertion
    actually failed. The two literals are now joined by implicit string
    concatenation and formatted once.
    """
    c = results[column_name]
    assert c.description == description, \
        "Wrong description for results column '%s'. " \
        "Expected: '%s', found '%s'" % \
        (column_name, description, c.description)
    assert c.unit == unit, \
        "Wrong unit for results column '%s'. " \
        "Expected: '%s', found '%s'" % \
        (column_name, unit, c.unit)
    assert c.dtype == data_type, \
        "Wrong dataType for results column '%s'. " \
        "Expected: '%s', found '%s'" % \
        (column_name, data_type, c.dtype)
def test_load_data(self):
    """load_data must forward its arguments to the TAP handler as the expected
    params_dict / output_file / verbose parameter set."""
    dummy_handler = DummyTapHandler()
    tap = GaiaClass(dummy_handler, dummy_handler, show_server_messages=False)
    ids = "1,2,3,4"
    retrieval_type = "epoch_photometry"
    valid_data = True
    band = None
    format = "votable"  # NOTE(review): shadows the builtin 'format'; kept as-is
    verbose = True
    data_structure = "INDIVIDUAL"
    output_file = os.path.abspath("output_file")
    path_to_end_with = os.path.join("gaia", "test", "output_file")
    if not output_file.endswith(path_to_end_with):
        output_file = os.path.abspath(path_to_end_with)
    # Expected request parameters the handler should have received.
    params_dict = {}
    params_dict['VALID_DATA'] = "true"
    params_dict['ID'] = ids
    params_dict['FORMAT'] = str(format)
    params_dict['RETRIEVAL_TYPE'] = str(retrieval_type)
    params_dict['DATA_STRUCTURE'] = str(data_structure)
    params_dict['USE_ZIP_ALWAYS'] = 'true'
    tap.load_data(ids=ids,
                  retrieval_type=retrieval_type,
                  valid_data=valid_data,
                  band=band,
                  format=format,
                  verbose=verbose,
                  output_file=output_file)
    parameters = {}
    parameters['params_dict'] = params_dict
    # Output file name contains a timestamp: cannot be verified
    of = dummy_handler._DummyTapHandler__parameters['output_file']
    parameters['output_file'] = of
    parameters['verbose'] = verbose
    dummy_handler.check_call('load_data', parameters)
def test_get_datalinks(self):
    """get_datalinks should pass ids and verbose straight through to the handler."""
    handler = DummyTapHandler()
    tap = GaiaClass(handler, handler, show_server_messages=False)
    ids = ["1", "2", "3", "4"]
    verbose = True
    expected = {'ids': ids, 'verbose': verbose}
    tap.get_datalinks(ids, verbose)
    handler.check_call('get_datalinks', expected)
    def test_xmatch(self):
        """Exercise GaiaClass.cross_match: argument validation plus a
        full launch/phase/results round trip against dummy responses.
        """
        conn_handler = DummyConnHandler()
        tapplus = TapPlus("http://test:1111/tap", connhandler=conn_handler)
        tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)
        jobid = '12345'
        # Launch response: 303 redirect pointing at the async job URL.
        response_launch_job = DummyResponse()
        response_launch_job.set_status_code(303)
        response_launch_job.set_message("OK")
        # list of list (httplib implementation for headers in response)
        launch_response_headers = [
            ['location', 'http://test:1111/tap/async/' + jobid]
        ]
        response_launch_job.set_data(method='POST',
                                     context=None,
                                     body=None,
                                     headers=launch_response_headers)
        conn_handler.set_default_response(response_launch_job)
        # Phase response: the job reports COMPLETED.
        response_phase = DummyResponse()
        response_phase.set_status_code(200)
        response_phase.set_message("OK")
        response_phase.set_data(method='GET',
                                context=None,
                                body="COMPLETED",
                                headers=None)
        req = "async/" + jobid + "/phase"
        conn_handler.set_response(req, response_phase)
        # Results response: canned VOTable payload.
        response_results_job = DummyResponse()
        response_results_job.set_status_code(200)
        response_results_job.set_message("OK")
        job_data_file = data_path('job_1.vot')
        job_data = utils.read_file_content(job_data_file)
        response_results_job.set_data(method='GET',
                                      context=None,
                                      body=job_data,
                                      headers=None)
        req = "async/" + jobid + "/results/result"
        conn_handler.set_response(req, response_results_job)
        # NOTE(review): the trailing commas make this a 3-tuple, not one
        # concatenated string, so the sync request key registered below
        # is built from the tuple's encoding and may never match the
        # real query; the test still passes because of
        # set_default_response above. Confirm whether implicit string
        # concatenation (no commas) was intended.
        query = ("SELECT crossmatch_positional(",
                 "'schemaA','tableA','schemaB','tableB',1.0,'results')",
                 "FROM dual;")
        d_tmp = {"q": query}
        d_tmp_encoded = conn_handler.url_encode(d_tmp)
        # Strip the leading "q=" to recover just the encoded value.
        p = d_tmp_encoded.find("=")
        q = d_tmp_encoded[p + 1:]
        dict_tmp = {
            "REQUEST": "doQuery",
            "LANG": "ADQL",
            "FORMAT": "votable",
            "tapclient": str(TAP_CLIENT_ID),
            "PHASE": "RUN",
            "QUERY": str(q)}
        sorted_key = taputils.taputil_create_sorted_dict_key(dict_tmp)
        job_request = "sync?" + sorted_key
        conn_handler.set_response(job_request, response_launch_job)
        # check parameters
        # missing table A
        with pytest.raises(ValueError) as err:
            tap.cross_match(full_qualified_table_name_a=None,
                            full_qualified_table_name_b='schemaB.tableB',
                            results_table_name='results')
        assert "Table name A argument is mandatory" in err.value.args[0]
        # missing schema A
        with pytest.raises(ValueError) as err:
            tap.cross_match(full_qualified_table_name_a='tableA',
                            full_qualified_table_name_b='schemaB.tableB',
                            results_table_name='results')
        assert "Not found schema name in full qualified table A: 'tableA'" \
            in err.value.args[0]
        # missing table B
        with pytest.raises(ValueError) as err:
            tap.cross_match(full_qualified_table_name_a='schemaA.tableA',
                            full_qualified_table_name_b=None,
                            results_table_name='results')
        assert "Table name B argument is mandatory" in err.value.args[0]
        # missing schema B
        with pytest.raises(ValueError) as err:
            tap.cross_match(full_qualified_table_name_a='schemaA.tableA',
                            full_qualified_table_name_b='tableB',
                            results_table_name='results')
        assert "Not found schema name in full qualified table B: 'tableB'" \
            in err.value.args[0]
        # missing results table
        with pytest.raises(ValueError) as err:
            tap.cross_match(full_qualified_table_name_a='schemaA.tableA',
                            full_qualified_table_name_b='schemaB.tableB',
                            results_table_name=None)
        assert "Results table name argument is mandatory" in err.value.args[0]
        # wrong results table (with schema)
        with pytest.raises(ValueError) as err:
            tap.cross_match(full_qualified_table_name_a='schemaA.tableA',
                            full_qualified_table_name_b='schemaB.tableB',
                            results_table_name='schema.results')
        assert "Please, do not specify schema for 'results_table_name'" \
            in err.value.args[0]
        # radius < 0.1
        with pytest.raises(ValueError) as err:
            tap.cross_match(full_qualified_table_name_a='schemaA.tableA',
                            full_qualified_table_name_b='schemaB.tableB',
                            results_table_name='results', radius=0.01)
        assert "Invalid radius value. Found 0.01, valid range is: 0.1 to 10.0" \
            in err.value.args[0]
        # radius > 10.0
        with pytest.raises(ValueError) as err:
            tap.cross_match(full_qualified_table_name_a='schemaA.tableA',
                            full_qualified_table_name_b='schemaB.tableB',
                            results_table_name='results', radius=10.1)
        assert "Invalid radius value. Found 10.1, valid range is: 0.1 to 10.0" \
            in err.value.args[0]
        # check default parameters
        parameters = {}
        # Backslash continuations inside the literal keep this a single
        # one-line SQL string at runtime.
        query = "SELECT crossmatch_positional(\
'schemaA','tableA',\
'schemaB','tableB',\
1.0,\
'results')\
FROM dual;"
        parameters['query'] = query
        parameters['name'] = 'results'
        parameters['output_file'] = None
        parameters['output_format'] = 'votable'
        parameters['verbose'] = False
        parameters['dump_to_file'] = False
        parameters['background'] = False
        parameters['upload_resource'] = None
        parameters['upload_table_name'] = None
        # Blocking call: phase endpoint reports COMPLETED.
        job = tap.cross_match(full_qualified_table_name_a='schemaA.tableA',
                              full_qualified_table_name_b='schemaB.tableB',
                              results_table_name='results')
        assert job.async_ is True, "Expected an asynchronous job"
        assert job.get_phase() == 'COMPLETED', \
            "Wrong job phase. Expected: %s, found %s" % \
            ('COMPLETED', job.get_phase())
        assert job.failed is False, "Wrong job status (set Failed = True)"
        # Background call: job is returned while still EXECUTING.
        job = tap.cross_match(
            full_qualified_table_name_a='schemaA.tableA',
            full_qualified_table_name_b='schemaB.tableB',
            results_table_name='results',
            background=True)
        assert job.async_ is True, "Expected an asynchronous job"
        assert job.get_phase() == 'EXECUTING', \
            "Wrong job phase. Expected: %s, found %s" % \
            ('EXECUTING', job.get_phase())
        assert job.failed is False, "Wrong job status (set Failed = True)"
@patch.object(TapPlus, 'login')
def test_login(self, mock_login):
conn_handler = DummyConnHandler()
tapplus = TapPlus("http://test:1111/tap", connhandler=conn_handler)
tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)
tap.login("user", "password")
assert (mock_login.call_count == 2)
mock_login.side_effect = HTTPError("Login error")
tap.login("user", "password")
assert (mock_login.call_count == 3)
    @patch.object(TapPlus, 'login_gui')
    @patch.object(TapPlus, 'login')
    def test_login_gui(self, mock_login_gui, mock_login):
        # NOTE(review): stacked @patch decorators inject mocks bottom-up,
        # so the FIRST parameter ('mock_login_gui') actually receives the
        # mock for TapPlus.login and the second the mock for
        # TapPlus.login_gui -- the names look swapped relative to the
        # patches. The assertions below pass with the current pairing;
        # confirm intent before renaming anything.
        conn_handler = DummyConnHandler()
        tapplus = TapPlus("http://test:1111/tap", connhandler=conn_handler)
        tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)
        tap.login_gui()
        assert (mock_login_gui.call_count == 1)
        mock_login_gui.side_effect = HTTPError("Login error")
        tap.login("user", "password")
        assert (mock_login.call_count == 1)
@patch.object(TapPlus, 'logout')
def test_logout(self, mock_logout):
conn_handler = DummyConnHandler()
tapplus = TapPlus("http://test:1111/tap", connhandler=conn_handler)
tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)
tap.logout()
assert (mock_logout.call_count == 2)
mock_logout.side_effect = HTTPError("Login error")
tap.logout()
assert (mock_logout.call_count == 3)
| 44.442396
| 84
| 0.531937
| 2,912
| 28,932
| 5.026099
| 0.106113
| 0.029311
| 0.0302
| 0.036075
| 0.763323
| 0.740776
| 0.729024
| 0.714676
| 0.707297
| 0.699508
| 0
| 0.017165
| 0.377782
| 28,932
| 650
| 85
| 44.510769
| 0.795856
| 0.050429
| 0
| 0.689964
| 0
| 0
| 0.131661
| 0.005325
| 0
| 0
| 0
| 0
| 0.075269
| 1
| 0.023297
| false
| 0.005376
| 0.028674
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
869de55e2979204696bbf74b3181717195af29ad
| 14,522
|
py
|
Python
|
tests/core/full_node/test_block_height_map.py
|
zcomputerwiz/replaceme-blockchain
|
b6dfc3ff502e0cb6b7b5fbc566c7fb9ae559757a
|
[
"Apache-2.0"
] | null | null | null |
tests/core/full_node/test_block_height_map.py
|
zcomputerwiz/replaceme-blockchain
|
b6dfc3ff502e0cb6b7b5fbc566c7fb9ae559757a
|
[
"Apache-2.0"
] | null | null | null |
tests/core/full_node/test_block_height_map.py
|
zcomputerwiz/replaceme-blockchain
|
b6dfc3ff502e0cb6b7b5fbc566c7fb9ae559757a
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import struct
from replaceme.full_node.block_height_map import BlockHeightMap
from replaceme.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from replaceme.util.db_wrapper import DBWrapper
from tests.util.db_connection import DBConnection
from replaceme.types.blockchain_format.sized_bytes import bytes32
from typing import Optional
from replaceme.util.ints import uint8
# from tests.conftest import tmp_dir
def gen_block_hash(height: int) -> bytes32:
    """Return a deterministic fake 32-byte block hash for *height*."""
    # TODO: address hint errors and remove ignores
    # error: Incompatible return value type (got "bytes", expected "bytes32") [return-value]
    word = struct.pack(">I", height + 1)  # 4-byte big-endian, 1-based
    return word * 8  # type: ignore[return-value]
def gen_ses(height: int) -> SubEpochSummary:
    """Build a deterministic fake sub-epoch summary for *height*."""
    # Large offsets keep these hashes distinct from plain block hashes.
    prev = gen_block_hash(height + 0xFA0000)
    reward_chain = gen_block_hash(height + 0xFC0000)
    return SubEpochSummary(prev, reward_chain, uint8(0), None, None)
async def new_block(
    db: DBWrapper,
    block_hash: bytes32,
    parent: bytes32,
    height: int,
    is_peak: bool,
    ses: Optional[SubEpochSummary],
):
    """Insert one row into block_records.

    The v2 schema stores hashes as raw blobs while v1 stores hex text;
    everything else is identical, so only the hash encoding is branched
    on instead of duplicating the whole INSERT (the original repeated
    the statement verbatim in both branches).
    """
    if db.db_version == 2:
        hash_value, parent_value = block_hash, parent
    else:
        hash_value, parent_value = block_hash.hex(), parent.hex()
    cursor = await db.db.execute(
        "INSERT INTO block_records VALUES(?, ?, ?, ?, ?)",
        (
            hash_value,
            parent_value,
            height,
            # sub epoch summary: serialized bytes, or NULL when absent
            None if ses is None else bytes(ses),
            is_peak,
        ),
    )
    await cursor.close()
async def setup_db(db: DBWrapper):
    """Create the block_records table and its indices.

    The two schema versions differ only in how hashes are stored
    (v2: blob, v1: hex text), so the column type is parameterized
    instead of duplicating the DDL and index statements (the original
    repeated all four statements in both branches). The generated SQL
    is byte-identical to the original's.
    """
    hash_type = "blob" if db.db_version == 2 else "text"
    await db.db.execute(
        "CREATE TABLE IF NOT EXISTS block_records("
        f"header_hash {hash_type} PRIMARY KEY,"
        f"prev_hash {hash_type},"
        "height bigint,"
        "sub_epoch_summary blob,"
        "is_peak tinyint)"
    )
    await db.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
    await db.db.execute("CREATE INDEX IF NOT EXISTS hh on block_records(header_hash)")
    await db.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)")
# if chain_id != 0, the last block in the chain won't be considered the peak,
# and the chain_id will be mixed in to the hashes, to form a separate chain at
# the same heights as the main chain
async def setup_chain(
    db: DBWrapper, length: int, *, chain_id: int = 0, ses_every: Optional[int] = None, start_height=0
):
    """Insert a chain of *length* fake blocks into block_records.

    A non-zero chain_id is mixed into the hashes to form a parallel
    chain at the same heights; only chain 0's tip is flagged as peak.
    A sub-epoch summary is attached every *ses_every* blocks when set.
    """
    current = start_height
    tip_hash = gen_block_hash(current + chain_id * 65536)
    prev_hash = bytes32([0] * 32)
    while current < length:
        if ses_every is not None and current % ses_every == 0:
            summary = gen_ses(current)
        else:
            summary = None
        await new_block(db, tip_hash, prev_hash, current, False, summary)
        current += 1
        prev_hash = tip_hash
        tip_hash = gen_block_hash(current + chain_id * 65536)
    # we only set is_peak=1 for chain_id 0
    await new_block(db, tip_hash, prev_hash, current, chain_id == 0, None)
class TestBlockHeightMap:
    """Tests for BlockHeightMap: height/hash lookup, persistence
    (flush + restore), cache extension, orphan chains, updates and
    rollback, run against both DB schema versions."""

    @pytest.mark.asyncio
    @pytest.mark.parametrize("db_version", [1, 2])
    async def test_height_to_hash(self, tmp_dir, db_version):
        """Every stored height maps to its hash; absent heights don't."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            assert not height_map.contains_height(11)
            for height in reversed(range(10)):
                assert height_map.contains_height(height)
            for height in reversed(range(10)):
                assert height_map.get_hash(height) == gen_block_hash(height)

    @pytest.mark.asyncio
    @pytest.mark.parametrize("db_version", [1, 2])
    async def test_height_to_hash_long_chain(self, tmp_dir, db_version):
        """Same lookup guarantees hold for a 10000-block chain."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10000)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            # NOTE(review): contains_height is only checked for the first
            # 1000 heights while get_hash covers all 10000 -- possibly a
            # typo for range(10000); harmless either way.
            for height in reversed(range(1000)):
                assert height_map.contains_height(height)
            for height in reversed(range(10000)):
                assert height_map.get_hash(height) == gen_block_hash(height)

    @pytest.mark.asyncio
    @pytest.mark.parametrize("db_version", [1, 2])
    async def test_save_restore(self, tmp_dir, db_version):
        """A flushed map can be restored from its on-disk cache."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10000, ses_every=20)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            for height in reversed(range(10000)):
                assert height_map.contains_height(height)
                assert height_map.get_hash(height) == gen_block_hash(height)
                if (height % 20) == 0:
                    assert height_map.get_ses(height) == gen_ses(height)
                else:
                    with pytest.raises(KeyError) as _:
                        height_map.get_ses(height)
            # Persist the cache, then drop the in-memory map.
            await height_map.maybe_flush()
            del height_map
            # To ensure we're actually loading from cache, and not the DB, clear
            # the table (but we still need the peak). We need at least 20 blocks
            # in the DB since we keep loading until we find a match of both hash
            # and sub epoch summary. In this test we have a sub epoch summary
            # every 20 blocks, so we generate the 30 last blocks only
            await db_wrapper.db.execute("DROP TABLE block_records")
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10000, ses_every=20, start_height=9970)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            for height in reversed(range(10000)):
                assert height_map.contains_height(height)
                assert height_map.get_hash(height) == gen_block_hash(height)
                if (height % 20) == 0:
                    assert height_map.get_ses(height) == gen_ses(height)
                else:
                    with pytest.raises(KeyError) as _:
                        height_map.get_ses(height)

    @pytest.mark.asyncio
    @pytest.mark.parametrize("db_version", [1, 2])
    async def test_restore_extend(self, tmp_dir, db_version):
        # test the case where the cache has fewer blocks than the DB, and that
        # we correctly load all the missing blocks from the DB to update the
        # cache
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 2000, ses_every=20)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            for height in reversed(range(2000)):
                assert height_map.contains_height(height)
                assert height_map.get_hash(height) == gen_block_hash(height)
                if (height % 20) == 0:
                    assert height_map.get_ses(height) == gen_ses(height)
                else:
                    with pytest.raises(KeyError) as _:
                        height_map.get_ses(height)
            # Flush the 2000-block cache before reopening with a longer DB.
            await height_map.maybe_flush()
            del height_map
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            # add 2000 blocks to the chain
            await setup_chain(db_wrapper, 4000, ses_every=20)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            # now make sure we have the complete chain, height 0 -> 4000
            for height in reversed(range(4000)):
                assert height_map.contains_height(height)
                assert height_map.get_hash(height) == gen_block_hash(height)
                if (height % 20) == 0:
                    assert height_map.get_ses(height) == gen_ses(height)
                else:
                    with pytest.raises(KeyError) as _:
                        height_map.get_ses(height)

    @pytest.mark.asyncio
    @pytest.mark.parametrize("db_version", [1, 2])
    async def test_height_to_hash_with_orphans(self, tmp_dir, db_version):
        """Orphan chains at the same heights must not shadow the main chain."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10)
            # set up two separate chains, but without the peak
            await setup_chain(db_wrapper, 10, chain_id=1)
            await setup_chain(db_wrapper, 10, chain_id=2)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            for height in range(10):
                assert height_map.get_hash(height) == gen_block_hash(height)

    @pytest.mark.asyncio
    @pytest.mark.parametrize("db_version", [1, 2])
    async def test_height_to_hash_update(self, tmp_dir, db_version):
        """update_height replaces a single height's hash in the map."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10)
            # orphan blocks
            await setup_chain(db_wrapper, 10, chain_id=1)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            for height in range(10):
                assert height_map.get_hash(height) == gen_block_hash(height)
            height_map.update_height(10, gen_block_hash(100), None)
            for height in range(9):
                assert height_map.get_hash(height) == gen_block_hash(height)
            assert height_map.get_hash(10) == gen_block_hash(100)

    @pytest.mark.asyncio
    @pytest.mark.parametrize("db_version", [1, 2])
    async def test_update_ses(self, tmp_dir, db_version):
        """update_height can attach a sub-epoch summary to a height."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10)
            # orphan blocks
            await setup_chain(db_wrapper, 10, chain_id=1)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(10)
            height_map.update_height(10, gen_block_hash(10), gen_ses(10))
            assert height_map.get_ses(10) == gen_ses(10)
            assert height_map.get_hash(10) == gen_block_hash(10)

    @pytest.mark.asyncio
    @pytest.mark.parametrize("db_version", [1, 2])
    async def test_height_to_ses(self, tmp_dir, db_version):
        """get_ses succeeds only at heights that carry a summary."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10, ses_every=2)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            assert height_map.get_ses(0) == gen_ses(0)
            assert height_map.get_ses(2) == gen_ses(2)
            assert height_map.get_ses(4) == gen_ses(4)
            assert height_map.get_ses(6) == gen_ses(6)
            assert height_map.get_ses(8) == gen_ses(8)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(1)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(3)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(5)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(7)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(9)

    @pytest.mark.asyncio
    @pytest.mark.parametrize("db_version", [1, 2])
    async def test_rollback(self, tmp_dir, db_version):
        """rollback(5) keeps summaries at/below 4 and drops those above."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10, ses_every=2)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            assert height_map.get_ses(0) == gen_ses(0)
            assert height_map.get_ses(2) == gen_ses(2)
            assert height_map.get_ses(4) == gen_ses(4)
            assert height_map.get_ses(6) == gen_ses(6)
            assert height_map.get_ses(8) == gen_ses(8)
            assert height_map.get_hash(5) == gen_block_hash(5)
            height_map.rollback(5)
            assert height_map.get_hash(5) == gen_block_hash(5)
            assert height_map.get_ses(0) == gen_ses(0)
            assert height_map.get_ses(2) == gen_ses(2)
            assert height_map.get_ses(4) == gen_ses(4)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(6)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(8)

    @pytest.mark.asyncio
    @pytest.mark.parametrize("db_version", [1, 2])
    async def test_rollback2(self, tmp_dir, db_version):
        """rollback(6) keeps the summary at the rollback height itself."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10, ses_every=2)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            assert height_map.get_ses(0) == gen_ses(0)
            assert height_map.get_ses(2) == gen_ses(2)
            assert height_map.get_ses(4) == gen_ses(4)
            assert height_map.get_ses(6) == gen_ses(6)
            assert height_map.get_ses(8) == gen_ses(8)
            assert height_map.get_hash(6) == gen_block_hash(6)
            height_map.rollback(6)
            assert height_map.get_hash(6) == gen_block_hash(6)
            assert height_map.get_ses(0) == gen_ses(0)
            assert height_map.get_ses(2) == gen_ses(2)
            assert height_map.get_ses(4) == gen_ses(4)
            assert height_map.get_ses(6) == gen_ses(6)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(8)
| 38.115486
| 101
| 0.618303
| 1,946
| 14,522
| 4.364851
| 0.110997
| 0.087944
| 0.077702
| 0.089004
| 0.767247
| 0.743701
| 0.743701
| 0.743701
| 0.727101
| 0.659407
| 0
| 0.028298
| 0.294312
| 14,522
| 380
| 102
| 38.215789
| 0.800546
| 0.07499
| 0
| 0.683824
| 0
| 0
| 0.062943
| 0.010292
| 0
| 0
| 0.001193
| 0.002632
| 0.180147
| 1
| 0.007353
| false
| 0
| 0.033088
| 0.003676
| 0.051471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
86b975f22935e975175bb869395867c77cf9c3cc
| 125
|
py
|
Python
|
daftarGuide/admin.py
|
irfanmaulananasution/guidian
|
b5224c58d320c81bb085a89bcc011e020207a1f2
|
[
"MIT"
] | 1
|
2021-09-09T02:34:59.000Z
|
2021-09-09T02:34:59.000Z
|
daftarGuide/admin.py
|
irfanmaulananasution/guidian
|
b5224c58d320c81bb085a89bcc011e020207a1f2
|
[
"MIT"
] | null | null | null |
daftarGuide/admin.py
|
irfanmaulananasution/guidian
|
b5224c58d320c81bb085a89bcc011e020207a1f2
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import GuideModel
# Expose GuideModel in the Django admin interface.
admin.site.register(GuideModel)
| 25
| 32
| 0.824
| 17
| 125
| 6.058824
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112
| 125
| 5
| 33
| 25
| 0.927928
| 0.208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
86c16d5000909ccddba7ddf13b540379e4759f95
| 3,886
|
py
|
Python
|
dublincore/migrations/0003_auto__chg_field_qualifieddublincoreelementhistory_object_id__chg_field.py
|
mredar/django-dublincore
|
eabac3fc9225d7961a5d509718c1d3059e3681cb
|
[
"BSD-3-Clause"
] | 6
|
2015-07-13T13:29:39.000Z
|
2017-09-08T20:47:35.000Z
|
dublincore/migrations/0003_auto__chg_field_qualifieddublincoreelementhistory_object_id__chg_field.py
|
mredar/django-dublincore
|
eabac3fc9225d7961a5d509718c1d3059e3681cb
|
[
"BSD-3-Clause"
] | 3
|
2020-02-11T23:26:32.000Z
|
2021-06-10T18:46:34.000Z
|
dublincore/migrations/0003_auto__chg_field_qualifieddublincoreelementhistory_object_id__chg_field.py
|
mredar/django-dublincore
|
eabac3fc9225d7961a5d509718c1d3059e3681cb
|
[
"BSD-3-Clause"
] | 2
|
2017-03-04T13:30:22.000Z
|
2019-05-10T14:04:37.000Z
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: widen ``object_id`` from PositiveIntegerField to
    CharField(max_length=255) on both QualifiedDublinCoreElement and
    QualifiedDublinCoreElementHistory."""

    def forwards(self, orm):
        # Changing field 'QualifiedDublinCoreElementHistory.object_id'
        db.alter_column('dublincore_qualifieddublincoreelementhistory', 'object_id', self.gf('django.db.models.fields.CharField')(max_length=255))
        # Changing field 'QualifiedDublinCoreElement.object_id'
        db.alter_column('dublincore_qualifieddublincoreelement', 'object_id', self.gf('django.db.models.fields.CharField')(max_length=255))

    def backwards(self, orm):
        # Reverse: restore the original PositiveIntegerField columns.
        # Changing field 'QualifiedDublinCoreElementHistory.object_id'
        db.alter_column('dublincore_qualifieddublincoreelementhistory', 'object_id', self.gf('django.db.models.fields.PositiveIntegerField')())
        # Changing field 'QualifiedDublinCoreElement.object_id'
        db.alter_column('dublincore_qualifieddublincoreelement', 'object_id', self.gf('django.db.models.fields.PositiveIntegerField')())

    # Frozen ORM state generated by South; do not edit by hand.
    models = {
        'dublincore.qualifieddublincoreelement': {
            'Meta': {'ordering': "['term']", 'object_name': 'QualifiedDublinCoreElement'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'qualifier': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'term': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'dublincore.qualifieddublincoreelementhistory': {
            'Meta': {'ordering': "['term']", 'object_name': 'QualifiedDublinCoreElementHistory'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'qdce': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'history'", 'null': 'True', 'to': "orm['dublincore.QualifiedDublinCoreElement']"}),
            'qdce_id_stored': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'qualifier': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'term': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['dublincore']
| 62.677419
| 172
| 0.62069
| 380
| 3,886
| 6.197368
| 0.197368
| 0.09172
| 0.154565
| 0.220807
| 0.774947
| 0.735881
| 0.72017
| 0.72017
| 0.667941
| 0.631847
| 0
| 0.009031
| 0.1737
| 3,886
| 61
| 173
| 63.704918
| 0.724385
| 0.063047
| 0
| 0.377778
| 0
| 0
| 0.568638
| 0.381568
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.088889
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
86dc77ab074e83a9518d4753551143be0c00391b
| 103
|
py
|
Python
|
main/admin.py
|
Tan-isha-Agrawal/agri
|
52c8ff1d756a0d2f20f1a17886485a8ec1583aa3
|
[
"MIT"
] | null | null | null |
main/admin.py
|
Tan-isha-Agrawal/agri
|
52c8ff1d756a0d2f20f1a17886485a8ec1583aa3
|
[
"MIT"
] | null | null | null |
main/admin.py
|
Tan-isha-Agrawal/agri
|
52c8ff1d756a0d2f20f1a17886485a8ec1583aa3
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import PredResults2
# Expose PredResults2 in the Django admin interface.
admin.site.register(PredResults2)
| 25.75
| 33
| 0.825243
| 13
| 103
| 6.538462
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021978
| 0.116505
| 103
| 4
| 34
| 25.75
| 0.912088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
86fa758e513bfb1dcb2314c2023253302fafd2a4
| 45,177
|
py
|
Python
|
AppDB/test/unit/test_datastore_server.py
|
christianbaun/appscale
|
c24ddfd987c8eed8ed8864cc839cc0556a8af3c7
|
[
"Apache-2.0"
] | 2
|
2018-10-09T17:48:12.000Z
|
2019-01-15T10:18:19.000Z
|
AppDB/test/unit/test_datastore_server.py
|
christianbaun/appscale
|
c24ddfd987c8eed8ed8864cc839cc0556a8af3c7
|
[
"Apache-2.0"
] | null | null | null |
AppDB/test/unit/test_datastore_server.py
|
christianbaun/appscale
|
c24ddfd987c8eed8ed8864cc839cc0556a8af3c7
|
[
"Apache-2.0"
] | 1
|
2022-02-20T20:57:12.000Z
|
2022-02-20T20:57:12.000Z
|
#!/usr/bin/env python
# Programmer: Navraj Chohan <nlake44@gmail.com>
import os
import sys
import unittest
from cassandra.cluster import Cluster
from flexmock import flexmock
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../AppServer"))
from google.appengine.api import api_base_pb
from google.appengine.datastore import entity_pb
from google.appengine.datastore import datastore_pb
from google.appengine.ext import db
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
import appscale_info
import dbconstants
from cassandra_env.cassandra_interface import DatastoreProxy
from cassandra_env.cassandra_interface import deletions_for_entity
from cassandra_env.cassandra_interface import index_deletions
from cassandra_env.cassandra_interface import mutations_for_entity
from datastore_server import DatastoreDistributed
from datastore_server import ID_KEY_LENGTH
from datastore_server import TOMBSTONE
from dbconstants import APP_ENTITY_SCHEMA
from dbconstants import JOURNAL_SCHEMA
from zkappscale.zktransaction import TX_TIMEOUT
from zkappscale.zktransaction import ZKTransactionException
class Item(db.Model):
  """Minimal datastore model used as a fixture by these tests."""
  name = db.StringProperty(required=True)
class TestDatastoreServer(unittest.TestCase):
"""
A set of test cases for the datastore server (datastore server v2)
"""
BASIC_ENTITY = ['guestbook', 'Greeting', 'foo', 'content', 'hello world']
  def get_zookeeper(self):
    """Return a flexmock zookeeper whose lock/transaction calls always
    succeed (transaction id 1, counter range 0..1000)."""
    zookeeper = flexmock()
    zookeeper.should_receive("acquire_lock").and_return(True)
    zookeeper.should_receive("release_lock").and_return(True)
    zookeeper.should_receive("get_transaction_id").and_return(1)
    zookeeper.should_receive("increment_and_get_counter").and_return(0,1000)
    return zookeeper
  def test_get_entity_kind(self):
    """get_entity_kind must extract the kind name from an entity key."""
    db_batch = flexmock()
    db_batch.should_receive('valid_data_version').and_return(True)
    dd = DatastoreDistributed(db_batch, None)
    item = Item(name="Bob", _app="hello")
    key = db.model_to_protobuf(item)
    self.assertEquals(dd.get_entity_kind(key), "Item")
  def test_kind_key(self):
    """get_kind_key encoding for auto ids, key names, and parent paths."""
    db_batch = flexmock()
    db_batch.should_receive('valid_data_version').and_return(True)
    dd = DatastoreDistributed(db_batch, None)
    # Auto-assigned id: encoded as a zero-padded numeric id.
    item = Item(name="Dyan", _app="hello")
    key = db.model_to_protobuf(item)
    self.assertEquals(dd.get_kind_key("howdy", key.key().path()), "howdy\x00Item\x01Item:0000000000\x01")
    # Explicit key name: encoded verbatim.
    item1 = Item(key_name="Bob", name="Bob", _app="hello")
    key = db.model_to_protobuf(item1)
    self.assertEquals(dd.get_kind_key("howdy", key.key().path()), "howdy\x00Item\x01Item:Bob\x01")
    # Parent/child path: ancestors appear before the child element.
    item2 = Item(key_name="Frank", name="Frank", _app="hello", parent = item1)
    key = db.model_to_protobuf(item2)
    self.assertEquals(dd.get_kind_key("howdy", key.key().path()),
      "howdy\x00Item\x01Item:Bob\x01Item:Frank\x01")
  def test_get_entity_key(self):
    """get_entity_key must prepend the prefix to the encoded key path."""
    db_batch = flexmock()
    db_batch.should_receive('valid_data_version').and_return(True)
    dd = DatastoreDistributed(db_batch, None)
    item = Item(key_name="Bob", name="Bob", _app="hello")
    key = db.model_to_protobuf(item)
    self.assertEquals(str(dd.get_entity_key("howdy", key.key().path())), "howdy\x00Item:Bob\x01")
  def test_validate_key(self):
    """validate_key must accept a well-formed entity key (no raise)."""
    db_batch = flexmock()
    db_batch.should_receive('valid_data_version').and_return(True)
    dd = DatastoreDistributed(db_batch, None)
    item = Item(key_name="Bob", name="Bob", _app="hello")
    key = db.model_to_protobuf(item)
    dd.validate_key(key.key())
  def test_get_table_prefix(self):
    """get_table_prefix must derive the app-scoped table prefix."""
    db_batch = flexmock()
    db_batch.should_receive('valid_data_version').and_return(True)
    db_batch.should_receive("batch_put_entity").and_return(None)
    zookeeper = flexmock()
    zookeeper.should_receive("acquire_lock").and_return(True)
    zookeeper.should_receive("release_lock").and_return(True)
    dd = DatastoreDistributed(db_batch, zookeeper)
    item = Item(key_name="Bob", name="Bob", _app="hello")
    key = db.model_to_protobuf(item)
    self.assertEquals(dd.get_table_prefix(key), "hello\x00")
  def test_get_index_key_from_params(self):
    """Index key parts must be joined with the NUL separator."""
    db_batch = flexmock()
    db_batch.should_receive('valid_data_version').and_return(True)
    dd = DatastoreDistributed(db_batch, None)
    params = ['a','b','c','d','e']
    self.assertEquals(dd.get_index_key_from_params(params), "a\x00b\x00c\x00d\x00e")
  def test_get_index_kv_from_tuple(self):
    """Each (prefix, entity) tuple must yield an (index key, reference)
    pair per entity."""
    db_batch = flexmock()
    db_batch.should_receive('valid_data_version').and_return(True)
    dd = DatastoreDistributed(db_batch, None)
    item1 = Item(key_name="Bob", name="Bob", _app="hello")
    item2 = Item(key_name="Sally", name="Sally", _app="hello")
    key1 = db.model_to_protobuf(item1)
    key2 = db.model_to_protobuf(item2)
    tuples_list = [("a\x00b",key1),("a\x00b",key2)]
    self.assertEquals(dd.get_index_kv_from_tuple(
      tuples_list), (['a\x00b\x00Item\x00name\x00\x9aBob\x01\x01\x00Item:Bob\x01',
      'a\x00b\x00Item:Bob\x01'],
      ['a\x00b\x00Item\x00name\x00\x9aSally\x01\x01\x00Item:Sally\x01',
      'a\x00b\x00Item:Sally\x01']))
def test_get_composite_index_key(self):
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
dd = DatastoreDistributed(db_batch, self.get_zookeeper())
dd = flexmock(dd)
composite_index = entity_pb.CompositeIndex()
composite_index.set_id(123)
composite_index.set_app_id("appid")
definition = composite_index.mutable_definition()
definition.set_entity_type("kind")
prop1 = definition.add_property()
prop1.set_name("prop1")
prop1.set_direction(1) # ascending
prop2 = definition.add_property()
prop2.set_name("prop2")
prop1.set_direction(1) # ascending
ent = self.get_new_entity_proto("appid", "kind", "entity_name", "prop1", "value", ns="")
self.assertEquals(dd.get_composite_index_key(composite_index, ent),
"appid\x00\x00123\x00\x9avalue\x01\x01\x00\x00kind:entity_name\x01")
def test_get_indices(self):
session = flexmock(default_consistency_level=None)
cluster = flexmock(connect=lambda keyspace: session)
flexmock(appscale_info).should_receive('get_db_ips')
flexmock(Cluster).new_instances(cluster)
flexmock(DatastoreProxy).should_receive('range_query').and_return({})
db_batch = DatastoreProxy()
self.assertEquals(db_batch.get_indices("appid"), [])
def test_delete_composite_index_metadata(self):
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_delete").and_return(None)
dd = DatastoreDistributed(db_batch, self.get_zookeeper())
dd = flexmock(dd)
composite_index = entity_pb.CompositeIndex()
composite_index.set_id(1)
dd.delete_composite_index_metadata("appid", composite_index)
def test_create_composite_index(self):
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_put_entity").and_return(None)
dd = DatastoreDistributed(db_batch, self.get_zookeeper())
dd = flexmock(dd)
index = entity_pb.CompositeIndex()
index.set_app_id("appid")
index.set_state(2)
definition = index.mutable_definition()
definition.set_entity_type("kind")
definition.set_ancestor(0)
prop1 = definition.add_property()
prop1.set_name("prop1")
prop1.set_direction(1) # ascending
prop2 = definition.add_property()
prop2.set_name("prop2")
prop1.set_direction(1) # ascending
dd.create_composite_index("appid", index)
assert index.id() > 0
def test_insert_composite_indexes(self):
composite_index = entity_pb.CompositeIndex()
composite_index.set_id(123)
composite_index.set_app_id("appid")
definition = composite_index.mutable_definition()
definition.set_entity_type("kind")
prop1 = definition.add_property()
prop1.set_name("prop1")
prop1.set_direction(1) # ascending
prop2 = definition.add_property()
prop2.set_name("prop2")
prop1.set_direction(1) # ascending
ent = self.get_new_entity_proto("appid", "kind", "entity_name", "prop1", "value", ns="")
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_put_entity").and_return(None).once()
dd = DatastoreDistributed(db_batch, self.get_zookeeper())
dd.insert_composite_indexes([ent], [composite_index])
def test_allocate_ids(self):
PREFIX = "x"
BATCH_SIZE = 1000
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
dd = DatastoreDistributed(db_batch, self.get_zookeeper())
self.assertEquals(dd.allocate_ids(PREFIX, BATCH_SIZE), (1, 1000))
dd = DatastoreDistributed(db_batch, self.get_zookeeper())
self.assertEquals(dd.allocate_ids(PREFIX, None, max_id=1000), (1, 1000))
try:
# Unable to use self.assertRaises because of the optional argrument max_id
ed = DatastoreDistributed(db_batch, self.get_zookeeper())
dd.allocate_ids(PREFIX, BATCH_SIZE, max_id=10)
raise "Allocate IDs should not let you set max_id and size"
except ValueError:
pass
def testFetchKeys(self):
entity_proto1 = self.get_new_entity_proto("test", "test_kind", "bob", "prop1name",
"prop1val", ns="blah")
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_delete").and_return(None)
db_batch.should_receive("batch_put_entity").and_return(None)
db_batch.should_receive("batch_get_entity").and_return({'test\x00blah\x00test_kind:bob\x01':
{APP_ENTITY_SCHEMA[0]: entity_proto1.Encode(),
APP_ENTITY_SCHEMA[1]: 1}}).and_return({'test\x00blah\x00test_kind:bob\x01\x000000000002':
{JOURNAL_SCHEMA[0]: entity_proto1.Encode()}})
zookeeper = flexmock()
zookeeper.should_receive("acquire_lock").and_return(True)
zookeeper.should_receive("release_lock").and_return(True)
dd = DatastoreDistributed(db_batch, zookeeper)
self.assertEquals(({'test\x00blah\x00test_kind:bob\x01':
{'txnID': 1, 'entity': entity_proto1.Encode()}
},
['test\x00blah\x00test_kind:bob\x01']),
dd.fetch_keys([entity_proto1.key()]))
def test_commit_transaction(self):
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
zookeeper = flexmock()
zookeeper.should_receive("release_lock").and_return(True)
dd = DatastoreDistributed(db_batch, zookeeper)
flexmock(dd).should_receive('apply_txn_changes')
commit_request = datastore_pb.Transaction()
commit_request.set_handle(123)
commit_request.set_app("aaa")
http_request = commit_request.Encode()
self.assertEquals(dd.commit_transaction("app_id", http_request),
(datastore_pb.CommitResponse().Encode(), 0, ""))
def test_rollback_transcation(self):
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
zookeeper = flexmock()
zookeeper.should_receive("release_lock").and_return(True)
zookeeper.should_receive("notify_failed_transaction").and_return(True)
dd = DatastoreDistributed(db_batch, zookeeper)
commit_request = datastore_pb.Transaction()
commit_request.set_handle(123)
commit_request.set_app("aaa")
http_request = commit_request.Encode()
self.assertEquals(dd.rollback_transaction("app_id", http_request),
(api_base_pb.VoidProto().Encode(), 0, ""))
def get_new_entity_proto(self, app_id, kind, entity_name, prop_name, prop_value, ns=""):
entity_proto = datastore_pb.EntityProto()
reference = entity_proto.mutable_key()
reference.set_app(app_id)
reference.set_name_space(ns)
path = reference.mutable_path()
element = path.add_element()
element.set_type(kind)
element.set_name(entity_name)
ent_group = entity_proto.mutable_entity_group()
eg_element = ent_group.add_element()
eg_element.set_type(kind)
eg_element.set_name(entity_name)
prop = entity_proto.add_property()
prop.set_meaning(datastore_pb.Property.BYTESTRING)
prop.set_name(prop_name)
prop.set_multiple(1)
val = prop.mutable_value()
val.set_stringvalue(prop_value)
return entity_proto
def test_dynamic_put(self):
PREFIX = "x\x01"
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
zookeeper = flexmock()
zookeeper.should_receive("acquire_lock").and_return(True)
zookeeper.should_receive("release_lock").and_return(True)
zookeeper.should_receive("get_transaction_id").and_return(1)
entity_proto1 = self.get_new_entity_proto("test", "test_kind", "bob", "prop1name",
"prop1val", ns="blah")
entity_key1 = 'test\x00blah\x00test_kind:bob\x01'
entity_proto2 = self.get_new_entity_proto("test", "test_kind", "nancy", "prop1name",
"prop2val", ns="blah")
entity_key2 = 'test\x00blah\x00test_kind:nancy\x01'
db_batch.should_receive('batch_get_entity').and_return(
{entity_key1: {}, entity_key2: {}})
db_batch.should_receive('batch_mutate')
dd = DatastoreDistributed(db_batch, zookeeper)
putreq_pb = datastore_pb.PutRequest()
putreq_pb.add_entity()
putreq_pb.mutable_entity(0).MergeFrom(entity_proto1)
putreq_pb.add_entity()
putreq_pb.mutable_entity(1).MergeFrom(entity_proto2)
putresp_pb = datastore_pb.PutResponse()
dd.dynamic_put('test', putreq_pb, putresp_pb)
self.assertEquals(len(putresp_pb.key_list()), 2)
def test_put_entities(self):
app_id = 'test'
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
zookeeper = flexmock()
zookeeper.should_receive("acquire_lock").and_return(True)
entity_proto1 = self.get_new_entity_proto(
app_id, "test_kind", "bob", "prop1name", "prop1val", ns="blah")
entity_key1 = 'test\x00blah\x00test_kind:bob\x01'
entity_proto2 = self.get_new_entity_proto(
app_id, "test_kind", "nancy", "prop1name", "prop2val", ns="blah")
entity_key2 = 'test\x00blah\x00test_kind:nancy\x01'
entity_list = [entity_proto1, entity_proto2]
db_batch.should_receive('batch_get_entity').and_return(
{entity_key1: {}, entity_key2: {}})
db_batch.should_receive('batch_mutate')
dd = DatastoreDistributed(db_batch, zookeeper)
# Make sure it does not throw an exception
txn_hash = {entity_key1: 1, entity_key2: 1}
dd.put_entities(app_id, entity_list, txn_hash)
def test_acquire_locks_for_trans(self):
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
dd = DatastoreDistributed(db_batch, None)
flexmock(dd).should_receive("is_instance_wrapper").and_return(False).once()
self.assertRaises(TypeError, dd.acquire_locks_for_trans, [1], 1)
dd = DatastoreDistributed(db_batch, None)
flexmock(dd).should_receive("is_instance_wrapper").and_return(True) \
.and_return(False).and_return(False)
self.assertRaises(TypeError, dd.acquire_locks_for_trans, [1], 1)
dd = DatastoreDistributed(db_batch, None)
flexmock(dd).should_receive("is_instance_wrapper").and_return(True) \
.and_return(True)
dd = DatastoreDistributed(db_batch, None)
flexmock(dd).should_receive("is_instance_wrapper").and_return(True) \
.and_return(True).and_return(False)
flexmock(dd).should_receive("get_table_prefix").and_return("prefix").never()
flexmock(dd).should_receive("get_root_key_from_entity_key").and_return("rootkey").never()
self.assertEquals({}, dd.acquire_locks_for_trans([], 1))
zookeeper = flexmock()
zookeeper.should_receive("acquire_lock").once()
dd = DatastoreDistributed(db_batch, zookeeper)
entity = flexmock()
entity.should_receive("app").and_return("appid")
flexmock(dd).should_receive("is_instance_wrapper").and_return(True) \
.and_return(True).and_return(True)
flexmock(dd).should_receive("get_root_key_from_entity_key").and_return("rootkey").once()
self.assertEquals({'rootkey':1}, dd.acquire_locks_for_trans([entity], 1))
zookeeper = flexmock()
zookeeper.should_receive("acquire_lock").once().and_raise(ZKTransactionException)
zookeeper.should_receive("notify_failed_transaction").once()
dd = DatastoreDistributed(db_batch, zookeeper)
entity = flexmock()
entity.should_receive("app").and_return("appid")
flexmock(dd).should_receive("is_instance_wrapper").and_return(True) \
.and_return(True).and_return(True)
flexmock(dd).should_receive("get_root_key_from_entity_key").and_return("rootkey").once()
self.assertRaises(ZKTransactionException, dd.acquire_locks_for_trans, [entity], 1)
def test_acquire_locks_for_nontrans(self):
app_id = 'test'
PREFIX = 'x\x01'
zookeeper = flexmock()
zookeeper.should_receive("acquire_lock").and_return(True)
zookeeper.should_receive("get_transaction_id").and_return(1).and_return(2)
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_put_entity").and_return(None)
db_batch.should_receive("batch_get_entity").and_return({PREFIX:{}})
db_batch.should_receive("batch_delete").and_return(None)
dd = DatastoreDistributed(db_batch, zookeeper)
entity_proto1 = self.get_new_entity_proto(
app_id, 'test_kind', "bob", "prop1name", "prop1val", ns="blah")
entity_proto2 = self.get_new_entity_proto(
app_id, "test_kind", "nancy", "prop1name", "prop2val", ns="blah")
entity_list = [entity_proto1, entity_proto2]
self.assertEquals({'test\x00blah\x00test_kind:bob\x01': 2, 'test\x00blah\x00test_kind:nancy\x01': 1},
dd.acquire_locks_for_nontrans("test", entity_list))
def test_delete_entities(self):
app_id = 'test'
entity_proto1 = self.get_new_entity_proto(
app_id, "test_kind", "bob", "prop1name", "prop1val", ns="blah")
row_key = "test\x00blah\x00test_kind:bob\x01"
row_values = {row_key: {APP_ENTITY_SCHEMA[0]: entity_proto1.Encode(),
APP_ENTITY_SCHEMA[1]: '1'}}
zookeeper = flexmock()
zookeeper.should_receive("get_valid_transaction_id").and_return(1)
zookeeper.should_receive("register_updated_key").and_return(1)
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_get_entity").and_return(row_values)
db_batch.should_receive('batch_mutate')
dd = DatastoreDistributed(db_batch, zookeeper)
row_keys = [entity_proto1.key()]
txn_hash = {row_key: 1}
dd.delete_entities(app_id, row_keys, txn_hash)
def test_release_put_locks_for_nontrans(self):
zookeeper = flexmock()
zookeeper.should_receive("get_valid_transaction_id").and_return(1)
zookeeper.should_receive("register_updated_key").and_return(1)
zookeeper.should_receive("release_lock").and_return(True)
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_put_entity").and_return(None)
db_batch.should_receive("batch_get_entity").and_return(None)
db_batch.should_receive("batch_delete").and_return(None)
dd = DatastoreDistributed(db_batch, zookeeper)
entity_proto1 = self.get_new_entity_proto("test", "test_kind", "bob", "prop1name",
"prop1val", ns="blah")
entity_proto2 = self.get_new_entity_proto("test", "test_kind", "nancy", "prop1name",
"prop2val", ns="blah")
entities = [entity_proto1, entity_proto2]
dd.release_locks_for_nontrans("test", entities,
{'test\x00blah\x00test_kind:bob\x01': 1, 'test\x00blah\x00test_kind:nancy\x01': 2})
def test_root_key_from_entity_key(self):
zookeeper = flexmock()
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_put_entity").and_return(None)
dd = DatastoreDistributed(db_batch, zookeeper)
self.assertEquals("test\x00blah\x00test_kind:bob\x01",
dd.get_root_key_from_entity_key("test\x00blah\x00test_kind:bob\x01nancy\x01"))
entity_proto1 = self.get_new_entity_proto("test", "test_kind", "nancy", "prop1name",
"prop2val", ns="blah")
self.assertEquals("test\x00blah\x00test_kind:nancy\x01",
dd.get_root_key_from_entity_key(entity_proto1.key()))
def test_dynamic_get(self):
entity_proto1 = self.get_new_entity_proto("test", "test_kind", "nancy", "prop1name",
"prop2val", ns="blah")
zookeeper = flexmock()
zookeeper.should_receive("get_valid_transaction_id").and_return(1)
zookeeper.should_receive("register_updated_key").and_return(1)
zookeeper.should_receive("acquire_lock").and_return(True)
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_put_entity").and_return(None)
db_batch.should_receive("batch_get_entity").and_return(
{"test\x00blah\x00test_kind:nancy\x01":
{
APP_ENTITY_SCHEMA[0]: entity_proto1.Encode(),
APP_ENTITY_SCHEMA[1]: 1
}
})
dd = DatastoreDistributed(db_batch, zookeeper)
entity_key = entity_proto1.key()
get_req = datastore_pb.GetRequest()
key = get_req.add_key()
key.MergeFrom(entity_key)
get_resp = datastore_pb.GetResponse()
dd.dynamic_get("test", get_req, get_resp)
self.assertEquals(get_resp.entity_size(), 1)
# Now test while in a transaction
get_resp = datastore_pb.GetResponse()
get_req.mutable_transaction().set_handle(1)
dd.dynamic_get("test", get_req, get_resp)
self.assertEquals(get_resp.entity_size(), 1)
def test_ancestor_query(self):
query = datastore_pb.Query()
ancestor = query.mutable_ancestor()
entity_proto1 = self.get_new_entity_proto("test", "test_kind", "nancy", "prop1name",
"prop1val", ns="blah")
entity_key = entity_proto1.key()
get_req = datastore_pb.GetRequest()
key = get_req.add_key()
key.MergeFrom(entity_key)
ancestor.MergeFrom(entity_key)
filter_info = []
tombstone1 = {'key': {APP_ENTITY_SCHEMA[0]:TOMBSTONE, APP_ENTITY_SCHEMA[1]: 1}}
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_get_entity").and_return(
{"test\x00blah\x00test_kind:nancy\x01":
{
APP_ENTITY_SCHEMA[0]: entity_proto1.Encode(),
APP_ENTITY_SCHEMA[1]: 1
}
})
db_batch.should_receive("batch_put_entity").and_return(None)
entity_proto1 = {'test\x00blah\x00test_kind:nancy\x01':{APP_ENTITY_SCHEMA[0]:entity_proto1.Encode(),
APP_ENTITY_SCHEMA[1]: 1}}
db_batch.should_receive("range_query").and_return([entity_proto1, tombstone1]).and_return([])
zookeeper = flexmock()
zookeeper.should_receive("get_valid_transaction_id").and_return(1)
zookeeper.should_receive("acquire_lock").and_return(True)
zookeeper.should_receive("is_in_transaction").and_return(False)
dd = DatastoreDistributed(db_batch, zookeeper)
dd.ancestor_query(query, filter_info, None)
# Now with a transaction
transaction = query.mutable_transaction()
transaction.set_handle(2)
dd.ancestor_query(query, filter_info, None)
def test_ordered_ancestor_query(self):
query = datastore_pb.Query()
ancestor = query.mutable_ancestor()
entity_proto1 = self.get_new_entity_proto("test", "test_kind", "nancy", "prop1name",
"prop1val", ns="blah")
entity_key = entity_proto1.key()
get_req = datastore_pb.GetRequest()
key = get_req.add_key()
key.MergeFrom(entity_key)
ancestor.MergeFrom(entity_key)
filter_info = []
tombstone1 = {'key': {APP_ENTITY_SCHEMA[0]:TOMBSTONE, APP_ENTITY_SCHEMA[1]: 1}}
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_get_entity").and_return(
{"test\x00blah\x00test_kind:nancy\x01":
{
APP_ENTITY_SCHEMA[0]: entity_proto1.Encode(),
APP_ENTITY_SCHEMA[1]: 1
}
})
db_batch.should_receive("batch_put_entity").and_return(None)
entity_proto1 = {'test\x00blah\x00test_kind:nancy\x01':{APP_ENTITY_SCHEMA[0]:entity_proto1.Encode(),
APP_ENTITY_SCHEMA[1]: 1}}
db_batch.should_receive("range_query").and_return([entity_proto1, tombstone1]).and_return([])
zookeeper = flexmock()
zookeeper.should_receive("get_valid_transaction_id").and_return(1)
zookeeper.should_receive("acquire_lock").and_return(True)
zookeeper.should_receive("is_in_transaction").and_return(False)
dd = DatastoreDistributed(db_batch, zookeeper)
dd.ordered_ancestor_query(query, filter_info, None)
# Now with a transaction
transaction = query.mutable_transaction()
transaction.set_handle(2)
dd.ordered_ancestor_query(query, filter_info, None)
def test_kindless_query(self):
query = datastore_pb.Query()
ancestor = query.mutable_ancestor()
entity_proto1 = self.get_new_entity_proto("test", "test_kind", "nancy", "prop1name",
"prop1val", ns="blah")
entity_key = entity_proto1.key()
get_req = datastore_pb.GetRequest()
key = get_req.add_key()
key.MergeFrom(entity_key)
tombstone1 = {'key': {APP_ENTITY_SCHEMA[0]:TOMBSTONE, APP_ENTITY_SCHEMA[1]: 1}}
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_get_entity").and_return(
{"test\x00blah\x00test_kind:nancy\x01":
{
APP_ENTITY_SCHEMA[0]: entity_proto1.Encode(),
APP_ENTITY_SCHEMA[1]: 1
}
})
db_batch.should_receive("batch_put_entity").and_return(None)
entity_proto1 = {'test\x00blah\x00test_kind:nancy\x01':{APP_ENTITY_SCHEMA[0]:entity_proto1.Encode(),
APP_ENTITY_SCHEMA[1]: 1}}
db_batch.should_receive("range_query").and_return([entity_proto1, tombstone1]).and_return([])
zookeeper = flexmock()
zookeeper.should_receive("get_valid_transaction_id").and_return(1)
zookeeper.should_receive("is_in_transaction").and_return(False)
zookeeper.should_receive("acquire_lock").and_return(True)
dd = DatastoreDistributed(db_batch, zookeeper)
filter_info = {
'__key__' : [[0, 0]]
}
dd.kindless_query(query, filter_info)
def test_dynamic_delete(self):
del_request = flexmock()
del_request.should_receive("key_list")
del_request.should_receive("has_transaction").never()
del_request.should_receive("transaction").never()
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
dd = DatastoreDistributed(db_batch, None)
dd.dynamic_delete("appid", del_request)
fake_element = flexmock()
fake_element.should_receive("type").and_return("kind")
fake_path = flexmock()
fake_path.should_receive("element_list").and_return([fake_element])
fake_key = flexmock()
fake_key.should_receive("path").and_return(fake_path)
del_request = flexmock()
del_request.should_receive("key_list").and_return([fake_key])
del_request.should_receive("has_transaction").and_return(True).twice()
transaction = flexmock()
transaction.should_receive("handle").and_return(1)
del_request.should_receive("transaction").and_return(transaction).once()
del_request.should_receive("has_mark_changes").and_return(False)
dd = DatastoreDistributed(db_batch, None)
flexmock(dd).should_receive("acquire_locks_for_trans").and_return({})
flexmock(dd).should_receive("release_locks_for_nontrans").never()
flexmock(dd).should_receive("get_entity_kind").and_return("kind")
flexmock(dd).should_receive('delete_entities_txn')
dd.dynamic_delete("appid", del_request)
del_request = flexmock()
del_request.should_receive("key_list").and_return([fake_key])
del_request.should_receive("has_transaction").and_return(False).twice()
del_request.should_receive("has_mark_changes").and_return(False)
dd = DatastoreDistributed(db_batch, None)
flexmock(dd).should_receive("acquire_locks_for_trans").never()
flexmock(dd).should_receive("acquire_locks_for_nontrans").once().and_return({})
flexmock(dd).should_receive("delete_entities").once()
flexmock(dd).should_receive("release_locks_for_nontrans").once()
dd.dynamic_delete("appid", del_request)
def test_reverse_path(self):
zookeeper = flexmock()
zookeeper.should_receive("get_transaction_id").and_return(1)
zookeeper.should_receive("get_valid_transaction_id").and_return(1)
zookeeper.should_receive("register_updated_key").and_return(1)
zookeeper.should_receive("acquire_lock").and_return(True)
zookeeper.should_receive("release_lock").and_return(True)
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_delete").and_return(None)
db_batch.should_receive("batch_put_entity").and_return(None)
db_batch.should_receive("batch_get_entity").and_return(None)
dd = DatastoreDistributed(db_batch, zookeeper)
key = "Project:Synapse\x01Module:Core\x01"
self.assertEquals(dd.reverse_path(key), "Module:Core\x01Project:Synapse\x01")
def test_remove_exists_filters(self):
zookeeper = flexmock()
zookeeper.should_receive("get_transaction_id").and_return(1)
zookeeper.should_receive("get_valid_transaction_id").and_return(1)
zookeeper.should_receive("register_updated_key").and_return(1)
zookeeper.should_receive("acquire_lock").and_return(True)
zookeeper.should_receive("release_lock").and_return(True)
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_delete").and_return(None)
db_batch.should_receive("batch_put_entity").and_return(None)
db_batch.should_receive("batch_get_entity").and_return(None)
dd = DatastoreDistributed(db_batch, zookeeper)
self.assertEquals(dd.remove_exists_filters({}), {})
filter_info = {"prop1":[(datastore_pb.Query_Filter.EQUAL, "1")],
"prop2": [(datastore_pb.Query_Filter.EQUAL, "2")]}
self.assertEquals(dd.remove_exists_filters(filter_info), filter_info)
filter_info = {"prop1":[(datastore_pb.Query_Filter.EXISTS, "1")],
"prop2": [(datastore_pb.Query_Filter.EXISTS, "2")]}
self.assertEquals(dd.remove_exists_filters(filter_info), {})
def test_is_zigzag_merge_join(self):
zookeeper = flexmock()
zookeeper.should_receive("get_transaction_id").and_return(1)
zookeeper.should_receive("get_valid_transaction_id").and_return(1)
zookeeper.should_receive("register_updated_key").and_return(1)
zookeeper.should_receive("acquire_lock").and_return(True)
zookeeper.should_receive("release_lock").and_return(True)
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_delete").and_return(None)
db_batch.should_receive("batch_put_entity").and_return(None)
db_batch.should_receive("batch_get_entity").and_return(None)
query = datastore_pb.Query()
dd = DatastoreDistributed(db_batch, zookeeper)
db_batch.should_receive("remove_exists_filters").and_return({})
self.assertEquals(dd.is_zigzag_merge_join(query, {}, {}), False)
filter_info = {"prop1":[(datastore_pb.Query_Filter.EQUAL, "1")],
"prop2": [(datastore_pb.Query_Filter.EQUAL, "2")]}
db_batch.should_receive("remove_exists_filters").and_return(filter_info)
self.assertEquals(dd.is_zigzag_merge_join(query, filter_info, []), True)
filter_info = {"prop1":[(datastore_pb.Query_Filter.EQUAL, "1")],
"prop1": [(datastore_pb.Query_Filter.EQUAL, "2")]}
self.assertEquals(dd.is_zigzag_merge_join(query, filter_info, []), False)
def test_zigzag_merge_join(self):
zookeeper = flexmock()
zookeeper.should_receive("get_transaction_id").and_return(1)
zookeeper.should_receive("get_valid_transaction_id").and_return(1)
zookeeper.should_receive("register_updated_key").and_return(1)
zookeeper.should_receive("acquire_lock").and_return(True)
zookeeper.should_receive("release_lock").and_return(True)
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
db_batch.should_receive("batch_delete").and_return(None)
db_batch.should_receive("batch_put_entity").and_return(None)
db_batch.should_receive("batch_get_entity").and_return(None)
query = datastore_pb.Query()
dd = DatastoreDistributed(db_batch, zookeeper)
flexmock(dd).should_receive("is_zigzag_merge_join").and_return(False)
self.assertEquals(dd.zigzag_merge_join(None, None, None), None)
filter_info = {"prop1":[(datastore_pb.Query_Filter.EQUAL, "1")],
"prop2": [(datastore_pb.Query_Filter.EQUAL, "2")]}
flexmock(query).should_receive("kind").and_return("kind")
flexmock(dd).should_receive("get_table_prefix").and_return("prefix")
flexmock(dd).should_receive("__apply_filters").and_return([])
flexmock(query).should_receive("limit").and_return(1)
self.assertEquals(dd.zigzag_merge_join(query, filter_info, []), None)
def test_index_deletions(self):
old_entity = self.get_new_entity_proto(*self.BASIC_ENTITY)
# No deletions should occur when the entity doesn't change.
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
dd = DatastoreDistributed(db_batch, None)
self.assertListEqual([], index_deletions(old_entity, old_entity))
# When a property changes, the previous index entries should be deleted.
new_entity = entity_pb.EntityProto()
new_entity.MergeFrom(old_entity)
new_entity.property_list()[0].value().set_stringvalue('updated content')
deletions = index_deletions(old_entity, new_entity)
self.assertEqual(len(deletions), 2)
self.assertEqual(deletions[0]['table'], dbconstants.ASC_PROPERTY_TABLE)
self.assertEqual(deletions[1]['table'], dbconstants.DSC_PROPERTY_TABLE)
prop = old_entity.add_property()
prop.set_name('author')
value = prop.mutable_value()
value.set_stringvalue('author1')
prop = new_entity.add_property()
prop.set_name('author')
value = prop.mutable_value()
value.set_stringvalue('author1')
# When given an index, an entry should be removed from the composite table.
composite_index = entity_pb.CompositeIndex()
composite_index.set_id(123)
composite_index.set_app_id('guestbook')
definition = composite_index.mutable_definition()
definition.set_entity_type('Greeting')
prop1 = definition.add_property()
prop1.set_name('content')
prop1.set_direction(datastore_pb.Query_Order.ASCENDING)
prop2 = definition.add_property()
prop2.set_name('author')
prop1.set_direction(datastore_pb.Query_Order.ASCENDING)
deletions = index_deletions(old_entity, new_entity, (composite_index,))
self.assertEqual(len(deletions), 3)
self.assertEqual(deletions[0]['table'], dbconstants.ASC_PROPERTY_TABLE)
self.assertEqual(deletions[1]['table'], dbconstants.DSC_PROPERTY_TABLE)
self.assertEqual(deletions[2]['table'], dbconstants.COMPOSITE_TABLE)
# No composite deletions should occur when the entity type differs.
definition.set_entity_type('TestEntity')
deletions = index_deletions(old_entity, new_entity, (composite_index,))
self.assertEqual(len(deletions), 2)
def test_deletions_for_entity(self):
entity = self.get_new_entity_proto(*self.BASIC_ENTITY)
# Deleting an entity with one property should remove four entries.
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
dd = DatastoreDistributed(db_batch, None)
deletions = deletions_for_entity(entity)
self.assertEqual(len(deletions), 4)
self.assertEqual(deletions[0]['table'], dbconstants.ASC_PROPERTY_TABLE)
self.assertEqual(deletions[1]['table'], dbconstants.DSC_PROPERTY_TABLE)
self.assertEqual(deletions[2]['table'], dbconstants.APP_ENTITY_TABLE)
self.assertEqual(deletions[3]['table'], dbconstants.APP_KIND_TABLE)
prop = entity.add_property()
prop.set_name('author')
value = prop.mutable_value()
value.set_stringvalue('author1')
# Deleting an entity with two properties and one composite index should
# remove seven entries.
composite_index = entity_pb.CompositeIndex()
composite_index.set_id(123)
composite_index.set_app_id('guestbook')
definition = composite_index.mutable_definition()
definition.set_entity_type('Greeting')
prop1 = definition.add_property()
prop1.set_name('content')
prop1.set_direction(datastore_pb.Query_Order.ASCENDING)
prop2 = definition.add_property()
prop2.set_name('author')
prop1.set_direction(datastore_pb.Query_Order.ASCENDING)
deletions = deletions_for_entity(entity, (composite_index,))
self.assertEqual(len(deletions), 7)
self.assertEqual(deletions[0]['table'], dbconstants.ASC_PROPERTY_TABLE)
self.assertEqual(deletions[1]['table'], dbconstants.ASC_PROPERTY_TABLE)
self.assertEqual(deletions[2]['table'], dbconstants.DSC_PROPERTY_TABLE)
self.assertEqual(deletions[3]['table'], dbconstants.DSC_PROPERTY_TABLE)
self.assertEqual(deletions[4]['table'], dbconstants.COMPOSITE_TABLE)
self.assertEqual(deletions[5]['table'], dbconstants.APP_ENTITY_TABLE)
self.assertEqual(deletions[6]['table'], dbconstants.APP_KIND_TABLE)
def test_mutations_for_entity(self):
entity = self.get_new_entity_proto(*self.BASIC_ENTITY)
txn = 1
# Adding an entity with one property should add four entries.
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
dd = DatastoreDistributed(db_batch, None)
mutations = mutations_for_entity(entity, txn)
self.assertEqual(len(mutations), 4)
self.assertEqual(mutations[0]['table'], dbconstants.APP_ENTITY_TABLE)
self.assertEqual(mutations[1]['table'], dbconstants.APP_KIND_TABLE)
self.assertEqual(mutations[2]['table'], dbconstants.ASC_PROPERTY_TABLE)
self.assertEqual(mutations[3]['table'], dbconstants.DSC_PROPERTY_TABLE)
# Updating an entity with one property should delete two entries and add
# four more.
new_entity = entity_pb.EntityProto()
new_entity.MergeFrom(entity)
new_entity.property_list()[0].value().set_stringvalue('updated content')
mutations = mutations_for_entity(entity, txn, new_entity)
self.assertEqual(len(mutations), 6)
self.assertEqual(mutations[0]['table'], dbconstants.ASC_PROPERTY_TABLE)
self.assertEqual(mutations[0]['operation'], dbconstants.TxnActions.DELETE)
self.assertEqual(mutations[1]['table'], dbconstants.DSC_PROPERTY_TABLE)
self.assertEqual(mutations[1]['operation'], dbconstants.TxnActions.DELETE)
self.assertEqual(mutations[2]['table'], dbconstants.APP_ENTITY_TABLE)
self.assertEqual(mutations[3]['table'], dbconstants.APP_KIND_TABLE)
self.assertEqual(mutations[4]['table'], dbconstants.ASC_PROPERTY_TABLE)
self.assertEqual(mutations[5]['table'], dbconstants.DSC_PROPERTY_TABLE)
prop = entity.add_property()
prop.set_name('author')
prop.set_multiple(0)
value = prop.mutable_value()
value.set_stringvalue('author1')
prop = new_entity.add_property()
prop.set_name('author')
prop.set_multiple(0)
value = prop.mutable_value()
value.set_stringvalue('author1')
# Updating one property of an entity with two properties and one composite
# index should remove three entries and add seven more.
composite_index = entity_pb.CompositeIndex()
composite_index.set_id(123)
composite_index.set_app_id('guestbook')
definition = composite_index.mutable_definition()
definition.set_entity_type('Greeting')
prop1 = definition.add_property()
prop1.set_name('content')
prop1.set_direction(datastore_pb.Query_Order.ASCENDING)
prop2 = definition.add_property()
prop2.set_name('author')
prop1.set_direction(datastore_pb.Query_Order.ASCENDING)
mutations = mutations_for_entity(entity, txn, new_entity,
(composite_index,))
self.assertEqual(len(mutations), 10)
self.assertEqual(mutations[0]['table'], dbconstants.ASC_PROPERTY_TABLE)
self.assertEqual(mutations[0]['operation'], dbconstants.TxnActions.DELETE)
self.assertEqual(mutations[1]['table'], dbconstants.DSC_PROPERTY_TABLE)
self.assertEqual(mutations[1]['operation'], dbconstants.TxnActions.DELETE)
self.assertEqual(mutations[2]['table'], dbconstants.COMPOSITE_TABLE)
self.assertEqual(mutations[2]['operation'], dbconstants.TxnActions.DELETE)
self.assertEqual(mutations[3]['table'], dbconstants.APP_ENTITY_TABLE)
self.assertEqual(mutations[4]['table'], dbconstants.APP_KIND_TABLE)
self.assertEqual(mutations[5]['table'], dbconstants.ASC_PROPERTY_TABLE)
self.assertEqual(mutations[6]['table'], dbconstants.ASC_PROPERTY_TABLE)
self.assertEqual(mutations[7]['table'], dbconstants.DSC_PROPERTY_TABLE)
self.assertEqual(mutations[8]['table'], dbconstants.DSC_PROPERTY_TABLE)
self.assertEqual(mutations[9]['table'], dbconstants.COMPOSITE_TABLE)
  def test_apply_txn_changes(self):
    """Verify that applying a committed transaction replays its recorded
    mutations.

    Builds a mocked batch interface whose range_query returns a single PUT
    record for one entity, then checks that apply_txn_changes runs through
    batch_get_entity/batch_mutate without error.
    """
    app = 'guestbook'
    txn = 1
    entity = self.get_new_entity_proto(app, *self.BASIC_ENTITY[1:])
    db_batch = flexmock()
    db_batch.should_receive('valid_data_version').and_return(True)
    # The transaction journal contains one PUT for this entity.
    db_batch.should_receive('range_query').and_return([{
      'txn_entity_1': {'operation': dbconstants.TxnActions.PUT,
                       'operand': entity.Encode()}
    }])
    # No composite indices defined for this app.
    db_batch.should_receive('get_indices').and_return([])
    dd = DatastoreDistributed(db_batch, None)
    prefix = dd.get_table_prefix(entity)
    entity_key = dd.get_entity_key(prefix, entity.key().path())
    # The current (pre-transaction) version of the entity is empty.
    db_batch.should_receive('batch_get_entity').and_return({entity_key: {}})
    db_batch.should_receive('batch_mutate')
    dd.apply_txn_changes(app, txn)
def test_delete_entities_txn(self):
app = 'guestbook'
txn_hash = {'root_key': 1}
txn_str = '1'.zfill(ID_KEY_LENGTH)
entity = self.get_new_entity_proto(app, *self.BASIC_ENTITY[1:])
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
dd = DatastoreDistributed(db_batch, None)
keys = [entity.key()]
prefix = dd.get_table_prefix(entity.key())
entity_key = dd.get_entity_key(prefix, entity.key().path())
encoded_path = str(
dd.encode_index_pb(entity.key().path()))
txn_keys = [dd._SEPARATOR.join([app, txn_str, '', encoded_path])]
txn_values = {
txn_keys[0]: {
dbconstants.TRANSACTIONS_SCHEMA[0]: dbconstants.TxnActions.DELETE,
dbconstants.TRANSACTIONS_SCHEMA[1]: entity_key,
dbconstants.TRANSACTIONS_SCHEMA[2]: ''
}
}
flexmock(dd).should_receive('get_root_key').and_return('root_key')
db_batch.should_receive('batch_put_entity').with_args(
dbconstants.TRANSACTIONS_TABLE,
txn_keys,
dbconstants.TRANSACTIONS_SCHEMA,
txn_values,
ttl=TX_TIMEOUT * 2
)
dd.delete_entities_txn(app, keys, txn_hash)
def test_put_entities_txn(self):
app = 'guestbook'
txn_hash = {'root_key': 1}
txn_str = '1'.zfill(ID_KEY_LENGTH)
entity = self.get_new_entity_proto(app, *self.BASIC_ENTITY[1:])
db_batch = flexmock()
db_batch.should_receive('valid_data_version').and_return(True)
dd = DatastoreDistributed(db_batch, None)
entities = [entity]
encoded_path = str(
dd.encode_index_pb(entity.key().path()))
txn_keys = [dd._SEPARATOR.join([app, txn_str, '', encoded_path])]
txn_values = {
txn_keys[0]: {
dbconstants.TRANSACTIONS_SCHEMA[0]: dbconstants.TxnActions.PUT,
dbconstants.TRANSACTIONS_SCHEMA[1]: entity.Encode(),
dbconstants.TRANSACTIONS_SCHEMA[2]: ''
}
}
flexmock(dd).should_receive('get_root_key').and_return('root_key')
db_batch.should_receive('batch_put_entity').with_args(
dbconstants.TRANSACTIONS_TABLE,
txn_keys,
dbconstants.TRANSACTIONS_SCHEMA,
txn_values,
ttl=TX_TIMEOUT * 2
)
dd.put_entities_txn(entities, txn_hash, app)
# Allow running this test module directly as well as through a test runner.
if __name__ == "__main__":
  unittest.main()
| 43.691489
| 106
| 0.71937
| 5,811
| 45,177
| 5.253657
| 0.062296
| 0.080481
| 0.037473
| 0.05765
| 0.828098
| 0.793377
| 0.744997
| 0.728258
| 0.670117
| 0.648989
| 0
| 0.017779
| 0.154636
| 45,177
| 1,033
| 107
| 43.733785
| 0.781598
| 0.023906
| 0
| 0.626728
| 0
| 0.003456
| 0.135834
| 0.041261
| 0
| 0
| 0
| 0
| 0.099078
| 1
| 0.046083
| false
| 0.001152
| 0.025346
| 0
| 0.078341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d49d7f273f2c91d0119bd0f184580c6b2ab36116
| 603
|
py
|
Python
|
6_heroku_flask_app/app.py
|
eklee151/Capstone
|
7c2b7f4321ed650961bda313b66348b8a5e25593
|
[
"CC0-1.0"
] | 1
|
2020-12-13T12:26:31.000Z
|
2020-12-13T12:26:31.000Z
|
6_heroku_flask_app/app.py
|
eklee151/Adverse-Childhood-Experiences
|
7c2b7f4321ed650961bda313b66348b8a5e25593
|
[
"CC0-1.0"
] | null | null | null |
6_heroku_flask_app/app.py
|
eklee151/Adverse-Childhood-Experiences
|
7c2b7f4321ed650961bda313b66348b8a5e25593
|
[
"CC0-1.0"
] | null | null | null |
from flask import Flask, render_template

app = Flask(__name__)


@app.route('/')
def home():
    """Render the landing page."""
    return render_template('home.html')


@app.route('/abuse')
def abuse():
    """Render the abuse page."""
    return render_template('abuse.html')


@app.route('/household')
def household():
    """Render the household page."""
    return render_template('household.html')


@app.route('/cdc-kaiser')
def cdc_kaiser():
    """Render the CDC-Kaiser study page."""
    return render_template('cdc-kaiser.html')


@app.route('/resources')
def resources():
    """Render the resources page."""
    return render_template('resources.html')


@app.route('/faqs')
def faqs():
    """Render the FAQ page."""
    return render_template('faqs.html')


if __name__ == '__main__':
    # NOTE(review): debug mode is enabled here; disable before deploying.
    app.run(debug=True)
| 19.451613
| 45
| 0.691542
| 78
| 603
| 5.089744
| 0.294872
| 0.246851
| 0.302267
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134328
| 603
| 31
| 46
| 19.451613
| 0.760536
| 0
| 0
| 0
| 0
| 0
| 0.201987
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0
| 0.045455
| 0.272727
| 0.590909
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
d4beda1805471309475cc47776e4d417fe7dad77
| 19
|
py
|
Python
|
packages/kite/node_modules/kite-api/test/fixtures/sources/json-completions.py
|
tpaclatee/atom-setup-python
|
0d4038b948e69b72a0beda2620c4ce559be9dde8
|
[
"MIT"
] | 2
|
2019-02-11T12:16:13.000Z
|
2019-04-21T21:59:39.000Z
|
packages/kite/node_modules/kite-api/test/fixtures/sources/json-completions.py
|
tpaclatee/atom-setup-python
|
0d4038b948e69b72a0beda2620c4ce559be9dde8
|
[
"MIT"
] | 44
|
2018-07-26T18:44:33.000Z
|
2021-06-02T00:03:09.000Z
|
packages/kite/node_modules/kite-api/test/fixtures/sources/json-completions.py
|
tpaclatee/atom-setup-python
|
0d4038b948e69b72a0beda2620c4ce559be9dde8
|
[
"MIT"
] | 2
|
2021-09-19T06:30:27.000Z
|
2022-01-18T20:41:38.000Z
|
import json
json.
| 4.75
| 11
| 0.736842
| 3
| 19
| 4.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 19
| 3
| 12
| 6.333333
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d4ddfad224695f5f988a2f200340f1a650836e27
| 209
|
py
|
Python
|
htmlparser/src/Script.py
|
weblit/html-parser
|
9fa9cc97ff1a747fd742397ad6b2d65d10aeba50
|
[
"MIT"
] | 1
|
2022-01-03T07:28:21.000Z
|
2022-01-03T07:28:21.000Z
|
src/src/Script.py
|
weblit/weblit
|
f1de52348b1fde738fab2a5591be8345a430aa37
|
[
"Apache-2.0"
] | 3
|
2022-01-12T14:51:20.000Z
|
2022-02-01T05:14:39.000Z
|
src/src/Script.py
|
weblit/weblit
|
f1de52348b1fde738fab2a5591be8345a430aa37
|
[
"Apache-2.0"
] | null | null | null |
class Script:
    """Plain value holder for a parsed script reference.

    Attributes:
        sourceURL: location of the script source.
        otherArguments: any additional arguments, stored verbatim.
    """

    sourceURL: str
    otherArguments: list

    def __init__(self, sourceURL: str, otherArguments: list) -> None:
        """Store the script URL and its extra arguments unchanged."""
        self.otherArguments = otherArguments
        self.sourceURL = sourceURL
| 26.125
| 69
| 0.688995
| 20
| 209
| 7
| 0.5
| 0.171429
| 0.371429
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.23445
| 209
| 7
| 70
| 29.857143
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
be051380c4c464e4dcd7e3ebbc8c2b0ef7a06956
| 8,833
|
py
|
Python
|
tests/test_mssa_decomposer.py
|
harveybc/feature-engineering
|
21e765106e7c6bf503d83d18a781806fc9a863cf
|
[
"MIT"
] | 5
|
2020-05-18T21:08:07.000Z
|
2021-11-18T00:57:22.000Z
|
tests/test_mssa_decomposer.py
|
harveybc/feature-engineering
|
21e765106e7c6bf503d83d18a781806fc9a863cf
|
[
"MIT"
] | 7
|
2020-04-04T03:56:39.000Z
|
2020-04-04T05:21:42.000Z
|
tests/test_mssa_decomposer.py
|
harveybc/training-signal
|
21e765106e7c6bf503d83d18a781806fc9a863cf
|
[
"MIT"
] | 3
|
2020-05-18T21:08:13.000Z
|
2021-07-13T08:27:22.000Z
|
# -*- coding: utf-8 -*-
import pytest
import csv
import sys
import os
from filecmp import cmp
from feature_eng.feature_eng import FeatureEng
__author__ = "Harvey Bastidas"
__copyright__ = "Harvey Bastidas"
__license__ = "mit"
class Conf:
    """Configuration object handed to the feature_eng plugin under test."""

    def __init__(self):
        """Populate every attribute the mssa_decomposer plugin reads."""
        base_dir = os.path.dirname(__file__)
        # Input/output dataset locations, relative to this test module.
        self.input_file = os.path.join(base_dir, "data/test_c02_t03_output.csv")
        self.output_file = os.path.join(base_dir, "data/test_c03_output.csv")
        # Plugin selection and decomposition parameters.
        self.list_plugins = False
        self.core_plugin = "mssa_decomposer"
        self.num_components = 8
        self.window_size = 30
        # Optional features, all disabled by default.
        self.group_file = None
        self.plot_prefix = None
        self.w_prefix = None
class TestMSSADecomposer:
""" Component Tests """
def setup_method(self, test_method):
""" Component Tests Constructor """
self.conf = Conf()
self.rows_d, self.cols_d = self.get_size_csv(self.conf.input_file)
""" Get the number of rows and columns of the test dataset """
try:
os.remove(self.conf.output_file)
except:
print("No test output file found.")
pass
def get_size_csv(self, csv_file):
""" Get the number of rows and columns of a test dataset, used in all tests.
Args:
csv_file (string): Path and filename of a test dataset
Returns:
(int,int): number of rows, number of columns
"""
rows = list(csv.reader(open(csv_file)))
return (len(rows), len(rows[0]))
def test_C03T01_core(self):
""" Loads plugin from FeatureEng using parameters from setup_method() and Asses that output file has 1 column and num_ticks - forward_ticks """
self.fe = FeatureEng(self.conf)
# get the number of rows and cols from out_file
rows_o, cols_o = self.get_size_csv(self.conf.output_file)
# assertion
assert (cols_o == self.fe.ep_core.cols_d * self.conf.num_components)
def test_C03T02_cmdline(self):
""" same as C03T02, but via command-line """
os.system("feature_eng --core_plugin mssa_decomposer --input_file "
+ self.conf.input_file
+ " --output_file "
+ self.conf.output_file
+ " --num_components "
+ str(self.conf.num_components)
)
# get the size of the output dataset
rows_d, cols_d = self.get_size_csv(self.conf.input_file)
# get the size of the output dataset
rows_o, cols_o = self.get_size_csv(self.conf.output_file)
# assert if the number of rows an colums is less than the input dataset and > 0
assert (cols_o == self.cols_d * self.conf.num_components)
def test_C03T03_group_file(self):
""" assert if there are 3 groups per feature in the output dataset """
os.system("feature_eng --core_plugin mssa_decomposer --input_file "
+ self.conf.input_file
+ " --output_file "
+ self.conf.output_file
+ " --num_components "
+ str(self.conf.num_components)
+ " --group_file "
+ os.path.join(os.path.dirname(__file__), "data/groups.json")
)
# get the size of the output dataset
rows_d, cols_d = self.get_size_csv(self.conf.input_file)
# get the size of the output dataset
rows_o, cols_o = self.get_size_csv(self.conf.output_file)
# assert if there are 3 groups per feature in the output dataset
assert (cols_o == self.cols_d * 4)
def test_C03T04_w_prefix(self):
""" assert if there are 3 groups per feature in the output dataset """
os.system("feature_eng --core_plugin mssa_decomposer --input_file "
+ self.conf.input_file
+ " --output_file "
+ self.conf.output_file
+ " --num_components "
+ str(self.conf.num_components)
+ " --w_prefix "
+ os.path.join(os.path.dirname(__file__), "plots/w_")
)
# get the size of the output dataset
rows_d, cols_d = self.get_size_csv(self.conf.input_file)
# get the size of the output dataset
rows_o, cols_o = self.get_size_csv(self.conf.output_file)
# assert if there are 3 groups per feature in the output dataset
assert (cols_o == self.cols_d * self.conf.num_components)
def test_C03T05_w_prefix_group_file(self):
""" assert if there are 4 groups per feature in the output dataset """
os.system("feature_eng --core_plugin mssa_decomposer --input_file "
+ self.conf.input_file
+ " --output_file "
+ self.conf.output_file
+ " --num_components "
+ str(self.conf.num_components)
+ " --w_prefix "
+ os.path.join(os.path.dirname(__file__), "plots/w_")
+ " --group_file "
+ os.path.join(os.path.dirname(__file__), "data/groups.json")
)
# get the size of the output dataset
rows_d, cols_d = self.get_size_csv(self.conf.input_file)
# get the size of the output dataset
rows_o, cols_o = self.get_size_csv(self.conf.output_file)
# assert if there are 3 groups per feature in the output dataset
assert (cols_o == self.cols_d * 4)
def test_C03T06_plot_prefix(self):
""" """
os.system("feature_eng --core_plugin mssa_decomposer --input_file "
+ self.conf.input_file
+ " --output_file "
+ self.conf.output_file
+ " --num_components "
+ str(self.conf.num_components)
+ " --plot_prefix "
+ os.path.join(os.path.dirname(__file__), "plots/")
)
# get the size of the output dataset
rows_d, cols_d = self.get_size_csv(self.conf.input_file)
# get the size of the output dataset
rows_o, cols_o = self.get_size_csv(self.conf.output_file)
# assert if there are 3 groups per feature in the output dataset
assert (cols_o == self.cols_d * self.conf.num_components)
def test_C03T07_svht_plot_w_prefix(self):
""" """
os.system("feature_eng --core_plugin mssa_decomposer --input_file "
+ self.conf.input_file
+ " --output_file "
+ self.conf.output_file
+ " --num_components 0"
+ " --plot_prefix "
+ os.path.join(os.path.dirname(__file__), "plots/svht_")
+ " --w_prefix "
+ os.path.join(os.path.dirname(__file__), "plots/svht_w_")
)
# get the size of the output dataset
rows_d, cols_d = self.get_size_csv(self.conf.input_file)
# get the size of the output dataset
rows_o, cols_o = self.get_size_csv(self.conf.output_file)
# assert if there are 3 groups per feature in the output dataset
assert (cols_o > self.cols_d)
def test_C03T08_svht_plot_w_prefix_group(self):
""" assert if there are 4 groups per feature in the output dataset """
os.system("feature_eng --core_plugin mssa_decomposer --input_file "
+ self.conf.input_file
+ " --output_file "
+ self.conf.output_file
+ " --num_components 0"
+ " --plot_prefix "
+ os.path.join(os.path.dirname(__file__), "plots/svht_gr_")
+ " --w_prefix "
+ os.path.join(os.path.dirname(__file__), "plots/svht_w_gr_")
+ " --group_file "
+ os.path.join(os.path.dirname(__file__), "data/groups.json")
)
# get the size of the output dataset
rows_d, cols_d = self.get_size_csv(self.conf.input_file)
# get the size of the output dataset
rows_o, cols_o = self.get_size_csv(self.conf.output_file)
# assert if there are 3 groups per feature in the output dataset
assert (cols_o > self.cols_d)
def test_C03T09_svht_multi(self):
""" assert if there are 4 groups per feature in the output dataset """
os.system("feature_eng --core_plugin mssa_decomposer --input_file "
+ os.path.join(os.path.dirname(__file__), "data/test_input.csv")
+ " --output_file "
+ self.conf.output_file
+ " --num_components 0"
)
# get the size of the output dataset
rows_d, cols_d = self.get_size_csv(self.conf.input_file)
# get the size of the output dataset
rows_o, cols_o = self.get_size_csv(self.conf.output_file)
# assert if there are 3 groups per feature in the output dataset
assert (cols_o > self.cols_d)
| 42.263158
| 151
| 0.611117
| 1,206
| 8,833
| 4.201493
| 0.121061
| 0.071048
| 0.088415
| 0.052497
| 0.75153
| 0.75153
| 0.747385
| 0.743833
| 0.73732
| 0.717584
| 0
| 0.011125
| 0.287671
| 8,833
| 209
| 152
| 42.263158
| 0.794183
| 0.224499
| 0
| 0.595745
| 0
| 0
| 0.169025
| 0.007911
| 0
| 0
| 0
| 0
| 0.06383
| 1
| 0.085106
| false
| 0.007092
| 0.042553
| 0
| 0.148936
| 0.007092
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
be19e1350ba260e07d479355bdff3ea1e4d0b490
| 95
|
py
|
Python
|
tests/test_samplemodule.py
|
tateishi/python-simple
|
13f1450ad2c715f2e687569a504010a49543d52a
|
[
"MIT"
] | null | null | null |
tests/test_samplemodule.py
|
tateishi/python-simple
|
13f1450ad2c715f2e687569a504010a49543d52a
|
[
"MIT"
] | null | null | null |
tests/test_samplemodule.py
|
tateishi/python-simple
|
13f1450ad2c715f2e687569a504010a49543d52a
|
[
"MIT"
] | null | null | null |
from samplemodule import message
def test_message():
    """The module-level message must be exactly the expected greeting."""
    expected = 'Hello World'
    assert message == expected
| 15.833333
| 35
| 0.705263
| 11
| 95
| 6
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.221053
| 95
| 6
| 36
| 15.833333
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0.114583
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
076c55487542f1122895593e2b5d44ec585c6675
| 72
|
py
|
Python
|
deepextract/__init__.py
|
bobbytrapz/deepextract
|
cefbacea7219a0e93966b7e431ff20d02fdf9627
|
[
"CC0-1.0"
] | 1
|
2019-11-08T16:27:37.000Z
|
2019-11-08T16:27:37.000Z
|
deepextract/__init__.py
|
bobbytrapz/deepextract
|
cefbacea7219a0e93966b7e431ff20d02fdf9627
|
[
"CC0-1.0"
] | null | null | null |
deepextract/__init__.py
|
bobbytrapz/deepextract
|
cefbacea7219a0e93966b7e431ff20d02fdf9627
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# flake8: noqa: F401
from .deepextract import *
| 18
| 26
| 0.625
| 9
| 72
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084746
| 0.180556
| 72
| 3
| 27
| 24
| 0.677966
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0781e00fc1083e8756af9500c037a44d2ac95ace
| 60
|
py
|
Python
|
sickbeard/lib/hachoir_parser/network/__init__.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
sickbeard/lib/hachoir_parser/network/__init__.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
sickbeard/lib/hachoir_parser/network/__init__.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
from lib.hachoir_parser.network.tcpdump import TcpdumpFile
| 20
| 58
| 0.866667
| 8
| 60
| 6.375
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 60
| 2
| 59
| 30
| 0.927273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
078eb48447124a7c171571de4603b9336340ea74
| 1,499
|
py
|
Python
|
python/phonenumbers/data/region_MU.py
|
chlammas/python-phonenumbers
|
e75438338d70e33aee530a29960d2b0911dfb09f
|
[
"Apache-2.0"
] | null | null | null |
python/phonenumbers/data/region_MU.py
|
chlammas/python-phonenumbers
|
e75438338d70e33aee530a29960d2b0911dfb09f
|
[
"Apache-2.0"
] | null | null | null |
python/phonenumbers/data/region_MU.py
|
chlammas/python-phonenumbers
|
e75438338d70e33aee530a29960d2b0911dfb09f
|
[
"Apache-2.0"
] | null | null | null |
"""Auto-generated file, do not edit by hand. MU metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_MU = PhoneMetadata(id='MU', country_code=230, international_prefix='0(?:0|[24-7]0|3[03])',
general_desc=PhoneNumberDesc(national_number_pattern='(?:5|8\\d\\d)\\d{7}|[2-468]\\d{6}', possible_length=(7, 8, 10)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:2(?:[0346-8]\\d|1[0-7])|4(?:[013568]\\d|2[4-7])|54(?:[3-5]\\d|71)|6\\d\\d|8(?:14|3[129]))\\d{4}', example_number='54480123', possible_length=(7, 8)),
mobile=PhoneNumberDesc(national_number_pattern='5(?:4(?:2[1-389]|7[1-9])|87[15-8])\\d{4}|5(?:2[5-9]|4[3-689]|[57]\\d|8[0-689]|9[0-8])\\d{5}', example_number='52512345', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='802\\d{7}|80[0-2]\\d{4}', example_number='8001234', possible_length=(7, 10)),
premium_rate=PhoneNumberDesc(national_number_pattern='30\\d{5}', example_number='3012345', possible_length=(7,)),
voip=PhoneNumberDesc(national_number_pattern='3(?:20|9\\d)\\d{4}', example_number='3201234', possible_length=(7,)),
preferred_international_prefix='020',
number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[2-46]|8[013]']),
NumberFormat(pattern='(\\d{4})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['5']),
NumberFormat(pattern='(\\d{5})(\\d{5})', format='\\1 \\2', leading_digits_pattern=['8'])])
| 99.933333
| 208
| 0.667111
| 237
| 1,499
| 4.050633
| 0.337553
| 0.014583
| 0.18125
| 0.225
| 0.16875
| 0.091667
| 0.0625
| 0.0625
| 0
| 0
| 0
| 0.12894
| 0.068712
| 1,499
| 14
| 209
| 107.071429
| 0.558739
| 0.035357
| 0
| 0
| 1
| 0.25
| 0.289583
| 0.170139
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
07a4ae27fc47855640afe5c8440912a7fedaa95f
| 45
|
py
|
Python
|
activitystreams/exception.py
|
sovaa/activitystreams
|
def84e8d70efe3f78c45a25fa69f641577efa19c
|
[
"MIT"
] | 1
|
2017-04-28T23:23:32.000Z
|
2017-04-28T23:23:32.000Z
|
activitystreams/exception.py
|
sovaa/activitystreams
|
def84e8d70efe3f78c45a25fa69f641577efa19c
|
[
"MIT"
] | null | null | null |
activitystreams/exception.py
|
sovaa/activitystreams
|
def84e8d70efe3f78c45a25fa69f641577efa19c
|
[
"MIT"
] | null | null | null |
class ActivityException(Exception):
    """Exception type raised by this package.

    NOTE(review): carries no state beyond the standard Exception message;
    exists so callers can catch activity-stream errors specifically.
    """
| 15
| 35
| 0.777778
| 4
| 45
| 8.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 2
| 36
| 22.5
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
07caa7386bf421e5486e090d4ba6131e7beafc7f
| 143
|
py
|
Python
|
celery_uncovered/toyex/views.py
|
TinlokLee/Python-CelerySS
|
f66c42d2eace51e28b17ed6d7eec0fa765f9d90b
|
[
"MIT"
] | null | null | null |
celery_uncovered/toyex/views.py
|
TinlokLee/Python-CelerySS
|
f66c42d2eace51e28b17ed6d7eec0fa765f9d90b
|
[
"MIT"
] | null | null | null |
celery_uncovered/toyex/views.py
|
TinlokLee/Python-CelerySS
|
f66c42d2eace51e28b17ed6d7eec0fa765f9d90b
|
[
"MIT"
] | null | null | null |
from django.views.generic import View
class ReportErrorView(View):
    """View that always fails: every GET raises ZeroDivisionError.

    The division by zero is intentional — presumably used to exercise error
    reporting downstream (TODO confirm against callers).
    """

    def get(self, request, *args, **kwargs):
        # Deliberately trigger ZeroDivisionError on each request.
        numerator, denominator = 1, 0
        return numerator / denominator
| 17.875
| 45
| 0.636364
| 18
| 143
| 5.055556
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018868
| 0.258741
| 143
| 7
| 46
| 20.428571
| 0.839623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
07cc7b989cfa384bd4ca511387c7e386d6a7bef8
| 120
|
py
|
Python
|
stormspotter/ingestor/assets/azure/utils.py
|
jemrobinson/Stormspotter
|
833695a63b848e3e10369a630d228e7c51be6c65
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
stormspotter/ingestor/assets/azure/utils.py
|
jemrobinson/Stormspotter
|
833695a63b848e3e10369a630d228e7c51be6c65
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
stormspotter/ingestor/assets/azure/utils.py
|
jemrobinson/Stormspotter
|
833695a63b848e3e10369a630d228e7c51be6c65
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-08T21:54:06.000Z
|
2021-09-08T21:54:06.000Z
|
import re
from msrestazure.azure_exceptions import CloudError
from stormspotter.ingestor.utils import SSC as context
| 17.142857
| 54
| 0.85
| 16
| 120
| 6.3125
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 120
| 6
| 55
| 20
| 0.961905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
07eeb246b2c538e4f48a224f8946c1c654e78112
| 217
|
py
|
Python
|
main/PluginDemos/GlobalBoundaryPixelTracker/Simulation/cellsort_2D_boundary.py
|
JulianoGianlupi/nh-cc3d-4x-base-tool
|
c0f4aceebd4c5bf3ec39e831ef851e419b161259
|
[
"CC0-1.0"
] | null | null | null |
main/PluginDemos/GlobalBoundaryPixelTracker/Simulation/cellsort_2D_boundary.py
|
JulianoGianlupi/nh-cc3d-4x-base-tool
|
c0f4aceebd4c5bf3ec39e831ef851e419b161259
|
[
"CC0-1.0"
] | null | null | null |
main/PluginDemos/GlobalBoundaryPixelTracker/Simulation/cellsort_2D_boundary.py
|
JulianoGianlupi/nh-cc3d-4x-base-tool
|
c0f4aceebd4c5bf3ec39e831ef851e419b161259
|
[
"CC0-1.0"
] | 1
|
2021-02-26T21:50:29.000Z
|
2021-02-26T21:50:29.000Z
|
from cc3d import CompuCellSetup
from cellsort_2D_boundarySteppables import cellsort_2D_boundarySteppable
# Register the steppable with CompuCell3D (frequency=1 means it is invoked on
# every step), then hand control to the simulation main loop.
CompuCellSetup.register_steppable(steppable=cellsort_2D_boundarySteppable(frequency=1))
CompuCellSetup.run()
| 27.125
| 87
| 0.894009
| 23
| 217
| 8.130435
| 0.565217
| 0.160428
| 0.28877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02451
| 0.059908
| 217
| 7
| 88
| 31
| 0.892157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6af71ebd42ac9ce6c62b1eedfd3ccb3764bf5d25
| 189
|
py
|
Python
|
utils/tests/test_State.py
|
YotamAlon/AudioChef
|
d06796b789eda945b018e5685e8440a232ceadd3
|
[
"MIT"
] | null | null | null |
utils/tests/test_State.py
|
YotamAlon/AudioChef
|
d06796b789eda945b018e5685e8440a232ceadd3
|
[
"MIT"
] | null | null | null |
utils/tests/test_State.py
|
YotamAlon/AudioChef
|
d06796b789eda945b018e5685e8440a232ceadd3
|
[
"MIT"
] | null | null | null |
from utils.State import state
class TestState:
    """Tests for the shared state object's property accessors."""

    def test_get_set_prop(self):
        """A value stored via set_prop must come back from get_prop."""
        key, value = 'test_key', 'test_value'
        state.set_prop(key, value)
        assert state.get_prop(key) == value
| 23.625
| 57
| 0.693122
| 28
| 189
| 4.357143
| 0.535714
| 0.114754
| 0.180328
| 0.245902
| 0.327869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 189
| 7
| 58
| 27
| 0.797386
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
ed3fec30bb7ec8383945eb4dae793f2652b7b9e3
| 640
|
py
|
Python
|
src/flask_bombril/form_validators/unique/__init__.py
|
marcoprado17/flask-bone
|
772d25bdf6c6e41701da1ef2e2a67bae7ae21757
|
[
"MIT"
] | null | null | null |
src/flask_bombril/form_validators/unique/__init__.py
|
marcoprado17/flask-bone
|
772d25bdf6c6e41701da1ef2e2a67bae7ae21757
|
[
"MIT"
] | null | null | null |
src/flask_bombril/form_validators/unique/__init__.py
|
marcoprado17/flask-bone
|
772d25bdf6c6e41701da1ef2e2a67bae7ae21757
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# ======================================================================================================================
# The MIT License (MIT)
# ======================================================================================================================
# Copyright (c) 2016 [Marco Aurélio Prado - marco.pdsv@gmail.com]
# ======================================================================================================================
from unique import Unique
from test_case_invalid_inputs import TestCaseInvalidInputs
from test_case_valid_inputs import TestCaseValidInputs
| 58.181818
| 120
| 0.339063
| 39
| 640
| 5.410256
| 0.74359
| 0.075829
| 0.113744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008333
| 0.0625
| 640
| 10
| 121
| 64
| 0.343333
| 0.757813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ed47b8526f19e3d6973785f29bf760f412e50d00
| 471
|
py
|
Python
|
challenges/3.A.Logical_And_Operator/lesson_tests.py
|
pradeepsaiu/python-coding-challenges
|
b435ab650d85de267eeaa31a55ff77ef5dbff86b
|
[
"BSD-3-Clause"
] | 141
|
2017-05-07T00:38:22.000Z
|
2022-03-25T10:14:25.000Z
|
challenges/3.A.Logical_And_Operator/lesson_tests.py
|
pradeepsaiu/python-coding-challenges
|
b435ab650d85de267eeaa31a55ff77ef5dbff86b
|
[
"BSD-3-Clause"
] | 23
|
2017-05-06T23:57:37.000Z
|
2018-03-23T19:07:32.000Z
|
challenges/3.A.Logical_And_Operator/lesson_tests.py
|
pradeepsaiu/python-coding-challenges
|
b435ab650d85de267eeaa31a55ff77ef5dbff86b
|
[
"BSD-3-Clause"
] | 143
|
2017-05-07T09:33:35.000Z
|
2022-03-12T21:04:13.000Z
|
import unittest
from main import *
class BooleanAndOperatorTests(unittest.TestCase):
    """Checks boolean_and() on values around and inside the 25..50 range."""

    def test_main(self):
        """boolean_and returns "Pass" for 25/45/50 and "Try Again" otherwise."""
        self.assertIsNotNone(boolean_and(30))
        # Same cases and order as the original, table-driven.
        expectations = (
            (1, "Try Again"),
            (24, "Try Again"),
            (25, "Pass"),
            (45, "Pass"),
            (50, "Pass"),
            (51, "Try Again"),
        )
        for argument, expected in expectations:
            self.assertEqual(boolean_and(argument), expected)
| 36.230769
| 54
| 0.685775
| 57
| 471
| 5.526316
| 0.421053
| 0.222222
| 0.419048
| 0.47619
| 0.485714
| 0.209524
| 0
| 0
| 0
| 0
| 0
| 0.033854
| 0.184713
| 471
| 12
| 55
| 39.25
| 0.786458
| 0
| 0
| 0
| 0
| 0
| 0.082803
| 0
| 0
| 0
| 0
| 0
| 0.636364
| 1
| 0.090909
| false
| 0.272727
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
ed55c28f9c5c92f98463fc5c49daed997e554731
| 8,752
|
py
|
Python
|
tests/test_configuration.py
|
amor71/sanic-jwt
|
93a3e51f3c6e6c81f83a99b4e175c3d32b33f204
|
[
"MIT"
] | 1
|
2019-03-19T12:30:33.000Z
|
2019-03-19T12:30:33.000Z
|
tests/test_configuration.py
|
amor71/sanic-jwt
|
93a3e51f3c6e6c81f83a99b4e175c3d32b33f204
|
[
"MIT"
] | null | null | null |
tests/test_configuration.py
|
amor71/sanic-jwt
|
93a3e51f3c6e6c81f83a99b4e175c3d32b33f204
|
[
"MIT"
] | null | null | null |
import pytest
from sanic import Sanic
from sanic.response import json
from sanic_jwt import Configuration, exceptions, initialize, Initialize
from sanic_jwt.configuration import ConfigItem
def test_configuration_initialize_method_default():
    """initialize() with only the required authenticate callable must not raise."""
    try:
        application = Sanic()
        initialize(application, authenticate=lambda: True)
    except Exception as exc:
        pytest.fail("Raised exception: {}".format(exc))
def test_configuration_initialize_class_default():
    """Initialize (class form) with only authenticate must not raise."""
    try:
        application = Sanic()
        Initialize(application, authenticate=lambda: True)
    except Exception as exc:
        pytest.fail("Raised exception: {}".format(exc))
def test_configuration_initialize_class_app_level():
    """A SANIC_JWT_* key on app.config should drive the sanic-jwt setting."""
    application = Sanic()
    application.config.SANIC_JWT_ACCESS_TOKEN_NAME = "app-level"
    jwt_setup = Initialize(application, authenticate=lambda: True)
    # The app-level value is untouched and is what the config resolves to.
    assert application.config.SANIC_JWT_ACCESS_TOKEN_NAME == "app-level"
    assert jwt_setup.config.access_token_name() == "app-level"
def test_configuration_initialize_class_config_level_custom_classes():
    """A Configuration subclass attribute wins over the app-level setting."""
    application = Sanic()
    application.config.SANIC_JWT_ACCESS_TOKEN_NAME = "app-level"

    class MyConfig(Configuration):
        access_token_name = "config-level"

    class MyInitialize(Initialize):
        configuration_class = MyConfig

    jwt_setup = MyInitialize(application, authenticate=lambda: True)
    assert jwt_setup.config.access_token_name() == "config-level"
def test_configuration_initialize_class_instance_level():
    """A keyword passed to Initialize wins over the app-level setting."""
    application = Sanic()
    application.config.SANIC_JWT_ACCESS_TOKEN_NAME = "app-level"
    jwt_setup = Initialize(
        application, authenticate=lambda: True, access_token_name="instance-level"
    )
    assert jwt_setup.config.access_token_name() == "instance-level"
def test_configuration_initialize_class_instance_level_custom_classes():
    """An Initialize keyword wins over both the app-level value and a
    Configuration subclass attribute."""
    application = Sanic()
    application.config.SANIC_JWT_ACCESS_TOKEN_NAME = "app-level"

    class MyConfig(Configuration):
        access_token_name = "config-level"

    class MyInitialize(Initialize):
        configuration_class = MyConfig

    jwt_setup = MyInitialize(
        application, authenticate=lambda: True, access_token_name="instance-level"
    )
    assert jwt_setup.config.access_token_name() == "instance-level"
def test_configuration_initialize_class_with_getter():
    """A set_<name>() method on a Configuration subclass supplies the value."""
    app = Sanic()
    class MyConfig(Configuration):
        def set_access_token_name(self):
            return "return-level"
    class MyInitialize(Initialize):
        configuration_class = MyConfig
    sanicjwt = MyInitialize(app, authenticate=lambda: True)
    assert sanicjwt.config.access_token_name() == "return-level"
def test_configuration_initialize_class_as_argument():
    """configuration_class can be passed directly to Initialize."""
    app = Sanic()
    class MyConfig(Configuration):
        def set_access_token_name(self):
            return "return-level"
    sanicjwt = Initialize(app, configuration_class=MyConfig, authenticate=lambda: True)
    assert sanicjwt.config.access_token_name() == "return-level"
def test_configuration_warning_non_callable(caplog):
    """A non-callable set_* attribute is ignored and logged as a warning."""
    app = Sanic()
    class MyConfig(Configuration):
        set_access_token_name = "return-level"
    sanicjwt = Initialize(app, configuration_class=MyConfig, authenticate=lambda: True)
    for record in caplog.records:
        if record.levelname == "WARNING":
            assert (
                record.message
                == 'variable "set_access_token_name" set in Configuration is not callable'
            )
    # The bad setter is ignored, so the library default name survives.
    assert sanicjwt.config.access_token_name() == "access_token"
def test_configuration_warning_non_valid_key(caplog):
    """Unknown keyword arguments to Initialize produce a warning log record."""
    app = Sanic()
    Initialize(app, foobar="baz", authenticate=lambda: True)
    for record in caplog.records:
        if record.levelname == "WARNING":
            assert (
                record.message
                == "Configuration key 'foobar' found is not valid for sanic-jwt"
            )
def test_configuration_dynamic_config():
    """A get_<name>(self, request) override lets a config value depend on the
    incoming request: here the name of the header carrying the JWT is read
    from a custom header when present, else the standard 'authorization'."""
    app = Sanic()
    auth_header_key = "x-authorization-header"
    class MyConfig(Configuration):
        def get_authorization_header(self, request):
            # The custom header's VALUE names which header carries the token.
            if auth_header_key in request.headers:
                return request.headers.get(auth_header_key)
            return "authorization"
    async def authenticate(request, *args, **kwargs):
        return {"user_id": 1}
    sanicjwt = Initialize(app, configuration_class=MyConfig, authenticate=authenticate)
    @app.route("/protected")
    @sanicjwt.protected()
    def protected_route(request):
        return json({"protected": "yes"})
    _, response = app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    access_token = response.json.get(sanicjwt.config.access_token_name(), None)
    assert access_token is not None
    # Token accepted from the dynamically-named header ("foobarbaz") ...
    _, response = app.test_client.get(
        "/protected",
        headers={
            auth_header_key: "foobarbaz",
            "foobarbaz": "Bearer {}".format(access_token),
        },
    )
    assert response.status == 200
    assert response.json.get("protected") == "yes"
    # ... and from the default header when no override header is sent.
    _, response = app.test_client.get(
        "/protected",
        headers={
            sanicjwt.config.authorization_header(): "Bearer {}".format(access_token)
        },
    )
    assert response.status == 200
    assert response.json.get("protected") == "yes"
def test_deprecated_handler_payload_scopes():
    """The removed SANIC_JWT_HANDLER_PAYLOAD_SCOPES setting must be rejected."""
    app = Sanic()
    app.config.SANIC_JWT_HANDLER_PAYLOAD_SCOPES = lambda *a, **kw: {}
    with pytest.raises(exceptions.InvalidConfiguration):
        Initialize(app, authenticate=lambda: True)
def test_deprecated_payload_handler():
    """The removed SANIC_JWT_PAYLOAD_HANDLER setting must be rejected."""
    app = Sanic()
    app.config.SANIC_JWT_PAYLOAD_HANDLER = lambda *a, **kw: {}
    with pytest.raises(exceptions.InvalidConfiguration):
        Initialize(app, authenticate=lambda: True)
def test_deprecated_handler_payload_extend():
    """The removed SANIC_JWT_HANDLER_PAYLOAD_EXTEND setting must be rejected."""
    app = Sanic()
    app.config.SANIC_JWT_HANDLER_PAYLOAD_EXTEND = lambda *a, **kw: {}
    with pytest.raises(exceptions.InvalidConfiguration):
        Initialize(app, authenticate=lambda: True)
def test_empty_string_authorization_prefix():
    """With an empty prefix, the raw token (no 'Bearer ') is accepted in a
    custom authorization header."""
    app = Sanic()
    authorization_header = "custom-authorization-header"
    authorization_header_prefix = ""
    async def authenticate(request, *args, **kwargs):
        return {"user_id": 1}
    sanicjwt = Initialize(
        app,
        authenticate=authenticate,
        authorization_header=authorization_header,
        authorization_header_prefix=authorization_header_prefix,
    )
    @app.route("/protected")
    @sanicjwt.protected()
    def protected_route(request):
        return json({"protected": "yes"})
    _, response = app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    access_token = response.json.get(sanicjwt.config.access_token_name(), None)
    assert access_token is not None
    _, response = app.test_client.get(
        "/protected", headers={authorization_header: access_token}
    )
    assert response.status == 200
    assert response.json.get("protected") == "yes"
    # Same check using the header name exposed through the configuration.
    _, response = app.test_client.get(
        "/protected", headers={sanicjwt.config.authorization_header(): access_token}
    )
    assert response.status == 200
    assert response.json.get("protected") == "yes"
# I don't see the following scenarios happening in real life
# but we have to test them ...
def test_configuration_custom_class_and_config_item():
    """A ConfigItem assigned directly as a class attribute is honored."""
    app = Sanic()
    class MyConfig(Configuration):
        access_token_name = ConfigItem("config-item-level")
    sanicjwt = Initialize(app, configuration_class=MyConfig, authenticate=lambda: True)
    assert sanicjwt.config.access_token_name() == "config-item-level"
def test_configuration_custom_class_and_config_item_as_method():
    """A set_* method may return a ConfigItem instead of a plain value."""
    app = Sanic()
    class MyConfig(Configuration):
        def set_access_token_name(self):
            return ConfigItem("config-item-function-level")
    sanicjwt = Initialize(app, configuration_class=MyConfig, authenticate=lambda: True)
    assert sanicjwt.config.access_token_name() == "config-item-function-level"
def test_configuration_invalid_claim():
    """claim_-prefixed attributes are reserved and not treated as config keys."""
    app = Sanic()
    class MyConfig(Configuration):
        claim_foo = "bar"
    sanicjwt = Initialize(app, configuration_class=MyConfig, authenticate=lambda: True)
    assert "claim_foo" not in sanicjwt.config._all_config_keys
def test_disable_protection():
    """do_protection=False lets @protected() routes through unauthenticated."""
    app = Sanic()
    async def authenticate(request, *args, **kwargs):
        return {"user_id": 1}
    sanicjwt = Initialize(app, authenticate=authenticate, do_protection=False)
    @app.route("/protected")
    @sanicjwt.protected()
    def protected_route(request):
        return json({"protected": "yes"})
    _, response = app.test_client.get("/protected")
    assert response.status == 200
    assert response.json.get("protected") == "yes"
| 28.601307
| 90
| 0.691728
| 971
| 8,752
| 5.993821
| 0.136972
| 0.066151
| 0.06701
| 0.047251
| 0.810997
| 0.764089
| 0.737285
| 0.724742
| 0.684536
| 0.677663
| 0
| 0.002863
| 0.201782
| 8,752
| 305
| 91
| 28.695082
| 0.830232
| 0.009941
| 0
| 0.621891
| 0
| 0
| 0.099284
| 0.014315
| 0
| 0
| 0
| 0
| 0.124378
| 1
| 0.129353
| false
| 0.00995
| 0.024876
| 0.029851
| 0.308458
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed5f3078f02ed1ca5bcc2b82ab9acc7028d3f77c
| 269
|
py
|
Python
|
src/pymortests/basic.py
|
TiKeil/pymor
|
5c6b3b6e1714b5ede11ce7cf03399780ab29d252
|
[
"Unlicense"
] | 1
|
2020-12-31T18:45:48.000Z
|
2020-12-31T18:45:48.000Z
|
src/pymortests/basic.py
|
TreeerT/pymor
|
e8b18d2d4c4b5998f0bd84f6728e365e0693b753
|
[
"Unlicense"
] | 4
|
2022-03-17T10:07:38.000Z
|
2022-03-30T12:41:06.000Z
|
src/pymortests/basic.py
|
TreeerT/pymor
|
e8b18d2d4c4b5998f0bd84f6728e365e0693b753
|
[
"Unlicense"
] | null | null | null |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
def test_importable():
    """Smoke test: importing the pymor.basic aggregate module succeeds."""
    import pymor.basic
| 33.625
| 77
| 0.754647
| 40
| 269
| 5.05
| 0.775
| 0.039604
| 0.09901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042918
| 0.133829
| 269
| 7
| 78
| 38.428571
| 0.824034
| 0.795539
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 1
| 0
| 1.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9c13201590d56bde0b842bd7b885d81dad0d98ef
| 129
|
py
|
Python
|
improc/evaluation/__init__.py
|
antsfamily/improc
|
ceab171b0e61187fa2ced7c58540d5ffde79ebac
|
[
"MIT"
] | 2
|
2019-09-29T08:43:31.000Z
|
2022-01-12T09:46:18.000Z
|
improc/evaluation/__init__.py
|
antsfamily/improc
|
ceab171b0e61187fa2ced7c58540d5ffde79ebac
|
[
"MIT"
] | null | null | null |
improc/evaluation/__init__.py
|
antsfamily/improc
|
ceab171b0e61187fa2ced7c58540d5ffde79ebac
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from .quality import mse, psnr, showorirec, normalization
from .ssims import ssim, gssim
| 32.25
| 58
| 0.821705
| 17
| 129
| 5.941176
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131783
| 129
| 3
| 59
| 43
| 0.901786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9c134e58d22f13e52d65f2ce8e2aa26a4f374f39
| 28
|
py
|
Python
|
code/abc023_a_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | 3
|
2019-08-16T16:55:48.000Z
|
2021-04-11T10:21:40.000Z
|
code/abc023_a_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
code/abc023_a_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
# Read one line of digits from stdin and print the sum of its digits
# (iterating a string yields its characters, each converted with int()).
print(sum(int(digit) for digit in input()))
| 28
| 28
| 0.678571
| 5
| 28
| 3.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 1
| 28
| 28
| 0.678571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
9c732788bf8f5980a9684cfec176b8f8b32c5eed
| 225
|
py
|
Python
|
mySite/applications/resume/urls.py
|
ALittleMoron/django_mySite
|
b0cedcc31d9016a862015bc7da0de7ff09441e8b
|
[
"Unlicense"
] | null | null | null |
mySite/applications/resume/urls.py
|
ALittleMoron/django_mySite
|
b0cedcc31d9016a862015bc7da0de7ff09441e8b
|
[
"Unlicense"
] | null | null | null |
mySite/applications/resume/urls.py
|
ALittleMoron/django_mySite
|
b0cedcc31d9016a862015bc7da0de7ff09441e8b
|
[
"Unlicense"
] | null | null | null |
from django.urls import path
from .views import Resume, GitHubProjects
# URL routes for the resume app:
#   ''                   -> Resume page (name: 'resume')
#   'git-hub-projects/'  -> GitHub projects list (name: 'resume/gitHubProjects')
urlpatterns = [
    path('', Resume.as_view(), name='resume'),
    path('git-hub-projects/', GitHubProjects.as_view(), name='resume/gitHubProjects'),
]
| 25
| 86
| 0.711111
| 27
| 225
| 5.851852
| 0.555556
| 0.253165
| 0.126582
| 0.202532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128889
| 225
| 9
| 87
| 25
| 0.806122
| 0
| 0
| 0
| 0
| 0
| 0.19469
| 0.09292
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9c790759021794710ea2aefb9f825b1c1c4e55b2
| 1,728
|
py
|
Python
|
tests/networks/test_parallel.py
|
mtcrawshaw/meta-world
|
b511885af4405715c7b35f8295cef88021a926be
|
[
"MIT"
] | 4
|
2021-09-21T07:24:26.000Z
|
2022-03-25T00:28:33.000Z
|
tests/networks/test_parallel.py
|
mtcrawshaw/meta
|
b511885af4405715c7b35f8295cef88021a926be
|
[
"MIT"
] | null | null | null |
tests/networks/test_parallel.py
|
mtcrawshaw/meta
|
b511885af4405715c7b35f8295cef88021a926be
|
[
"MIT"
] | null | null | null |
""" Unit tests for meta/networks/parallel.py. """
import torch
import torch.nn as nn
from meta.networks.utils import Parallel
# Fixed dimensions shared by both tests.
INPUT_SIZE = 10
OUTPUT_SIZE = 8
NUM_LAYERS = 5
BATCH_SIZE = 6
def test_new_dim():
    """
    Test that outputs from Parallel are correctly computed and stacked when
    `new_dim=True` (each module's output stacked along a new leading dim).
    """
    # Create network: NUM_LAYERS independent linear layers over the same input.
    modules = [nn.Linear(INPUT_SIZE, OUTPUT_SIZE) for _ in range(NUM_LAYERS)]
    parallel = Parallel(modules, new_dim=True)
    # Construct batch of inputs, uniformly sampled from [-1, 1).
    inputs = 2 * torch.rand((BATCH_SIZE, INPUT_SIZE)) - 1
    # Pass inputs through modules.
    outputs = parallel(inputs)
    # Check that outputs were correctly stacked.
    assert outputs.shape == (NUM_LAYERS, BATCH_SIZE, OUTPUT_SIZE)
    # Check that outputs were correctly computed.
    for layer in range(NUM_LAYERS):
        assert torch.allclose(outputs[layer], modules[layer](inputs))
def test_no_new_dim():
    """
    Test that outputs from Parallel are correctly computed and stacked when
    `new_dim=False` (outputs concatenated along the existing batch dim).
    """
    # Create network.
    modules = [nn.Linear(INPUT_SIZE, OUTPUT_SIZE) for _ in range(NUM_LAYERS)]
    parallel = Parallel(modules, new_dim=False)
    # Construct batch of inputs, uniformly sampled from [-1, 1).
    inputs = 2 * torch.rand((BATCH_SIZE, INPUT_SIZE)) - 1
    # Pass inputs through modules.
    outputs = parallel(inputs)
    # Check that outputs were correctly stacked.
    assert outputs.shape == (NUM_LAYERS * BATCH_SIZE, OUTPUT_SIZE)
    # Check that outputs were correctly computed: rows
    # [layer*BATCH_SIZE, (layer+1)*BATCH_SIZE) hold module `layer`'s output.
    for layer in range(NUM_LAYERS):
        layer_start = layer * BATCH_SIZE
        layer_end = (layer + 1) * BATCH_SIZE
        assert torch.allclose(outputs[layer_start:layer_end], modules[layer](inputs))
| 27.428571
| 85
| 0.69213
| 233
| 1,728
| 4.965665
| 0.257511
| 0.054451
| 0.048401
| 0.055315
| 0.767502
| 0.713915
| 0.713915
| 0.713915
| 0.713915
| 0.713915
| 0
| 0.007358
| 0.213542
| 1,728
| 62
| 86
| 27.870968
| 0.844003
| 0.311343
| 0
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 1
| 0.08
| false
| 0
| 0.12
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9c7f8b7459fcb4a583080ba3e5c8c88b603610b7
| 39
|
py
|
Python
|
pygads/__init__.py
|
dacker-team/pygads
|
44e6182c2a4f0c663be2851747c4a02135ce6e70
|
[
"MIT"
] | null | null | null |
pygads/__init__.py
|
dacker-team/pygads
|
44e6182c2a4f0c663be2851747c4a02135ce6e70
|
[
"MIT"
] | null | null | null |
pygads/__init__.py
|
dacker-team/pygads
|
44e6182c2a4f0c663be2851747c4a02135ce6e70
|
[
"MIT"
] | null | null | null |
from pygads.GoogleAds import GoogleAds
| 19.5
| 38
| 0.871795
| 5
| 39
| 6.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
92bb143d6446635359e86e0ff4941a8fe91aa472
| 5,984
|
py
|
Python
|
tests/test_aiohttp_validate.py
|
alu0100832211/aiohttp_validate
|
894ac6a4d84b7bb9a480e34a78ea6d5a518f9fd3
|
[
"MIT"
] | 55
|
2016-10-12T17:44:41.000Z
|
2021-03-31T15:24:13.000Z
|
tests/test_aiohttp_validate.py
|
alu0100832211/aiohttp_validate
|
894ac6a4d84b7bb9a480e34a78ea6d5a518f9fd3
|
[
"MIT"
] | 447
|
2016-12-13T23:39:25.000Z
|
2022-03-27T20:38:18.000Z
|
tests/test_aiohttp_validate.py
|
alu0100832211/aiohttp_validate
|
894ac6a4d84b7bb9a480e34a78ea6d5a518f9fd3
|
[
"MIT"
] | 13
|
2018-02-06T16:15:58.000Z
|
2020-08-21T06:55:39.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_aiohttp_validate
----------------------------------
Tests for `aiohttp_validate` module.
"""
from datetime import datetime
from aiohttp_validate import validate
from aiohttp import web
# Plain handler: the request body must be exactly {"text": <str>};
# the response is not validated.
@validate(
    request_schema={
        "type": "object",
        "properties": {
            "text": {"type": "string"},
        },
        "required": ["text"],
        "additionalProperties": False
    },
    response_schema=None,
)
async def hello(request, *args):
    return "Hello world!"
# Handler whose return value (a datetime) is not JSON-serializable —
# used to exercise the malformed-response path.
@validate(
    request_schema=None,
    response_schema=None,
)
async def invalid_enc(request, decoded):
    return datetime.now()
class HelloView(web.View):
    """Class-based view with @validate applied per HTTP method."""
    @validate(
        request_schema={
            "type": "object",
            "properties": {
                "text": {"type": "string"},
            },
            "required": ["text"],
            "additionalProperties": False
        },
        response_schema=None,
    )
    async def get(self, data, request):
        return "Hello world!"
    @validate(
        request_schema={
            "type": "object",
            "properties": {
                "text": {"type": "string"},
            },
            "required": ["text"],
            "additionalProperties": False
        },
        response_schema=None,
    )
    async def post(self, data, request):
        return "Hello world!"
# Echo handler whose RESPONSE must match the schema; the request is unchecked.
@validate(
    request_schema=None,
    response_schema={
        "type": "object",
        "properties": {
            "text": {"type": "string"},
        },
        "required": ["text"],
        "additionalProperties": False
    }
)
async def validate_output(request, *args):
    return request
# Echo handler with a nested required property — used to check that
# validation errors are reported per field, including nested fields.
@validate(
    request_schema={
        "type": "object",
        "required": ["firstName", "nested"],
        "properties": {
            "firstName": {"type": "string"},
            "nested": {
                "type": "object",
                "required": ["test_for_nested"],
                "properties": {
                    "test_for_nested": {"type": "string"}
                }
            }
        }
    },
    response_schema=None,
)
async def validate_nested_errors(request, *args):
    return request
async def test_invalid_request(aiohttp_client, loop):
    """Missing or non-JSON bodies yield 400 with 'Request is malformed'."""
    app = web.Application(loop=loop)
    app.router.add_post('/', hello)
    app.router.add_get('/', hello)
    client = await aiohttp_client(app)
    resp = await client.get('/')
    assert resp.status == 400
    text = await resp.json()
    assert 'Request is malformed' in text["error"]
    resp = await client.post('/')
    assert resp.status == 400
    text = await resp.json()
    assert 'Request is malformed' in text["error"]
    resp = await client.post('/', data="123afasdf")
    assert resp.status == 400
    text = await resp.json()
    assert 'Request is malformed' in text["error"]
async def test_wrong_request_format(aiohttp_client, loop):
    """Valid JSON that violates the schema yields 400 with per-field errors."""
    app = web.Application(loop=loop)
    app.router.add_post('/', hello)
    client = await aiohttp_client(app)
    resp = await client.post('/', data='{"nottext": "foobar"}')
    assert resp.status == 400
    text = await resp.json()
    assert 'Request is invalid' in text["error"]
    assert text["errors"]
async def test_correct_request(aiohttp_client, loop):
    """Schema-conforming GET and POST requests reach the handler (200)."""
    app = web.Application(loop=loop)
    app.router.add_post('/', hello)
    app.router.add_get('/', hello)
    client = await aiohttp_client(app)
    resp = await client.post('/', data='{"text": "foobar"}')
    assert resp.status == 200
    text = await resp.text()
    assert 'Hello world' in text
    resp = await client.get('/', data='{"text": "foobar"}')
    assert resp.status == 200
    text = await resp.text()
    assert 'Hello world' in text
async def test_invalid_response(aiohttp_client, loop):
    """A non-serializable handler return yields 500 'Response is malformed'."""
    app = web.Application(loop=loop)
    app.router.add_post('/', invalid_enc)
    app.router.add_get('/', invalid_enc)
    client = await aiohttp_client(app)
    resp = await client.post('/', data='{"text": "foobar"}')
    assert resp.status == 500
    text = await resp.json()
    assert 'Response is malformed' in text["error"]
    resp = await client.get('/', data='{"text": "foobar"}')
    assert resp.status == 500
    text = await resp.json()
    assert 'Response is malformed' in text["error"]
async def test_wrong_response_format(aiohttp_client, loop):
    """Response-schema validation: a valid echo passes through (200); a body
    that is not a JSON object is rejected (400 with errors)."""
    app = web.Application(loop=loop)
    app.router.add_post('/', validate_output)
    client = await aiohttp_client(app)
    resp = await client.post('/', data='{"text": "foobar"}')
    text = await resp.json()
    assert resp.status == 200
    assert text["text"] == "foobar"
    resp = await client.post('/', data='123')
    assert resp.status == 400
    text = await resp.json()
    assert "Request is invalid" in text["error"]
    assert text["errors"]
async def test_class_based_valid_request(aiohttp_client, loop):
    """Class-based views validate exactly like function handlers."""
    app = web.Application(loop=loop)
    app.router.add_view('/', HelloView)
    client = await aiohttp_client(app)
    resp = await client.post('/', data='{"text": "foobar"}')
    assert resp.status == 200
    text = await resp.text()
    assert 'Hello world' in text
    resp = await client.get('/', data='{"text": "foobar"}')
    assert resp.status == 200
    text = await resp.text()
    assert 'Hello world' in text
async def test_nested_errors(aiohttp_client, loop):
    """Validation errors mirror the schema's nesting structure."""
    app = web.Application(loop=loop)
    app.router.add_view('/', validate_nested_errors)
    client = await aiohttp_client(app)
    resp = await client.post('/', data='{"nested": {}}')
    assert resp.status == 400
    text = await resp.json()
    # response for errors would be like:
    # "errors": {
    #     "firstName": ["\'firstName\' is a required property"],
    #     "nested": {
    #         "test_for_nested": ["\'test_for_nested\' is a required property"]
    #     }
    # }
    errors = text["errors"]
    assert errors["firstName"]
    assert errors["nested"]["test_for_nested"]
| 26.477876
| 79
| 0.590241
| 669
| 5,984
| 5.164425
| 0.122571
| 0.052677
| 0.05644
| 0.044284
| 0.74877
| 0.714327
| 0.714327
| 0.714327
| 0.703907
| 0.650651
| 0
| 0.010302
| 0.253844
| 5,984
| 225
| 80
| 26.595556
| 0.763494
| 0.057152
| 0
| 0.64881
| 0
| 0
| 0.161216
| 0
| 0
| 0
| 0
| 0
| 0.172619
| 1
| 0
| false
| 0
| 0.017857
| 0
| 0.059524
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
92f172e5abf61df4e63cc9a5d84b6ec2a0b3c0db
| 159
|
py
|
Python
|
importCSVJSON/admin.py
|
alokknight/smartserve
|
b52504525902f0ef72251c553156d6ff7e5f540c
|
[
"MIT"
] | 2
|
2022-01-02T20:53:33.000Z
|
2022-01-06T16:17:32.000Z
|
importCSVJSON/admin.py
|
alokknight/smartserve
|
b52504525902f0ef72251c553156d6ff7e5f540c
|
[
"MIT"
] | null | null | null |
importCSVJSON/admin.py
|
alokknight/smartserve
|
b52504525902f0ef72251c553156d6ff7e5f540c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import Form_fields,Csv
# Expose the CSV/JSON-import models in the Django admin site.
admin.site.register(Form_fields)
admin.site.register(Csv)
| 19.875
| 36
| 0.805031
| 24
| 159
| 5.25
| 0.541667
| 0.15873
| 0.269841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113208
| 159
| 7
| 37
| 22.714286
| 0.893617
| 0.163522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
92f73b790df818397af9f386a5f4c14fd19b6177
| 102
|
py
|
Python
|
blog/admin.py
|
mr-shubhamsinghal/Django-Routers
|
1be4a96dc07c553efe97d2e99c0ffc900330634f
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
mr-shubhamsinghal/Django-Routers
|
1be4a96dc07c553efe97d2e99c0ffc900330634f
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
mr-shubhamsinghal/Django-Routers
|
1be4a96dc07c553efe97d2e99c0ffc900330634f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from blog.models import BlogModel
# Make BlogModel manageable through the Django admin site.
admin.site.register(BlogModel)
| 12.75
| 33
| 0.813725
| 14
| 102
| 5.928571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127451
| 102
| 7
| 34
| 14.571429
| 0.932584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
133d75a2c48c56ee0ccfe110c732a582453610cd
| 130
|
py
|
Python
|
notification/admin.py
|
Atwinenickson/lendsuphumanresourcemanagement
|
b46df164d59a4e94300376d679e07bd9a60d6343
|
[
"MIT",
"Unlicense"
] | 36
|
2019-11-26T11:46:32.000Z
|
2022-02-17T13:18:18.000Z
|
notification/admin.py
|
Atwinenickson/lendsuphumanresourcemanagement
|
b46df164d59a4e94300376d679e07bd9a60d6343
|
[
"MIT",
"Unlicense"
] | 13
|
2020-02-14T09:30:16.000Z
|
2022-03-12T00:58:09.000Z
|
notification/admin.py
|
Atwinenickson/lendsuphumanresourcemanagement
|
b46df164d59a4e94300376d679e07bd9a60d6343
|
[
"MIT",
"Unlicense"
] | 16
|
2019-06-14T12:11:29.000Z
|
2022-02-14T15:16:07.000Z
|
from django.contrib import admin
# Register your models here.
from .models import Notification
# Make Notification manageable through the Django admin site.
admin.site.register(Notification)
| 21.666667
| 33
| 0.823077
| 17
| 130
| 6.294118
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 130
| 6
| 33
| 21.666667
| 0.930435
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1367dc7168a835d6c86ca93634a6d8d80d888018
| 161
|
py
|
Python
|
common/__init__.py
|
Reimilia/Proxy_Server
|
d04dd083604fd39d161dd0382b49d2d3f4ab8b80
|
[
"MIT"
] | null | null | null |
common/__init__.py
|
Reimilia/Proxy_Server
|
d04dd083604fd39d161dd0382b49d2d3f4ab8b80
|
[
"MIT"
] | null | null | null |
common/__init__.py
|
Reimilia/Proxy_Server
|
d04dd083604fd39d161dd0382b49d2d3f4ab8b80
|
[
"MIT"
] | null | null | null |
import sys
# Path hack: make the parent directory importable so the sibling top-level
# modules (config, json_parser, resp_wrapper) resolve from inside this
# package.  NOTE(review): '..' is relative to the process's working
# directory, not this file — confirm all entry points run from the
# expected directory.
sys.path.append('..')
from config import SERVER_BASE,PRIVACY_BASE
from json_parser import list2json,json2list
from resp_wrapper import filter_policy
| 23
| 43
| 0.838509
| 24
| 161
| 5.416667
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013793
| 0.099379
| 161
| 6
| 44
| 26.833333
| 0.882759
| 0
| 0
| 0
| 0
| 0
| 0.012422
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
138d113e5b35e677f7129af7ce2e61c69cded680
| 19
|
py
|
Python
|
app/middlewares/__init__.py
|
oseme-techguy/python-pdf-annotation-api-demo
|
b86dd4e20e9cc13237eacc9a32bb142d4bb28755
|
[
"MIT"
] | 1
|
2019-10-10T17:15:23.000Z
|
2019-10-10T17:15:23.000Z
|
app/middlewares/__init__.py
|
oseme-techguy/python-pdf-annotation-api-demo
|
b86dd4e20e9cc13237eacc9a32bb142d4bb28755
|
[
"MIT"
] | null | null | null |
app/middlewares/__init__.py
|
oseme-techguy/python-pdf-annotation-api-demo
|
b86dd4e20e9cc13237eacc9a32bb142d4bb28755
|
[
"MIT"
] | null | null | null |
"""Middlewares"""
| 6.333333
| 17
| 0.578947
| 1
| 19
| 11
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 19
| 2
| 18
| 9.5
| 0.647059
| 0.578947
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
139b1e7a838bce5244c71bbd93c4679607e21a52
| 66
|
py
|
Python
|
codigo.py
|
RobertoManzoA01748301/HCAP2021
|
fc3f834d276cbb47a6d10ebf323ab61c709f5901
|
[
"MIT"
] | null | null | null |
codigo.py
|
RobertoManzoA01748301/HCAP2021
|
fc3f834d276cbb47a6d10ebf323ab61c709f5901
|
[
"MIT"
] | null | null | null |
codigo.py
|
RobertoManzoA01748301/HCAP2021
|
fc3f834d276cbb47a6d10ebf323ab61c709f5901
|
[
"MIT"
] | null | null | null |
import math
# Print the math module's pi constant.
print(math.pi)
# Spanish: "Hello, this is a new line".
print('Hola, esta es una nueva linea')
| 16.5
| 38
| 0.742424
| 12
| 66
| 4.083333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 66
| 3
| 39
| 22
| 0.859649
| 0
| 0
| 0
| 0
| 0
| 0.439394
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
139be43390434bcb2a38161d2ba9b4d203ac6eef
| 130
|
py
|
Python
|
mindefuse/strategy/knuth/score_count/__init__.py
|
sinistro14/mindefuse
|
c7371a81731d0b9a03d3ef18f91c336e4135c17d
|
[
"MIT"
] | null | null | null |
mindefuse/strategy/knuth/score_count/__init__.py
|
sinistro14/mindefuse
|
c7371a81731d0b9a03d3ef18f91c336e4135c17d
|
[
"MIT"
] | 1
|
2019-08-22T19:51:12.000Z
|
2019-08-22T19:51:12.000Z
|
mindefuse/strategy/knuth/score_count/__init__.py
|
sinistro14/mindefuse
|
c7371a81731d0b9a03d3ef18f91c336e4135c17d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.7
from .score_count import ScoreCount
from .simple import SimpleScore
from .parallel import ParallelScore
| 21.666667
| 35
| 0.815385
| 18
| 130
| 5.833333
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017391
| 0.115385
| 130
| 5
| 36
| 26
| 0.895652
| 0.176923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
13c6ac3e03efcb9597799e7818b124223e6b500e
| 2,483
|
py
|
Python
|
source_convert_IFF_code/341/comb_mslp_IFF_jasmin.py
|
glamod/glamod-nuim
|
eed6f9d7d71b0c456ef39fdea6b58677e13ab50c
|
[
"BSD-3-Clause"
] | null | null | null |
source_convert_IFF_code/341/comb_mslp_IFF_jasmin.py
|
glamod/glamod-nuim
|
eed6f9d7d71b0c456ef39fdea6b58677e13ab50c
|
[
"BSD-3-Clause"
] | 23
|
2022-01-28T13:57:39.000Z
|
2022-03-28T09:34:41.000Z
|
source_convert_IFF_code/341/comb_mslp_IFF_jasmin.py
|
glamod/glamod-nuim
|
eed6f9d7d71b0c456ef39fdea6b58677e13ab50c
|
[
"BSD-3-Clause"
] | 1
|
2019-01-24T12:06:06.000Z
|
2019-01-24T12:06:06.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 17 14:22:41 2019
@author: snoone
"""
import os
import glob
import pandas as pd
import csv
##import all csv files in current dir that need timezone changing to GMT based on hours offset
# Base directory holding the per-chunk sub-directories 341_a .. 341_f and the
# combined output directory 341.
BASE_DIR = "/gws/nopw/j04/c3s311a_lot2/data/level1/land/level1a_sub_daily_data/sea_level_pressure"
extension = 'psv'

def _read_all_psv(subdir):
    """Concatenate every *.psv file (pipe-delimited) found in BASE_DIR/subdir.

    Changes the working directory as a side effect, matching the original
    script's behavior.
    """
    os.chdir(os.path.join(BASE_DIR, subdir))
    all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
    # combine all files in the list
    return pd.concat([pd.read_csv(f, delimiter='|') for f in all_filenames])

# Combine the six source chunks (341_a .. 341_f) into one frame, replacing the
# six copy-pasted read blocks of the original with a single loop.
df_final = pd.concat(
    [_read_all_psv("341_" + letter) for letter in "abcdef"], axis=0
)
# Station_ID is used as a string key below (filenames, grouping).
df_final['Station_ID'] = df_final['Station_ID'].astype(str)

# Write one pipe-delimited output file per station into the combined 341 dir.
os.chdir(os.path.join(BASE_DIR, "341"))
cats = sorted(df_final['Station_ID'].unique())
for cat in cats:
    outfilename = cat + "_sea_level_pressure_341.psv"
    print(outfilename)
    df_final[df_final["Station_ID"] == cat].to_csv(outfilename, sep='|', index=False)
| 39.412698
| 104
| 0.724527
| 416
| 2,483
| 4.129808
| 0.225962
| 0.083818
| 0.074505
| 0.088475
| 0.75844
| 0.75844
| 0.737485
| 0.737485
| 0.737485
| 0.737485
| 0
| 0.054388
| 0.118808
| 2,483
| 63
| 105
| 39.412698
| 0.730804
| 0.126057
| 0
| 0.324324
| 0
| 0
| 0.358644
| 0.316141
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.108108
| 0
| 0.108108
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
13ebec7c0d87c45694c391d2b6e46ff1dfbba84a
| 57
|
py
|
Python
|
rltorch/distributed/__init__.py
|
cindycia/Atari-SAC-Discrete
|
5d92339f3efbac34488a14db024499b8951fc3b3
|
[
"MIT"
] | 16
|
2019-11-15T13:37:20.000Z
|
2022-01-24T10:29:38.000Z
|
rltorch/distributed/__init__.py
|
cindycia/Atari-SAC-Discrete
|
5d92339f3efbac34488a14db024499b8951fc3b3
|
[
"MIT"
] | 1
|
2020-05-09T18:24:21.000Z
|
2020-05-10T12:44:39.000Z
|
rltorch/distributed/__init__.py
|
ku2482/rltorch
|
7819af49d95bfa268e00413a7606564b0e7286a7
|
[
"MIT"
] | 3
|
2020-12-21T08:21:15.000Z
|
2022-01-24T10:29:43.000Z
|
from .run import run_actor, run_learner, run_distributed
| 28.5
| 56
| 0.842105
| 9
| 57
| 5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 57
| 1
| 57
| 57
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b9157314febba44e077157623603c54a07641308
| 1,728
|
py
|
Python
|
validators/credit_card.py
|
uditjuneja/open-finanace-india
|
856387f3327e9aa67aa16ab2e89ff4fce2e99878
|
[
"MIT"
] | 5
|
2020-12-24T14:11:16.000Z
|
2022-02-26T21:25:41.000Z
|
validators/credit_card.py
|
uditjuneja/open-finance-india
|
856387f3327e9aa67aa16ab2e89ff4fce2e99878
|
[
"MIT"
] | null | null | null |
validators/credit_card.py
|
uditjuneja/open-finance-india
|
856387f3327e9aa67aa16ab2e89ff4fce2e99878
|
[
"MIT"
] | null | null | null |
import re
RE_CREDIT_CARD = {
"Amex Card": re.compile(r"^3[47][0-9]{13}$"),
"BCGlobal": re.compile(r"^(6541|6556)[0-9]{12}$"),
"Carte Blanche Card": re.compile(r"^389[0-9]{11}$"),
"Diners Club Card": re.compile(r"^3(?:0[0-5]|[68][0-9])[0-9]{11}$"),
"Discover Card": re.compile(
r"^65[4-9][0-9]{13}|64[4-9][0-9]{13}|6011[0-9]{12}|(622(?:12[6-9]|1[3-9][0-9]|[2-8][0-9][0-9]|9[01][0-9]|92[0-5])[0-9]{10})$"
),
"Insta Payment Card": re.compile(r"^63[7-9][0-9]{13}$"),
"JCB Card": re.compile(r"^(?:2131|1800|35\d{3})\d{11}$"),
"KoreanLocalCard": re.compile(r"^9[0-9]{15}$"),
"Laser Card": re.compile(r"^(6304|6706|6709|6771)[0-9]{12,15}$"),
"Maestro Card": re.compile(r"^(5018|5020|5038|6304|6759|6761|6763)[0-9]{8,15}$"),
"Mastercard": re.compile(
r"^(5[1-5][0-9]{14}|2(22[1-9][0-9]{12}|2[3-9][0-9]{13}|[3-6][0-9]{14}|7[0-1][0-9]{13}|720[0-9]{12}))$"
),
"Solo Card": re.compile(
r"^(6334|6767)[0-9]{12}|(6334|6767)[0-9]{14}|(6334|6767)[0-9]{15}$"
),
"Switch Card": re.compile(
r"^(4903|4905|4911|4936|6333|6759)[0-9]{12}|(4903|4905|4911|4936|6333|6759)[0-9]{14}|(4903|4905|4911|4936|6333|6759)[0-9]{15}|564182[0-9]{10}|564182[0-9]{12}|564182[0-9]{13}|633110[0-9]{10}|633110[0-9]{12}|633110[0-9]{13}$"
),
"Union Pay Card": re.compile(r"^(62[0-9]{14,17})$"),
"Visa Card": re.compile(r"^4[0-9]{12}(?:[0-9]{3})?$"),
"Visa Master Card": re.compile(r"^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14})$"),
}
def is_cc_valid(CC_NO: str):
global RE_CREDIT_CARD
for provider in RE_CREDIT_CARD:
reg_compile = RE_CREDIT_CARD[provider]
if reg_compile.match(CC_NO):
return provider
return False
| 43.2
| 231
| 0.543981
| 346
| 1,728
| 2.676301
| 0.277457
| 0.088553
| 0.172786
| 0.196544
| 0.192225
| 0.146868
| 0.131749
| 0.131749
| 0.047516
| 0.047516
| 0
| 0.287441
| 0.13831
| 1,728
| 39
| 232
| 44.307692
| 0.334453
| 0
| 0
| 0.117647
| 0
| 0.205882
| 0.587963
| 0.429398
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.029412
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b9534ecd8cc680c48ef160e4be0d0d4b829e302b
| 170
|
py
|
Python
|
symopt/solvers/__init__.py
|
spcornelius/symopt
|
6f276ca07cc266af1cd58758a0cf413ab85f2591
|
[
"MIT"
] | null | null | null |
symopt/solvers/__init__.py
|
spcornelius/symopt
|
6f276ca07cc266af1cd58758a0cf413ab85f2591
|
[
"MIT"
] | null | null | null |
symopt/solvers/__init__.py
|
spcornelius/symopt
|
6f276ca07cc266af1cd58758a0cf413ab85f2591
|
[
"MIT"
] | null | null | null |
import symopt.solvers.scipy
import symopt.solvers.ipopt
solve = {'slsqp': scipy.solve_slsqp,
'cobyla': scipy.solve_cobyla,
'ipopt': ipopt.solve_ipopt}
| 24.285714
| 38
| 0.694118
| 21
| 170
| 5.47619
| 0.380952
| 0.208696
| 0.330435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.182353
| 170
| 6
| 39
| 28.333333
| 0.827338
| 0
| 0
| 0
| 0
| 0
| 0.094118
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b9aa696e3bb37377db5c12cf613c6dd24deaa8a2
| 101
|
py
|
Python
|
tests/cases/build/optimize_comparison_operators.py
|
3e45/minpiler
|
993bdb38d1e4709a412bb551f7eb213376bfe7d2
|
[
"MIT"
] | null | null | null |
tests/cases/build/optimize_comparison_operators.py
|
3e45/minpiler
|
993bdb38d1e4709a412bb551f7eb213376bfe7d2
|
[
"MIT"
] | 5
|
2022-02-12T19:53:08.000Z
|
2022-03-02T04:30:32.000Z
|
tests/cases/build/optimize_comparison_operators.py
|
3e45/minpiler
|
993bdb38d1e4709a412bb551f7eb213376bfe7d2
|
[
"MIT"
] | null | null | null |
from minpiler.std import M
if 1 < 3 < 5:
M.print("a")
M.print("b")
# > print "a"
# > print "b"
| 11.222222
| 26
| 0.524752
| 19
| 101
| 2.789474
| 0.631579
| 0.226415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.257426
| 101
| 8
| 27
| 12.625
| 0.666667
| 0.227723
| 0
| 0
| 0
| 0
| 0.026667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b9ad74894c61b441770f155e7a1ae7270ab1e657
| 44
|
py
|
Python
|
nmrs.py
|
Bapcraft/NCJDBCMM-MySQL-REST-Server
|
38c639e3e390820094ecb9dd5aec2e1f6f93cbf3
|
[
"MIT"
] | null | null | null |
nmrs.py
|
Bapcraft/NCJDBCMM-MySQL-REST-Server
|
38c639e3e390820094ecb9dd5aec2e1f6f93cbf3
|
[
"MIT"
] | null | null | null |
nmrs.py
|
Bapcraft/NCJDBCMM-MySQL-REST-Server
|
38c639e3e390820094ecb9dd5aec2e1f6f93cbf3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
print 'hello world!'
| 11
| 21
| 0.681818
| 7
| 44
| 4.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 44
| 3
| 22
| 14.666667
| 0.789474
| 0.454545
| 0
| 0
| 0
| 0
| 0.521739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b9e3663894dff0a8d86561a30f109e3e7b90b96c
| 50
|
py
|
Python
|
scripts/portal/gold_boss_gate.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/portal/gold_boss_gate.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/portal/gold_boss_gate.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# 252020000 - to gold boss
sm.warp(252020700, 0)
| 12.5
| 26
| 0.7
| 8
| 50
| 4.375
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.463415
| 0.18
| 50
| 3
| 27
| 16.666667
| 0.390244
| 0.48
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a2377c9f296acc2c349ea5e43067c8c10b2d7c1
| 297
|
py
|
Python
|
python/helpers/third_party/thriftpy/_shaded_thriftpy/__init__.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/helpers/third_party/thriftpy/_shaded_thriftpy/__init__.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/helpers/third_party/thriftpy/_shaded_thriftpy/__init__.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
# -*- coding: utf-8 -*-
import sys
from .hook import install_import_hook, remove_import_hook
from .parser import load, load_module, load_fp
__version__ = '0.3.8'
__python__ = sys.version_info
__all__ = ["install_import_hook", "remove_import_hook", "load", "load_module",
"load_fp"]
| 24.75
| 79
| 0.717172
| 42
| 297
| 4.47619
| 0.452381
| 0.212766
| 0.180851
| 0.244681
| 0.56383
| 0.351064
| 0
| 0
| 0
| 0
| 0
| 0.015936
| 0.154882
| 297
| 11
| 80
| 27
| 0.733068
| 0.070707
| 0
| 0
| 0
| 0
| 0.233577
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.571429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a284dad752d65cc166145f2a15948be2229f591
| 3,023
|
py
|
Python
|
Fundamentos de Algoritmos/Prova/Ex_02.py
|
antuniooh/ExercisesFEI
|
fea8855178e098713173aceee0f0616a69c438e4
|
[
"MIT"
] | 1
|
2021-05-05T22:55:03.000Z
|
2021-05-05T22:55:03.000Z
|
Fundamentos de Algoritmos/Prova/Ex_02.py
|
antuniooh/ExercisesFEI
|
fea8855178e098713173aceee0f0616a69c438e4
|
[
"MIT"
] | null | null | null |
Fundamentos de Algoritmos/Prova/Ex_02.py
|
antuniooh/ExercisesFEI
|
fea8855178e098713173aceee0f0616a69c438e4
|
[
"MIT"
] | 1
|
2021-11-24T06:55:09.000Z
|
2021-11-24T06:55:09.000Z
|
# coding: utf-8
# In[1]:
from palavra import aleatoria
def main():
p = aleatoria()
erros = 6
t1 = []
digitadas = ""
#criar list t1 #desenhar os pontinhos
for x in range(len(p)):
t1 += p[x]
for y in range(len(t1)):
t1[y] = "."
print(p)
def impressão():
if erros == 6:
print("X==:==")
print("X : ")
print("X ")
print("X ")
print("X ")
print("X ")
print("======")
elif erros == 5:
print("X==:==")
print("X : ")
print("X 0 ")
print("X ")
print("X ")
print("X ")
print("======")
elif erros == 4:
print("X==:==")
print("X : ")
print("X 0 ")
print("X | ")
print("X ")
print("X ")
print("======")
elif erros == 3:
print("X==:==")
print("X : ")
print("X 0 ")
print("X \| ")
print("X ")
print("X ")
print("======")
elif erros == 2:
print("X==:==")
print("X : ")
print("X 0 ")
print("X \|/ ")
print("X ")
print("X ")
print("======")
elif erros == 1:
print("X==:==")
print("X : ")
print("X 0 ")
print("X \|/ ")
print("X / ")
print("X ")
print("======")
else:
print("X==:==")
print("X : ")
print("X 0 ")
print("X \|/ ")
print("X / \ ")
print("X ")
print("======")
print("Enforcado!")
print(t1)
print("Letras já digitadas: ", digitadas)
print("")
while erros > 0:
ponto = "."
if ponto not in t1:
print("Você Ganhou!")
break
else:
letra = input("Digite uma letra: ")
#lista da string de digitadas
d = []
d = digitadas.replace(","," ").split()
#caso a letra não esteja em digitadas e esteja na palavra
if letra in p and letra not in d:
digitadas += letra + ","
for x in range(len(p)):
if letra == p[x]:
t1[x] = letra
elif letra in d:
print("Letra repetida")
else:
print("Você errou!")
erros -=1
impressão()
if __name__ =="__main__":
main()
| 26.286957
| 69
| 0.296725
| 252
| 3,023
| 3.527778
| 0.265873
| 0.283465
| 0.445444
| 0.391451
| 0.413948
| 0.413948
| 0.380202
| 0.380202
| 0.380202
| 0.370079
| 0
| 0.018464
| 0.552101
| 3,023
| 114
| 70
| 26.517544
| 0.638109
| 0.045981
| 0
| 0.531915
| 0
| 0
| 0.136696
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0.010638
| 0
| 0.031915
| 0.606383
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
6a395782d2becfd163024e57d5bfbd025d1764ff
| 801
|
py
|
Python
|
allennlp/modules/token_embedders/__init__.py
|
SivilTaram/allennlp
|
b50f22c3f45281c1be6a0fbb041137c3f5bf2131
|
[
"Apache-2.0"
] | 1
|
2019-01-04T15:15:42.000Z
|
2019-01-04T15:15:42.000Z
|
allennlp/modules/token_embedders/__init__.py
|
SivilTaram/allennlp
|
b50f22c3f45281c1be6a0fbb041137c3f5bf2131
|
[
"Apache-2.0"
] | null | null | null |
allennlp/modules/token_embedders/__init__.py
|
SivilTaram/allennlp
|
b50f22c3f45281c1be6a0fbb041137c3f5bf2131
|
[
"Apache-2.0"
] | null | null | null |
"""
A :class:`~allennlp.modules.token_embedders.token_embedder.TokenEmbedder` is a ``Module`` that
embeds one-hot-encoded tokens as vectors.
"""
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.modules.token_embedders.embedding import Embedding
from allennlp.modules.token_embedders.token_characters_encoder import TokenCharactersEncoder
from allennlp.modules.token_embedders.elmo_token_embedder import ElmoTokenEmbedder
from allennlp.modules.token_embedders.openai_transformer_embedder import OpenaiTransformerEmbedder
from allennlp.modules.token_embedders.bert_token_embedder import BertEmbedder, PretrainedBertEmbedder
from allennlp.modules.token_embedders.bidirectional_language_model_token_embedder import \
BidirectionalLanguageModelTokenEmbedder
| 57.214286
| 101
| 0.878901
| 90
| 801
| 7.577778
| 0.4
| 0.175953
| 0.234604
| 0.340176
| 0.426686
| 0.184751
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064919
| 801
| 13
| 102
| 61.615385
| 0.910547
| 0.169788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.875
| 0
| 0.875
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a3ad8e8c84491ed7931d0702fa86fe6de6be2d6
| 162
|
py
|
Python
|
supabase/__init__.py
|
anoushk1234/supabase-py
|
3e0c0604732a5f6399826455c78e6f4320bc8761
|
[
"MIT"
] | 201
|
2020-08-28T06:29:44.000Z
|
2021-09-13T20:15:15.000Z
|
supabase/__init__.py
|
anoushk1234/supabase-py
|
3e0c0604732a5f6399826455c78e6f4320bc8761
|
[
"MIT"
] | 31
|
2020-08-28T06:38:31.000Z
|
2021-09-12T15:47:48.000Z
|
supabase/__init__.py
|
anoushk1234/supabase-py
|
3e0c0604732a5f6399826455c78e6f4320bc8761
|
[
"MIT"
] | 22
|
2020-10-02T15:32:12.000Z
|
2021-09-13T12:31:12.000Z
|
__version__ = "0.0.3"
from supabase import client, lib
from supabase.client import Client, create_client
__all__ = ["client", "lib", "Client", "create_client"]
| 23.142857
| 54
| 0.734568
| 22
| 162
| 4.954545
| 0.454545
| 0.220183
| 0.330275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.12963
| 162
| 6
| 55
| 27
| 0.751773
| 0
| 0
| 0
| 0
| 0
| 0.203704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e0758bc3639165bb7f5b4747ebb37868a5fe58fc
| 5,206
|
py
|
Python
|
solutions/models.py
|
Kgermando/e-s
|
249ada84c63ffe99a71c1fbb301c533b9f5a3869
|
[
"Apache-2.0"
] | null | null | null |
solutions/models.py
|
Kgermando/e-s
|
249ada84c63ffe99a71c1fbb301c533b9f5a3869
|
[
"Apache-2.0"
] | null | null | null |
solutions/models.py
|
Kgermando/e-s
|
249ada84c63ffe99a71c1fbb301c533b9f5a3869
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.db.models.signals import pre_save
from django.urls import reverse
from es.utils import unique_slug_generator_solution
from es.constant import SECTEUR_ENTREPRISES
from tinymce import HTMLField
# Create your models here.
class Entreprise_solution(models.Model):
"""
Entreprise model
"""
nom = models.CharField(max_length=300)
slug = models.SlugField(blank=True, unique=True, help_text='Laissez ce champ vide')
secteur_entreprise = models.CharField(max_length=300, choices=SECTEUR_ENTREPRISES)
fonctions_entreprise = models.CharField(max_length=500)
logo_entreprise = models.ImageField(upload_to= 'entreprise_solution_img/')
telephone_entreprise = models.DecimalField(max_digits=13, decimal_places=0)
telephone_2_entreprise = models.DecimalField(max_digits=13, decimal_places=0)
email_entreprise = models.EmailField()
site_web = models.URLField(blank=True)
description_entreprise = HTMLField('description_entreprise')
competence_entreprise = models.CharField(max_length=500)
sociauFB_entreprise = models.URLField(blank=True, help_text='Copiez le lien de compte Facebook et collez le ici')
sociauTW_entreprise = models.URLField(blank=True, help_text='Copiez le lien de compte Twitter et collez le ici')
sociauINS_entreprise = models.URLField(blank=True, help_text='Copiez le lien de compte Instagram et collez le ici')
sociauIN_entreprise = models.URLField(blank=True, help_text='Copiez le lien de compte LinkedIN et collez le ici')
page_vues = models.IntegerField(default=0, verbose_name="Nombre des vues")
def __str__(self):
return self.nom
def get_absolute_url(self):
return reverse("solutions:entreprise_solutions_detail", kwargs={"id": self.id})
class Artisans_solution(models.Model):
"""
Artisans model
"""
nom = models.CharField(max_length=300)
slug = models.SlugField(blank=True, unique=True, help_text='Laissez ce champ vide')
secteur_artisans = models.CharField(max_length=300, choices=SECTEUR_ENTREPRISES)
fonctions_artisans = models.CharField(max_length=500)
logo_artisans = models.ImageField(upload_to= 'entreprise_solution_img/')
telephone_artisans = models.DecimalField(max_digits=13, decimal_places=0)
telephone_2_artisans = models.DecimalField(max_digits=13, decimal_places=0)
email_artisans = models.EmailField()
site_web = models.URLField(blank=True)
description_artisans = HTMLField('description_artisans')
competence_artisans = models.CharField(max_length=500)
sociauFB_artisans = models.URLField(blank=True, help_text='Copiez le lien de compte Facebook et collez le ici')
sociauTW_artisans = models.URLField(blank=True, help_text='Copiez le lien de compte Twitter et collez le ici')
sociauINS_artisans = models.URLField(blank=True, help_text='Copiez le lien de compte Instagram et collez le ici')
sociauIN_artisans = models.URLField(blank=True, help_text='Copiez le lien de compte LinkedIN et collez le ici')
page_vues = models.IntegerField(default=0, verbose_name="Nombre des vues")
def __str__(self):
return self.nom
def get_absolute_url(self):
return reverse("solutions:artisans_solution_detail", kwargs={"id": self.id})
class Consultance_solution(models.Model):
"""
Consultance model
"""
nom = models.CharField(max_length=300)
slug = models.SlugField(blank=True, unique=True, help_text='Laissez ce champ vide')
secteur_consultance = models.CharField(max_length=300, choices=SECTEUR_ENTREPRISES)
fonctions_consultance = models.CharField(max_length=500)
logo_consultance = models.ImageField(upload_to= 'entreprise_solution_img/')
telephone_consultance = models.DecimalField(max_digits=13, decimal_places=0)
telephone_2_consultance = models.DecimalField(max_digits=13, decimal_places=0)
email_consultance = models.EmailField()
site_web = models.URLField(blank=True)
description_consultance = HTMLField('description_consultance')
competence_consultance = models.CharField(max_length=500)
sociauFB_consultance = models.URLField(blank=True, help_text='Copiez le lien de compte Facebook et collez le ici')
sociauTW_consultance = models.URLField(blank=True, help_text='Copiez le lien de compte Twitter et collez le ici')
sociauINS_consultance = models.URLField(blank=True, help_text='Copiez le lien de compte Instagram et collez le ici')
sociauIN_consultance = models.URLField(blank=True, help_text='Copiez le lien de compte LinkedIN et collez le ici')
page_vues = models.IntegerField(default=0, verbose_name="Nombre des vues")
def __str__(self):
return self.nom
def get_absolute_url(self):
return reverse("solutions:consultance_solutions_detail", kwargs={"id": self.id})
def tag_pre_save_receiver(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = unique_slug_generator_solution(instance)
pre_save.connect(tag_pre_save_receiver, sender=Entreprise_solution)
pre_save.connect(tag_pre_save_receiver, sender=Artisans_solution)
pre_save.connect(tag_pre_save_receiver, sender=Consultance_solution)
| 51.039216
| 120
| 0.769497
| 687
| 5,206
| 5.605531
| 0.158661
| 0.042067
| 0.046741
| 0.089587
| 0.81771
| 0.803947
| 0.71462
| 0.71462
| 0.662685
| 0.494417
| 0
| 0.013402
| 0.140031
| 5,206
| 101
| 121
| 51.544554
| 0.846772
| 0.014406
| 0
| 0.28
| 0
| 0
| 0.188791
| 0.044444
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093333
| false
| 0
| 0.08
| 0.08
| 0.933333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
0eb2539e4312766b6516457648742eb3bd837d0c
| 208
|
py
|
Python
|
valohai_cli/commands/project/__init__.py
|
valohai/valohai-cli
|
d45f4d8d3b39e803730c070a40f8d2ddfbfb661c
|
[
"MIT"
] | 11
|
2017-11-06T16:31:46.000Z
|
2020-10-26T09:55:12.000Z
|
valohai_cli/commands/project/__init__.py
|
valohai/valohai-cli
|
d45f4d8d3b39e803730c070a40f8d2ddfbfb661c
|
[
"MIT"
] | 147
|
2017-04-06T09:46:11.000Z
|
2022-03-10T16:24:15.000Z
|
valohai_cli/commands/project/__init__.py
|
valohai/valohai-cli
|
d45f4d8d3b39e803730c070a40f8d2ddfbfb661c
|
[
"MIT"
] | 4
|
2017-04-16T16:00:51.000Z
|
2021-07-05T11:36:36.000Z
|
import click
from valohai_cli.plugin_cli import PluginCLI
@click.command(cls=PluginCLI, commands_module='valohai_cli.commands.project')
def project() -> None:
"""
Project-related commands.
"""
| 18.909091
| 77
| 0.730769
| 25
| 208
| 5.92
| 0.6
| 0.135135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149038
| 208
| 10
| 78
| 20.8
| 0.836158
| 0.120192
| 0
| 0
| 0
| 0
| 0.167665
| 0.167665
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0ec0c866ca9c7df428dc7de55224be10e9e1fad8
| 391
|
py
|
Python
|
best_practice/mobile_web/rdc/test_login_fail.py
|
kmissoumi/demo-python
|
1376d62c80374923edf7afa364697e4a6a9ce56b
|
[
"MIT"
] | null | null | null |
best_practice/mobile_web/rdc/test_login_fail.py
|
kmissoumi/demo-python
|
1376d62c80374923edf7afa364697e4a6a9ce56b
|
[
"MIT"
] | null | null | null |
best_practice/mobile_web/rdc/test_login_fail.py
|
kmissoumi/demo-python
|
1376d62c80374923edf7afa364697e4a6a9ce56b
|
[
"MIT"
] | 1
|
2022-01-07T22:13:29.000Z
|
2022-01-07T22:13:29.000Z
|
def test_valid_login(rdc_browser):
rdc_browser.get('https://www.saucedemo.com/v1')
rdc_browser.find_element_by_id('user-name').send_keys('locked_out_user')
rdc_browser.find_element_by_id('password').send_keys('secret_sauce')
rdc_browser.find_element_by_css_selector('.btn_action').click()
assert rdc_browser.find_element_by_css_selector('.error-button').is_displayed()
| 43.444444
| 83
| 0.785166
| 60
| 391
| 4.633333
| 0.583333
| 0.215827
| 0.201439
| 0.302158
| 0.42446
| 0.42446
| 0.244604
| 0
| 0
| 0
| 0
| 0.00277
| 0.076726
| 391
| 8
| 84
| 48.875
| 0.767313
| 0
| 0
| 0
| 0
| 0
| 0.245524
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.166667
| false
| 0.166667
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
0ed15c228176b65928e565e8010386c95c489aa6
| 18,885
|
py
|
Python
|
tests/data_asset/test_expectation_decorators.py
|
lfpll/great_expectations
|
f61fa7c2e6e813cd5ff84ab7403e05271cada257
|
[
"Apache-2.0"
] | 1
|
2020-04-10T18:07:58.000Z
|
2020-04-10T18:07:58.000Z
|
tests/data_asset/test_expectation_decorators.py
|
lfpll/great_expectations
|
f61fa7c2e6e813cd5ff84ab7403e05271cada257
|
[
"Apache-2.0"
] | null | null | null |
tests/data_asset/test_expectation_decorators.py
|
lfpll/great_expectations
|
f61fa7c2e6e813cd5ff84ab7403e05271cada257
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from great_expectations.core import (
ExpectationConfiguration,
ExpectationKwargs,
ExpectationValidationResult,
)
from great_expectations.data_asset import DataAsset
from great_expectations.dataset import MetaPandasDataset, PandasDataset
class ExpectationOnlyDataAsset(DataAsset):
@DataAsset.expectation([])
def no_op_expectation(
self, result_format=None, include_config=True, catch_exceptions=None, meta=None
):
return {"success": True}
@DataAsset.expectation(["value"])
def no_op_value_expectation(
self,
value=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return {"success": True}
@DataAsset.expectation([])
def exception_expectation(
self, result_format=None, include_config=True, catch_exceptions=None, meta=None
):
raise ValueError("Gotcha!")
def test_expectation_decorator_build_config():
eds = ExpectationOnlyDataAsset()
eds.no_op_expectation()
eds.no_op_value_expectation("a")
config = eds.get_expectation_suite()
assert (
ExpectationConfiguration(expectation_type="no_op_expectation", kwargs={})
== config.expectations[0]
)
assert (
ExpectationConfiguration(
expectation_type="no_op_value_expectation",
kwargs=ExpectationKwargs({"value": "a"}),
)
== config.expectations[1]
)
def test_expectation_decorator_include_config():
eds = ExpectationOnlyDataAsset()
out = eds.no_op_value_expectation("a", include_config=True)
assert (
ExpectationConfiguration(
expectation_type="no_op_value_expectation",
kwargs={"value": "a", "result_format": "BASIC"},
)
== out.expectation_config
)
def test_expectation_decorator_meta():
metadata = {"meta_key": "meta_value"}
eds = ExpectationOnlyDataAsset()
out = eds.no_op_value_expectation("a", meta=metadata)
config = eds.get_expectation_suite()
assert (
ExpectationValidationResult(
success=True, meta=metadata, expectation_config=config.expectations[0]
)
== out
)
assert (
ExpectationConfiguration(
expectation_type="no_op_value_expectation",
kwargs={"value": "a"},
meta=metadata,
)
== config.expectations[0]
)
def test_expectation_decorator_catch_exceptions():
eds = ExpectationOnlyDataAsset()
# Confirm that we would raise an error without catching exceptions
with pytest.raises(ValueError):
eds.exception_expectation(catch_exceptions=False)
# Catch exceptions and validate results
out = eds.exception_expectation(catch_exceptions=True)
assert out.exception_info["raised_exception"] is True
# Check only the first and last line of the traceback, since formatting can be platform dependent.
assert (
"Traceback (most recent call last):"
== out.exception_info["exception_traceback"].split("\n")[0]
)
assert (
"ValueError: Gotcha!"
== out.exception_info["exception_traceback"].split("\n")[-2]
)
def test_pandas_column_map_decorator_partial_exception_counts():
df = PandasDataset({"a": [0, 1, 2, 3, 4]})
out = df.expect_column_values_to_be_between(
"a",
3,
4,
result_format={"result_format": "COMPLETE", "partial_unexpected_count": 1},
)
assert 1 == len(out.result["partial_unexpected_counts"])
assert 3 == len(out.result["unexpected_list"])
def test_column_map_expectation_decorator():
# Create a new CustomPandasDataset to
# (1) demonstrate that custom subclassing works, and
# (2) Test expectation business logic without dependencies on any other functions.
class CustomPandasDataset(PandasDataset):
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_odd(self, column):
return column.map(lambda x: x % 2)
@MetaPandasDataset.column_map_expectation
def expectation_that_crashes_on_sixes(self, column):
return column.map(lambda x: (x - 6) / 0 != "duck")
df = CustomPandasDataset(
{
"all_odd": [1, 3, 5, 5, 5, 7, 9, 9, 9, 11],
"mostly_odd": [1, 3, 5, 7, 9, 2, 4, 1, 3, 5],
"all_even": [2, 4, 4, 6, 6, 6, 8, 8, 8, 8],
"odd_missing": [1, 3, 5, None, None, None, None, 1, 3, None],
"mixed_missing": [1, 3, 5, None, None, 2, 4, 1, 3, None],
"all_missing": [None, None, None, None, None, None, None, None, None, None],
}
)
df.set_default_expectation_argument("result_format", "COMPLETE")
df.set_default_expectation_argument("include_config", False)
assert df.expect_column_values_to_be_odd("all_odd") == ExpectationValidationResult(
result={
"element_count": 10,
"missing_count": 0,
"missing_percent": 0.0,
"partial_unexpected_counts": [],
"partial_unexpected_index_list": [],
"partial_unexpected_list": [],
"unexpected_count": 0,
"unexpected_index_list": [],
"unexpected_list": [],
"unexpected_percent": 0.0,
"unexpected_percent_nonmissing": 0.0,
},
success=True,
)
assert df.expect_column_values_to_be_odd(
"all_missing"
) == ExpectationValidationResult(
result={
"element_count": 10,
"missing_count": 10,
"missing_percent": 100.0,
"partial_unexpected_counts": [],
"partial_unexpected_index_list": [],
"partial_unexpected_list": [],
"unexpected_count": 0,
"unexpected_index_list": [],
"unexpected_list": [],
"unexpected_percent": 0.0,
"unexpected_percent_nonmissing": None,
},
success=True,
)
assert df.expect_column_values_to_be_odd(
"odd_missing"
) == ExpectationValidationResult(
result={
"element_count": 10,
"missing_count": 5,
"missing_percent": 50.0,
"partial_unexpected_counts": [],
"partial_unexpected_index_list": [],
"partial_unexpected_list": [],
"unexpected_count": 0,
"unexpected_index_list": [],
"unexpected_list": [],
"unexpected_percent": 0.0,
"unexpected_percent_nonmissing": 0.0,
},
success=True,
)
assert df.expect_column_values_to_be_odd(
"mixed_missing"
) == ExpectationValidationResult(
result={
"element_count": 10,
"missing_count": 3,
"missing_percent": 30.0,
"partial_unexpected_counts": [
{"value": 2.0, "count": 1},
{"value": 4.0, "count": 1},
],
"partial_unexpected_index_list": [5, 6],
"partial_unexpected_list": [2.0, 4.0],
"unexpected_count": 2,
"unexpected_index_list": [5, 6],
"unexpected_list": [2, 4],
"unexpected_percent": 20.0,
"unexpected_percent_nonmissing": (2 / 7 * 100),
},
success=False,
)
assert df.expect_column_values_to_be_odd(
"mostly_odd"
) == ExpectationValidationResult(
result={
"element_count": 10,
"missing_count": 0,
"missing_percent": 0,
"partial_unexpected_counts": [
{"value": 2.0, "count": 1},
{"value": 4.0, "count": 1},
],
"partial_unexpected_index_list": [5, 6],
"partial_unexpected_list": [2.0, 4.0],
"unexpected_count": 2,
"unexpected_index_list": [5, 6],
"unexpected_list": [2, 4],
"unexpected_percent": 20.0,
"unexpected_percent_nonmissing": 20.0,
},
success=False,
)
assert df.expect_column_values_to_be_odd(
"mostly_odd", mostly=0.6
) == ExpectationValidationResult(
result={
"element_count": 10,
"missing_count": 0,
"missing_percent": 0,
"partial_unexpected_counts": [
{"value": 2.0, "count": 1},
{"value": 4.0, "count": 1},
],
"partial_unexpected_index_list": [5, 6],
"partial_unexpected_list": [2.0, 4.0],
"unexpected_count": 2,
"unexpected_index_list": [5, 6],
"unexpected_list": [2, 4],
"unexpected_percent": 20.0,
"unexpected_percent_nonmissing": 20.0,
},
success=True,
)
assert df.expect_column_values_to_be_odd(
"mostly_odd", result_format="BOOLEAN_ONLY"
) == ExpectationValidationResult(success=False)
df.default_expectation_args["result_format"] = "BOOLEAN_ONLY"
assert df.expect_column_values_to_be_odd(
"mostly_odd"
) == ExpectationValidationResult(success=False)
df.default_expectation_args["result_format"] = "BASIC"
assert df.expect_column_values_to_be_odd(
"mostly_odd", include_config=True
) == ExpectationValidationResult(
expectation_config=ExpectationConfiguration(
expectation_type="expect_column_values_to_be_odd",
kwargs={"column": "mostly_odd", "result_format": "BASIC"},
),
result={
"element_count": 10,
"missing_count": 0,
"missing_percent": 0,
"partial_unexpected_list": [2, 4],
"unexpected_count": 2,
"unexpected_percent": 20.0,
"unexpected_percent_nonmissing": 20.0,
},
success=False,
)
def test_column_aggregate_expectation_decorator():
# Create a new CustomPandasDataset to
# (1) demonstrate that custom subclassing works, and
# (2) Test expectation business logic without dependencies on any other functions.
class CustomPandasDataset(PandasDataset):
@PandasDataset.column_aggregate_expectation
def expect_column_median_to_be_odd(self, column):
median = self.get_column_median(column)
return {"success": median % 2, "result": {"observed_value": median}}
df = CustomPandasDataset(
{
"all_odd": [1, 3, 5, 7, 9],
"all_even": [2, 4, 6, 8, 10],
"odd_missing": [1, 3, 5, None, None],
"mixed_missing": [1, 2, None, None, 6],
"mixed_missing_2": [1, 3, None, None, 6],
"all_missing": [None, None, None, None, None,],
}
)
df.set_default_expectation_argument("result_format", "COMPLETE")
df.set_default_expectation_argument("include_config", False)
assert df.expect_column_median_to_be_odd("all_odd") == ExpectationValidationResult(
result={
"observed_value": 5,
"element_count": 5,
"missing_count": 0,
"missing_percent": 0,
},
success=True,
)
assert df.expect_column_median_to_be_odd("all_even") == ExpectationValidationResult(
result={
"observed_value": 6,
"element_count": 5,
"missing_count": 0,
"missing_percent": 0,
},
success=False,
)
assert df.expect_column_median_to_be_odd(
"all_even", result_format="SUMMARY"
) == ExpectationValidationResult(
result={
"observed_value": 6,
"element_count": 5,
"missing_count": 0,
"missing_percent": 0,
},
success=False,
)
assert df.expect_column_median_to_be_odd(
"all_even", result_format="BOOLEAN_ONLY"
) == ExpectationValidationResult(success=False)
df.default_expectation_args["result_format"] = "BOOLEAN_ONLY"
assert df.expect_column_median_to_be_odd("all_even") == ExpectationValidationResult(
success=False
)
assert df.expect_column_median_to_be_odd(
"all_even", result_format="BASIC"
) == ExpectationValidationResult(
result={
"observed_value": 6,
"element_count": 5,
"missing_count": 0,
"missing_percent": 0,
},
success=False,
)
def test_column_pair_map_expectation_decorator():
    """Exercise the ``column_pair_map_expectation`` decorator on a custom subclass.

    Verifies pairwise comparison bookkeeping, the ``ignore_row_if`` modes
    (including rejection of invalid values), and the SUMMARY/BASIC/BOOLEAN_ONLY
    result formats.
    """
    # Create a new CustomPandasDataset to
    # (1) Demonstrate that custom subclassing works, and
    # (2) Test expectation business logic without dependencies on any other functions.
    class CustomPandasDataset(PandasDataset):
        @PandasDataset.column_pair_map_expectation
        def expect_column_pair_values_to_be_different(
            self,
            column_A,
            column_B,
            # NOTE(review): callers below pass ignore_row_if; keep_missing and
            # the remaining kwargs appear to be consumed by the decorator —
            # confirm against the decorator's signature handling.
            keep_missing="either",
            output_format=None,
            include_config=True,
            catch_exceptions=None,
        ):
            return column_A != column_B
    df = CustomPandasDataset(
        {
            "all_odd": [1, 3, 5, 7, 9],
            "all_even": [2, 4, 6, 8, 10],
            "odd_missing": [1, 3, 5, None, None],
            "mixed_missing": [1, 2, None, None, 6],
            "mixed_missing_2": [1, 3, None, None, 6],
            "all_missing": [None, None, None, None, None,],
        }
    )
    df.set_default_expectation_argument("result_format", "COMPLETE")
    df.set_default_expectation_argument("include_config", False)
    # Fully disjoint columns: no missing rows, no unexpected rows.
    assert df.expect_column_pair_values_to_be_different(
        "all_odd", "all_even"
    ) == ExpectationValidationResult(
        success=True,
        result={
            "element_count": 5,
            "missing_count": 0,
            "unexpected_count": 0,
            "missing_percent": 0.0,
            "unexpected_percent": 0.0,
            "unexpected_percent_nonmissing": 0.0,
            "unexpected_list": [],
            "unexpected_index_list": [],
            "partial_unexpected_list": [],
            "partial_unexpected_index_list": [],
            "partial_unexpected_counts": [],
        },
    )
    assert df.expect_column_pair_values_to_be_different(
        "all_odd", "all_even", ignore_row_if="both_values_are_missing",
    ) == ExpectationValidationResult(
        success=True,
        result={
            "element_count": 5,
            "missing_count": 0,
            "unexpected_count": 0,
            "missing_percent": 0.0,
            "unexpected_percent": 0.0,
            "unexpected_percent_nonmissing": 0.0,
            "unexpected_list": [],
            "unexpected_index_list": [],
            "partial_unexpected_list": [],
            "partial_unexpected_index_list": [],
            "partial_unexpected_counts": [],
        },
    )
    # Default missing handling: NaN pairs count as unexpected matches here —
    # rows 0-2 compare equal (1==1.0 etc.) so the expectation fails.
    assert df.expect_column_pair_values_to_be_different(
        "all_odd", "odd_missing"
    ) == ExpectationValidationResult(
        success=False,
        result={
            "element_count": 5,
            "missing_count": 0,
            "unexpected_count": 3,
            "missing_percent": 0.0,
            "unexpected_percent": 60.0,
            "unexpected_percent_nonmissing": 60.0,
            "unexpected_list": [(1, 1.0), (3, 3.0), (5, 5.0)],
            "unexpected_index_list": [0, 1, 2],
            "partial_unexpected_list": [(1, 1.0), (3, 3.0), (5, 5.0)],
            "partial_unexpected_index_list": [0, 1, 2],
            "partial_unexpected_counts": [
                {"count": 1, "value": (1, 1.0)},
                {"count": 1, "value": (3, 3.0)},
                {"count": 1, "value": (5, 5.0)},
            ],
        },
    )
    assert df.expect_column_pair_values_to_be_different(
        "all_odd", "odd_missing", ignore_row_if="both_values_are_missing"
    ) == ExpectationValidationResult(
        success=False,
        result={
            "element_count": 5,
            "missing_count": 0,
            "unexpected_count": 3,
            "missing_percent": 0.0,
            "unexpected_percent": 60.0,
            "unexpected_percent_nonmissing": 60.0,
            "unexpected_list": [(1, 1.0), (3, 3.0), (5, 5.0)],
            "unexpected_index_list": [0, 1, 2],
            "partial_unexpected_list": [(1, 1.0), (3, 3.0), (5, 5.0)],
            "partial_unexpected_index_list": [0, 1, 2],
            "partial_unexpected_counts": [
                {"count": 1, "value": (1, 1.0)},
                {"count": 1, "value": (3, 3.0)},
                {"count": 1, "value": (5, 5.0)},
            ],
        },
    )
    # "either_value_is_missing" treats the two NaN rows as missing, so the
    # 3 unexpected rows are 100% of the nonmissing population.
    assert df.expect_column_pair_values_to_be_different(
        "all_odd", "odd_missing", ignore_row_if="either_value_is_missing"
    ) == ExpectationValidationResult(
        success=False,
        result={
            "element_count": 5,
            "missing_count": 2,
            "unexpected_count": 3,
            "missing_percent": 40.0,
            "unexpected_percent": 60.0,
            "unexpected_percent_nonmissing": 100.0,
            "unexpected_list": [(1, 1.0), (3, 3.0), (5, 5.0)],
            "unexpected_index_list": [0, 1, 2],
            "partial_unexpected_list": [(1, 1.0), (3, 3.0), (5, 5.0)],
            "partial_unexpected_index_list": [0, 1, 2],
            "partial_unexpected_counts": [
                {"count": 1, "value": (1, 1.0)},
                {"count": 1, "value": (3, 3.0)},
                {"count": 1, "value": (5, 5.0)},
            ],
        },
    )
    # An unrecognized ignore_row_if value must be rejected.
    with pytest.raises(ValueError):
        df.expect_column_pair_values_to_be_different(
            "all_odd", "odd_missing", ignore_row_if="blahblahblah"
        )
    # Test SUMMARY, BASIC, and BOOLEAN_ONLY output_formats
    assert df.expect_column_pair_values_to_be_different(
        "all_odd", "all_even", result_format="SUMMARY"
    ) == ExpectationValidationResult(
        success=True,
        result={
            "element_count": 5,
            "missing_count": 0,
            "unexpected_count": 0,
            "missing_percent": 0.0,
            "unexpected_percent": 0.0,
            "unexpected_percent_nonmissing": 0.0,
            "partial_unexpected_list": [],
            "partial_unexpected_index_list": [],
            "partial_unexpected_counts": [],
        },
    )
    assert df.expect_column_pair_values_to_be_different(
        "all_odd", "all_even", result_format="BASIC"
    ) == ExpectationValidationResult(
        success=True,
        result={
            "element_count": 5,
            "missing_count": 0,
            "unexpected_count": 0,
            "missing_percent": 0.0,
            "unexpected_percent": 0.0,
            "unexpected_percent_nonmissing": 0.0,
            "partial_unexpected_list": [],
        },
    )
    assert df.expect_column_pair_values_to_be_different(
        "all_odd", "all_even", result_format="BOOLEAN_ONLY"
    ) == ExpectationValidationResult(success=True)
| 33.424779
| 102
| 0.579613
| 1,967
| 18,885
| 5.237926
| 0.087443
| 0.043774
| 0.033971
| 0.044647
| 0.803358
| 0.781132
| 0.753373
| 0.730079
| 0.695331
| 0.660875
| 0
| 0.032999
| 0.297167
| 18,885
| 564
| 103
| 33.484043
| 0.743238
| 0.040032
| 0
| 0.639344
| 0
| 0
| 0.245653
| 0.10069
| 0
| 0
| 0
| 0
| 0.067623
| 1
| 0.030738
| false
| 0
| 0.008197
| 0.010246
| 0.059426
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0ee42d7d20a116a80d10b2bff0cf787200a8bfbb
| 76
|
py
|
Python
|
colorchron/led/__init__.py
|
harmsm/pantone
|
23f7875680666f5546757f988e872f86ad76b888
|
[
"MIT"
] | null | null | null |
colorchron/led/__init__.py
|
harmsm/pantone
|
23f7875680666f5546757f988e872f86ad76b888
|
[
"MIT"
] | null | null | null |
colorchron/led/__init__.py
|
harmsm/pantone
|
23f7875680666f5546757f988e872f86ad76b888
|
[
"MIT"
] | null | null | null |
from .base import LED
from .gpio import GPIO
from .neopixel import Neopixel
| 19
| 30
| 0.802632
| 12
| 76
| 5.083333
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 76
| 3
| 31
| 25.333333
| 0.953125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
163a933e68d5b013a7b3c13df955469180ef3b2f
| 96
|
py
|
Python
|
eero_adguard_sync/models/adguard/__init__.py
|
amickael/eero-adguard-sync
|
820db5cc69b96c25b0e2bdef83d911cac6e4ade1
|
[
"MIT"
] | 2
|
2022-01-09T16:04:37.000Z
|
2022-01-09T19:14:48.000Z
|
eero_adguard_sync/models/adguard/__init__.py
|
amickael/eero-adguard-sync
|
820db5cc69b96c25b0e2bdef83d911cac6e4ade1
|
[
"MIT"
] | 1
|
2022-03-25T20:27:58.000Z
|
2022-03-28T00:39:04.000Z
|
eero_adguard_sync/models/adguard/__init__.py
|
amickael/eero-adguard-sync
|
820db5cc69b96c25b0e2bdef83d911cac6e4ade1
|
[
"MIT"
] | null | null | null |
from .client_device import AdGuardClientDevice
from .credential_set import AdGuardCredentialSet
| 32
| 48
| 0.895833
| 10
| 96
| 8.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 96
| 2
| 49
| 48
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
16b01d539f174d5e69dbaafa1a74484de4400357
| 125
|
py
|
Python
|
Python/Tests/TestData/WFastCgi/BadHandler7/myapp.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 404
|
2019-05-07T02:21:57.000Z
|
2022-03-31T17:03:04.000Z
|
Python/Tests/TestData/WFastCgi/BadHandler7/myapp.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 1,672
|
2019-05-06T21:09:38.000Z
|
2022-03-31T23:16:04.000Z
|
Python/Tests/TestData/WFastCgi/BadHandler7/myapp.py
|
RaymonGulati1/PTVS
|
ee1d09f2a94be4e21016f7579205bb65ec82c616
|
[
"Apache-2.0"
] | 186
|
2019-05-13T03:17:37.000Z
|
2022-03-31T16:24:05.000Z
|
# Presumably a WFastCgi test fixture (path suggests TestData/BadHandler7):
# writes to both output streams, then raises — the raise is deliberate, so the
# caller's error-handling path can be exercised. NOTE(review): confirm against
# the consuming test before treating the exception as a bug.
import sys
sys.stderr.write('something to std err')
print('something to std out')
raise Exception('handler file is raising')
| 25
| 42
| 0.768
| 20
| 125
| 4.8
| 0.8
| 0.229167
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 125
| 4
| 43
| 31.25
| 0.872727
| 0
| 0
| 0
| 0
| 0
| 0.504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
16d91e9e1300e3185f0905a42d188b7a165d1a2a
| 276
|
py
|
Python
|
app/source/geo_django_rf/restapi/serializers/__init__.py
|
JanNash/geo-django-rf-server
|
57a2d12204cdd8abceaa1c46c22f2947a8d45c20
|
[
"BSD-3-Clause"
] | null | null | null |
app/source/geo_django_rf/restapi/serializers/__init__.py
|
JanNash/geo-django-rf-server
|
57a2d12204cdd8abceaa1c46c22f2947a8d45c20
|
[
"BSD-3-Clause"
] | null | null | null |
app/source/geo_django_rf/restapi/serializers/__init__.py
|
JanNash/geo-django-rf-server
|
57a2d12204cdd8abceaa1c46c22f2947a8d45c20
|
[
"BSD-3-Clause"
] | null | null | null |
# Re-export the individual serializer classes at package level so callers can
# import them directly from this package.
from .user_serializer import UserSerializer
from .group_serializer import GroupSerializer
from .profile_serializer import ProfileSerializer
from .photo_serializer import PhotoSerializer
# Explicit public API of this package.
__all__ = ['UserSerializer', 'GroupSerializer', 'ProfileSerializer', 'PhotoSerializer']
| 34.5
| 87
| 0.847826
| 25
| 276
| 9.04
| 0.48
| 0.283186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 276
| 7
| 88
| 39.428571
| 0.896825
| 0
| 0
| 0
| 0
| 0
| 0.221014
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bc458174a34fb2c055273244f97056d8a0c44a39
| 45,408
|
pyt
|
Python
|
EBAR Tools.pyt
|
NatureServe-Canada/EBARTools
|
b7e24f4fcec4eb2f2073794266b382f8a964f73e
|
[
"CC-BY-4.0"
] | 6
|
2019-10-03T17:35:19.000Z
|
2022-02-17T22:06:07.000Z
|
EBAR Tools.pyt
|
NatureServe-Canada/EBARTools
|
b7e24f4fcec4eb2f2073794266b382f8a964f73e
|
[
"CC-BY-4.0"
] | 6
|
2022-03-28T15:37:31.000Z
|
2022-03-30T17:16:15.000Z
|
EBAR Tools.pyt
|
NatureServe-Canada/EBARTools
|
b7e24f4fcec4eb2f2073794266b382f8a964f73e
|
[
"CC-BY-4.0"
] | 2
|
2021-12-29T00:37:00.000Z
|
2022-01-30T01:04:28.000Z
|
# Project: Ecosytem-based Automated Range Mapping (EBAR)
# Credits: Randal Greene, Christine Terwissen, Meg Southee
# © NatureServe Canada 2020 under CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/)
# Program: EBAR Tools.pyt
# ArcGIS Python toolbox for importing species datasets and generating EBAR maps
# Notes:
# - following 120 maximum line length "convention"
# - tested with ArcGIS Pro 2.8.1
# import python packages
import arcpy
import ImportTabularDataTool
import ImportSpatialDataTool
import GenerateRangeMapTool
import ListElementNationalIDsTool
import SyncSpeciesListBioticsTool
import AddSynonymsTool
import ImportExternalRangeReviewTool
import SyncSpeciesListKBATool
import BuildEBARDownloadTableTool
import BuildBulkDownloadTableTool
import ExportInputDataTool
import FlagBadDataUsingRangeTool
import DeleteRangeMapTool
import ImportVisitsTool
import SummarizeDownloadsTool
import PublishRangeMapTool
import PublishRangeMapSetsTool
import EBARUtils
import datetime
import locale
class Toolbox(object):
    """ArcGIS Python toolbox entry point (the toolbox name comes from the .pyt file name)."""

    def __init__(self):
        """Register the toolbox label, alias and the tool classes it exposes."""
        self.label = 'EBAR Tools'
        self.alias = ''
        # The order of this list controls how the tools are presented in ArcGIS.
        self.tools = [
            ImportTabularData,
            ImportSpatialData,
            GenerateRangeMap,
            ListElementNationalIDs,
            SyncSpeciesListBiotics,
            AddSynonyms,
            ImportExternalRangeReview,
            SyncSpeciesListKBA,
            BuildEBARDownloadTable,
            BuildBulkDownloadTable,
            ExportInputData,
            FlagBadDataUsingRange,
            DeleteRangeMap,
            ImportVisits,
            SummarizeDownloads,
            PublishRangeMap,
            PublishRangeMapSets,
        ]
class ImportTabularData(object):
    """Geoprocessing tool: import a delimited text file into the EBAR geodatabase."""

    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = 'Import Tabular Data'
        self.description = 'Imports tabular data into the InputDataset and InputPoint tables of the EBAR geodatabase'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Define parameter definitions"""
        # Geodatabase (local or remote EBAR workspace)
        param_geodatabase = arcpy.Parameter(
            displayName='Geodatabase',
            name='geodatabase',
            datatype='DEWorkspace',
            parameterType='Required',
            direction='Input')
        param_geodatabase.filter.list = ['Local Database', 'Remote Database']
        # Raw Data File (delimited text only)
        param_raw_data_file = arcpy.Parameter(
            displayName='Raw Data File',
            name='raw_data_file',
            datatype='DEFile',
            parameterType='Required',
            direction='Input')
        param_raw_data_file.filter.list = ['txt', 'csv']
        # Dataset Name
        param_dataset_name = arcpy.Parameter(
            displayName='Dataset Name',
            name='dataset_name',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        # Dataset Source
        param_dataset_source = arcpy.Parameter(
            displayName='Dataset Source',
            name='dataset_source',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        # Date Received — defaults to today in the locale's short date format
        param_date_received = arcpy.Parameter(
            displayName='Date Received',
            name='date_received',
            datatype='GPDate',
            parameterType='Required',
            direction='Input')
        locale.setlocale(locale.LC_ALL, '')
        param_date_received.value = datetime.datetime.now().strftime('%x')
        # Dataset Restrictions (picklist populated dynamically in updateParameters)
        param_dataset_restrictions = arcpy.Parameter(
            displayName='Dataset Restrictions',
            name='dataset_restrictions',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        param_dataset_restrictions.value = 'Non-restricted'
        params = [param_geodatabase, param_raw_data_file, param_dataset_name, param_dataset_source,
                  param_date_received, param_dataset_restrictions]
        return params

    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True

    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal validation is performed. This method is
        called whenever a parameter has been changed."""
        ## Dataset Source needs to be a textbox, not a filtered picklist,
        ## because filtered picklists cannot be dynamic when published to a geoprocessing service
        #if parameters[0].altered and parameters[0].value:
        #    parameters[3].filter.list = EBARUtils.readDatasetSources(parameters[0].valueAsText, "('T')")
        # FIX: only query domains once a geodatabase has been provided; calling
        # ListDomains with an empty workspace fails (same guard GenerateRangeMap uses).
        if parameters[0].altered and parameters[0].value:
            domains = arcpy.da.ListDomains(parameters[0].valueAsText)
            restrictions_list = []
            for domain in domains:
                if domain.name == 'Restriction':
                    restrictions_list = list(domain.codedValues.values())
            parameters[5].filter.list = sorted(restrictions_list)
        return

    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool parameter. This method is called
        after internal validation."""
        return

    def execute(self, parameters, messages):
        """The source code of the tool."""
        itd = ImportTabularDataTool.ImportTabularDataTool()
        itd.runImportTabularDataTool(parameters, messages)
        return
class ImportSpatialData(object):
    """Geoprocessing tool: import a shapefile or feature class into the EBAR geodatabase."""

    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = 'Import Spatial Data'
        self.description = 'Imports spatial data from a shapefile or feature class into the InputDataset table of ' + \
                           'the EBAR geodatabase and one of the InputPolygon, InputPoint or InputLine feature classes'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Define parameter definitions"""
        # Geodatabase (local or remote EBAR workspace)
        param_geodatabase = arcpy.Parameter(
            displayName='Geodatabase',
            name='geodatabase',
            datatype='DEWorkspace',
            parameterType='Required',
            direction='Input')
        param_geodatabase.filter.list = ['Local Database', 'Remote Database']
        # Feature Class to Import
        param_import_feature_class = arcpy.Parameter(
            displayName='Import Feature Class',
            name='import_feature_class',
            datatype='GPFeatureLayer',
            parameterType='Required',
            direction='Input')
        param_import_feature_class.filter.list = ['Point', 'Multipoint', 'Polyline', 'Polygon', 'MultiPatch']
        # Dataset Name
        param_dataset_name = arcpy.Parameter(
            displayName='Dataset Name',
            name='dataset_name',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        # Dataset Source
        # - used to check for uniqueness of records using provided IDs
        # - one field map can be shared among multiple sources
        param_dataset_source = arcpy.Parameter(
            displayName='Dataset Source',
            name='dataset_source',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        # Date Received — defaults to today in the locale's short date format
        param_date_received = arcpy.Parameter(
            displayName='Date Received',
            name='date_received',
            datatype='GPDate',
            parameterType='Required',
            direction='Input')
        locale.setlocale(locale.LC_ALL, '')
        param_date_received.value = datetime.datetime.now().strftime('%x')
        # Dataset Restrictions (picklist populated dynamically in updateParameters)
        param_dataset_restrictions = arcpy.Parameter(
            displayName='Dataset Restrictions',
            name='dataset_restrictions',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        param_dataset_restrictions.value = 'Non-restricted'
        params = [param_geodatabase, param_import_feature_class, param_dataset_name, param_dataset_source,
                  param_date_received, param_dataset_restrictions]
        return params

    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True

    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal validation is performed. This method is
        called whenever a parameter has been changed."""
        ## Dataset Source needs to be a textbox, not a filtered picklist,
        ## because filtered picklists cannot be dynamic when published to a geoprocessing service
        #if parameters[0].altered and parameters[0].value:
        #    parameters[3].filter.list = EBARUtils.readDatasetSources(parameters[0].valueAsText, "('S', 'L', 'P')")
        # FIX: only query domains once a geodatabase has been provided; calling
        # ListDomains with an empty workspace fails (same guard GenerateRangeMap uses).
        if parameters[0].altered and parameters[0].value:
            domains = arcpy.da.ListDomains(parameters[0].valueAsText)
            restrictions_list = []
            for domain in domains:
                if domain.name == 'Restriction':
                    restrictions_list = list(domain.codedValues.values())
            parameters[5].filter.list = sorted(restrictions_list)
        return

    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool parameter. This method is called
        after internal validation."""
        return

    def execute(self, parameters, messages):
        """The source code of the tool."""
        isd = ImportSpatialDataTool.ImportSpatialDataTool()
        isd.runImportSpatialDataTool(parameters, messages)
        return
class GenerateRangeMap(object):
    """Geoprocessing tool: build a range map for a species from available spatial data."""
    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = 'Generate Range Map'
        self.description = 'Generate Range Map for a species from available spatial data'
        self.canRunInBackground = True
    def getParameterInfo(self):
        """Define parameter definitions"""
        # Geodatabase (local or remote EBAR workspace)
        param_geodatabase = arcpy.Parameter(
            displayName='Geodatabase',
            name='geodatabase',
            datatype='DEWorkspace',
            parameterType='Required',
            direction='Input')
        param_geodatabase.filter.list = ['Local Database', 'Remote Database']
        # Species (free-text; cannot be a dynamic picklist in a geoprocessing service)
        param_species = arcpy.Parameter(
            displayName='Species Scientific Name',
            name='species',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        # Secondary Species (optional, multi-valued)
        param_secondary = arcpy.Parameter(
            displayName='Secondary Species',
            name='secondary_species',
            datatype='GPString',
            parameterType='Optional',
            direction='Input',
            multiValue=True)
        # Range Version (defaults to '1.0')
        param_version = arcpy.Parameter(
            displayName='Range Version',
            name='range_version',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        param_version.value = '1.0'
        # Range Stage (free text with a default)
        param_stage = arcpy.Parameter(
            displayName='Range Stage',
            name='range_stage',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        # cannot pre-specify the list if you want to allow a value not in the list
        #param_stage.filter.list = ['Auto-generated', 'Expert reviewed', 'Published']
        param_stage.value = 'Auto-generated'
        # Scope (optional, fixed picklist)
        param_scope = arcpy.Parameter(
            displayName='Scope',
            name='scope',
            datatype='GPString',
            parameterType='Optional',
            direction='Input')
        param_scope.filter.list = ['Canadian', 'Global', 'North American']
        # Jurisdictions Covered (picklist filled dynamically in updateParameters)
        param_jurisdictions_covered = arcpy.Parameter(
            displayName='Jurisdictions Covered',
            name='jurisdictions_covered',
            datatype='GPString',
            parameterType='Optional',
            direction='Input',
            multiValue=True)
        # Custom Polygons Covered (optional polygon layer)
        param_custom_polygons_covered = arcpy.Parameter(
            displayName='Custom Polygons Covered',
            name='custom_polygons_covered',
            datatype='GPFeatureLayer',
            parameterType='Optional',
            direction='Input')
        param_custom_polygons_covered.filter.list = ['Polygon', 'MultiPatch']
        params = [param_geodatabase, param_species, param_secondary, param_version, param_stage, param_scope,
                  param_jurisdictions_covered, param_custom_polygons_covered]
        return params
    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True
    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal validation is performed. This method is
        called whenever a parameter has been changed."""
        # filter list of species
        # make sure there is a geodatabase specified
        ## species need to be textboxes, not filtered picklists,
        ## because filtered picklists cannot be dynamic when published to a geoprocessing service
        #if parameters[0].altered and parameters[0].value:
        #    param_geodatabase = parameters[0].valueAsText
        #    spec_list = []
        #    with arcpy.da.SearchCursor(param_geodatabase + '/BIOTICS_ELEMENT_NATIONAL', ['NATIONAL_SCIENTIFIC_NAME'],
        #                               sql_clause=(None,'ORDER BY NATIONAL_SCIENTIFIC_NAME')) as cursor:
        #        for row in EBARUtils.searchCursor(cursor):
        #            spec_list.append(row['NATIONAL_SCIENTIFIC_NAME'])
        #        if len(spec_list) > 0:
        #            del row
        #    parameters[1].filter.list = spec_list
        #    parameters[2].filter.list = spec_list
        # allow a stage value in addition to the ones in the standard list
        ## only works for optional field when published to geoprocessing service
        ##if parameters[4].altered:
        #stage_list = ['Auto-generated', 'Expert reviewed', 'Published']
        #if parameters[4].value:
        #    if parameters[4].valueAsText not in stage_list:
        #        stage_list.append(parameters[4].valueAsText)
        #parameters[4].filter.list = stage_list
        # build list of jurisdictions (exclude AC, NF, LB because they are used for data only, not ecoshapes)
        # Only query once a geodatabase path is available.
        if parameters[0].altered and parameters[0].value:
            param_geodatabase = parameters[0].valueAsText
            jur_list = []
            with arcpy.da.SearchCursor(param_geodatabase + '/Jurisdiction', ['JurisdictionName'],
                                       "JurisdictionAbbreviation NOT IN ('AC', 'NF', 'LB')",
                                       sql_clause=(None,'ORDER BY JurisdictionName')) as cursor:
                for row in EBARUtils.searchCursor(cursor):
                    jur_list.append(row['JurisdictionName'])
                # release the last cursor row, guarded so an empty cursor
                # (row never bound) doesn't raise NameError
                if len(jur_list) > 0:
                    del row
            # index 6 is the Jurisdictions Covered parameter declared above
            parameters[6].filter.list = jur_list
        return
    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool parameter. This method is called
        after internal validation."""
        return
    def execute(self, parameters, messages):
        """The source code of the tool."""
        grm = GenerateRangeMapTool.GenerateRangeMapTool()
        grm.runGenerateRangeMapTool(parameters, messages)
        return
class ListElementNationalIDs(object):
    """Geoprocessing tool wrapper that lists ELEMENT_NATIONAL_ID values."""

    def __init__(self):
        """Populate the tool's label, description and execution settings."""
        self.label = 'List Element National IDs'
        self.description = ('Generate a comma-separated list of ELEMENT_NATIONAL_ID values from existing ' +
                            'BIOTICS_ELEMENT_NATIONAL table')
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Declare the single Geodatabase parameter."""
        # Geodatabase workspace (local or remote EBAR geodatabase)
        geodatabase_param = arcpy.Parameter(
            displayName='Geodatabase',
            name='geodatabase',
            datatype='DEWorkspace',
            parameterType='Required',
            direction='Input')
        geodatabase_param.filter.list = ['Local Database', 'Remote Database']
        # An Output Folder (DEFolder) parameter once existed here but is disabled.
        return [geodatabase_param]

    def isLicensed(self):
        """Always licensed."""
        return True

    def updateParameters(self, parameters):
        """No dynamic parameter behaviour for this tool."""
        return

    def updateMessages(self, parameters):
        """No custom validation messages for this tool."""
        return

    def execute(self, parameters, messages):
        """Delegate the real work to ListElementNationalIDsTool."""
        tool = ListElementNationalIDsTool.ListElementNationalIDsTool()
        tool.runListElementNationalIDsTool(parameters, messages)
        return
class SyncSpeciesListBiotics(object):
    """Geoprocessing tool wrapper that syncs species tables with Biotics."""

    def __init__(self):
        """Populate the tool's label, description and execution settings."""
        self.label = 'Sync Species List Biotics'
        self.description = 'Synchronize the BIOTICS_NATIONAL_ELEMENT and Species tables with Biotics'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Declare the Geodatabase and CSV File parameters."""
        # Geodatabase workspace (local or remote EBAR geodatabase)
        geodatabase_param = arcpy.Parameter(
            displayName='Geodatabase',
            name='geodatabase',
            datatype='DEWorkspace',
            parameterType='Required',
            direction='Input')
        geodatabase_param.filter.list = ['Local Database', 'Remote Database']
        # Delimited text file with the Biotics export
        csv_param = arcpy.Parameter(
            displayName='CSV File',
            name='csv_file',
            datatype='DEFile',
            parameterType='Required',
            direction='Input')
        csv_param.filter.list = ['txt', 'csv']
        return [geodatabase_param, csv_param]

    def isLicensed(self):
        """Always licensed."""
        return True

    def updateParameters(self, parameters):
        """No dynamic parameter behaviour for this tool."""
        return

    def updateMessages(self, parameters):
        """No custom validation messages for this tool."""
        return

    def execute(self, parameters, messages):
        """Delegate the real work to SyncSpeciesListBioticsTool."""
        tool = SyncSpeciesListBioticsTool.SyncSpeciesListBioticsTool()
        tool.runSyncSpeciesListBioticsTool(parameters, messages)
        return
class AddSynonyms(object):
    """Geoprocessing tool wrapper that imports missing Biotics synonyms."""

    def __init__(self):
        """Populate the tool's label, description and execution settings."""
        self.label = 'Add Synonyms'
        self.description = 'Add BIOTICS Synonyms not already in the Species or Synonym tables'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Declare the Geodatabase and CSV File parameters."""
        # Geodatabase workspace (local or remote EBAR geodatabase)
        geodatabase_param = arcpy.Parameter(
            displayName='Geodatabase',
            name='geodatabase',
            datatype='DEWorkspace',
            parameterType='Required',
            direction='Input')
        geodatabase_param.filter.list = ['Local Database', 'Remote Database']
        # Delimited text file with the synonyms to add
        csv_param = arcpy.Parameter(
            displayName='CSV File',
            name='csv_file',
            datatype='DEFile',
            parameterType='Required',
            direction='Input')
        csv_param.filter.list = ['txt', 'csv']
        return [geodatabase_param, csv_param]

    def isLicensed(self):
        """Always licensed."""
        return True

    def updateParameters(self, parameters):
        """No dynamic parameter behaviour for this tool."""
        return

    def updateMessages(self, parameters):
        """No custom validation messages for this tool."""
        return

    def execute(self, parameters, messages):
        """Delegate the real work to AddSynonymsTool."""
        tool = AddSynonymsTool.AddSynonymsTool()
        tool.runAddSynonymsTool(parameters, messages)
        return
class ImportExternalRangeReview(object):
    """Geoprocessing tool: create review records for a range map from third-party polygons."""
    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = 'Import External Range Review'
        self.description = 'Create review records for an existing range map based on third-party polygons'
        self.canRunInBackground = True
    def getParameterInfo(self):
        """Define parameter definitions"""
        # Geodatabase (local or remote EBAR workspace)
        param_geodatabase = arcpy.Parameter(
            displayName='Geodatabase',
            name='geodatabase',
            datatype='DEWorkspace',
            parameterType='Required',
            direction='Input')
        param_geodatabase.filter.list = ['Local Database', 'Remote Database']
        # Species (free-text scientific name)
        param_species = arcpy.Parameter(
            displayName='Species Scientific Name',
            name='species',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        # Secondary Species (optional, multi-valued)
        param_secondary = arcpy.Parameter(
            displayName='Secondary Species',
            name='secondary_species',
            datatype='GPString',
            parameterType='Optional',
            direction='Input',
            multiValue=True)
        # Range Version (defaults to '1.0')
        param_version = arcpy.Parameter(
            displayName='Range Version',
            name='range_version',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        param_version.value = '1.0'
        # Range Stage (defaults to 'Auto-generated')
        param_stage = arcpy.Parameter(
            displayName='Range Stage',
            name='range_stage',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        param_stage.value = 'Auto-generated'
        # External Range Polygons
        param_external_range_table = arcpy.Parameter(
            displayName='External Range Table',
            name='external_range_table',
            datatype='GPTableView',
            parameterType='Required',
            direction='Input')
        # Presence Field (optional field name in the external table)
        param_presence_field = arcpy.Parameter(
            displayName='Presence Field',
            name='presence_field',
            datatype='GPString',
            parameterType='Optional',
            direction='Input')
        # Review Label (tag recorded on the created review records)
        param_review_label = arcpy.Parameter(
            displayName='Review Label',
            name='review_label',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        # Jurisdictions Covered (picklist filled dynamically in updateParameters)
        param_jurisdictions_covered = arcpy.Parameter(
            displayName='Jurisdictions Covered',
            name='jurisdictions_covered',
            datatype='GPString',
            parameterType='Optional',
            direction='Input',
            multiValue=True)
        # Username (reviewer to attribute the review records to)
        param_username = arcpy.Parameter(
            displayName='Username',
            name='username',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        params = [param_geodatabase, param_species, param_secondary, param_version, param_stage,
                  param_external_range_table, param_presence_field, param_review_label, param_jurisdictions_covered,
                  param_username]
        return params
    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True
    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal validation is performed. This method is
        called whenever a parameter has been changed."""
        # build list of jurisdictions (exclude AC, NF, LB because they are used for data only, not ecoshapes)
        # Only query once a geodatabase path is available.
        if parameters[0].altered and parameters[0].value:
            param_geodatabase = parameters[0].valueAsText
            jur_list = []
            with arcpy.da.SearchCursor(param_geodatabase + '/Jurisdiction', ['JurisdictionName'],
                                       "JurisdictionAbbreviation NOT IN ('AC', 'NF', 'LB')",
                                       sql_clause=(None,'ORDER BY JurisdictionName')) as cursor:
                for row in EBARUtils.searchCursor(cursor):
                    jur_list.append(row['JurisdictionName'])
                # release the last cursor row, guarded so an empty cursor
                # (row never bound) doesn't raise NameError
                if len(jur_list) > 0:
                    del row
            # index 8 is the Jurisdictions Covered parameter declared above
            parameters[8].filter.list = jur_list
        return
    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool parameter. This method is called
        after internal validation."""
        return
    def execute(self, parameters, messages):
        """The source code of the tool."""
        ierr = ImportExternalRangeReviewTool.ImportExternalRangeReviewTool()
        ierr.runImportExternalRangeReviewTool(parameters, messages)
        return
class SyncSpeciesListKBA(object):
    """Geoprocessing tool wrapper that syncs species tables with WCS KBA updates."""

    def __init__(self):
        """Populate the tool's label, description and execution settings."""
        self.label = 'Sync Species List KBA'
        self.description = 'Synchronize the Species tables with WCS KBA updates'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Declare the Geodatabase and CSV File parameters."""
        # Geodatabase workspace (local or remote EBAR geodatabase)
        geodatabase_param = arcpy.Parameter(
            displayName='Geodatabase',
            name='geodatabase',
            datatype='DEWorkspace',
            parameterType='Required',
            direction='Input')
        geodatabase_param.filter.list = ['Local Database', 'Remote Database']
        # Delimited text file with the KBA updates
        csv_param = arcpy.Parameter(
            displayName='CSV File',
            name='csv_file',
            datatype='DEFile',
            parameterType='Required',
            direction='Input')
        csv_param.filter.list = ['txt', 'csv']
        return [geodatabase_param, csv_param]

    def isLicensed(self):
        """Always licensed."""
        return True

    def updateParameters(self, parameters):
        """No dynamic parameter behaviour for this tool."""
        return

    def updateMessages(self, parameters):
        """No custom validation messages for this tool."""
        return

    def execute(self, parameters, messages):
        """Delegate the real work to SyncSpeciesListKBATool."""
        tool = SyncSpeciesListKBATool.SyncSpeciesListKBATool()
        tool.runSyncSpeciesListKBATool(parameters, messages)
        return
class BuildEBARDownloadTable(object):
    """Toolbox tool that builds the html table of all downloadable Range Maps."""

    def __init__(self):
        """Tool definition; the class name doubles as the tool name."""
        self.label = 'Build EBAR Download Table'
        self.description = 'Build html table of all Range Maps available for download'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Declare the tool's input parameters (none for this tool)."""
        return []

    def isLicensed(self):
        """Report whether the tool is licensed to run."""
        return True

    def updateParameters(self, parameters):
        """Adjust parameter values/properties before internal validation; runs
        whenever a parameter changes. No dynamic behaviour for this tool."""
        return

    def updateMessages(self, parameters):
        """Adjust validation messages after internal validation. No custom
        messages for this tool."""
        return

    def execute(self, parameters, messages):
        """Delegate execution to the implementation class."""
        tool = BuildEBARDownloadTableTool.BuildEBARDownloadTableTool()
        tool.runBuildEBARDownloadTableTool(parameters, messages)
        return
class BuildBulkDownloadTable(object):
    """Toolbox tool that builds the html table of bulk-downloadable Category - Taxa Groups."""

    def __init__(self):
        """Tool definition; the class name doubles as the tool name."""
        self.label = 'Build Bulk Download Table'
        self.description = 'Build html table of all Category - Taxa Groups available for bulk download'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Declare the tool's input parameters (none for this tool)."""
        return []

    def isLicensed(self):
        """Report whether the tool is licensed to run."""
        return True

    def updateParameters(self, parameters):
        """Adjust parameter values/properties before internal validation; runs
        whenever a parameter changes. No dynamic behaviour for this tool."""
        return

    def updateMessages(self, parameters):
        """Adjust validation messages after internal validation. No custom
        messages for this tool."""
        return

    def execute(self, parameters, messages):
        """Delegate execution to the implementation class."""
        tool = BuildBulkDownloadTableTool.BuildBulkDownloadTableTool()
        tool.runBuildBulkDownloadTableTool(parameters, messages)
        return
class ExportInputData(object):
    """Toolbox tool that exports InputPoint/Line/Polygon records, excluding
    "other" DatasetTypes and EBAR Restricted records."""

    def __init__(self):
        """Tool definition; the class name doubles as the tool name."""
        self.label = 'Export Input Data'
        self.description = 'Export InputPoint/Line/Polygon records, excluding "other" DatasetTypes and EBAR Restricted records'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Declare the tool's input parameters."""
        # input geodatabase (file or enterprise)
        gdb_param = arcpy.Parameter(
            displayName='Geodatabase',
            name='geodatabase',
            datatype='DEWorkspace',
            parameterType='Required',
            direction='Input')
        gdb_param.filter.list = ['Local Database', 'Remote Database']
        # jurisdictions to export; picklist populated in updateParameters
        jurisdictions_param = arcpy.Parameter(
            displayName='Jurisdictions Covered',
            name='jurisdictions_covered',
            datatype='GPString',
            parameterType='Required',
            direction='Input',
            multiValue=True)
        # whether CDC data is included (off by default)
        include_cdc_param = arcpy.Parameter(
            displayName='Include CDC Data',
            name='include_cdc',
            datatype='GPBoolean',
            parameterType='Required',
            direction='Input')
        include_cdc_param.value = 'false'
        # whether restricted data is included (off by default)
        include_restricted_param = arcpy.Parameter(
            displayName='Include Restricted Data',
            name='include_restricted',
            datatype='GPBoolean',
            parameterType='Required',
            direction='Input')
        include_restricted_param.value = 'false'
        # NOTE: an 'Include Other Dataset Types' boolean parameter was removed
        # from the interface; re-add here if that option is ever reinstated
        # name of the zip file to create
        output_zip_param = arcpy.Parameter(
            displayName='Output Zip File Name',
            name='output_zip',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        return [gdb_param, jurisdictions_param, include_cdc_param, include_restricted_param,
                output_zip_param]

    def isLicensed(self):
        """Report whether the tool is licensed to run."""
        return True

    def updateParameters(self, parameters):
        """Adjust parameter values/properties before internal validation; runs
        whenever a parameter changes."""
        # Populate the jurisdictions picklist, skipping the individual Atlantic
        # Canadian jurisdictions because they are lumped together as AC.
        if parameters[0].altered and parameters[0].value:
            gdb = parameters[0].valueAsText
            jurisdictions = []
            where_clause = "JurisdictionAbbreviation NOT IN ('NL', 'NS', 'NB', 'PE', 'NF', 'LB')"
            with arcpy.da.SearchCursor(gdb + '/Jurisdiction', ['JurisdictionName'], where_clause,
                                       sql_clause=(None, 'ORDER BY JurisdictionName')) as cursor:
                for row in EBARUtils.searchCursor(cursor):
                    jurisdictions.append(row['JurisdictionName'])
                if len(jurisdictions) > 0:
                    # release the last cursor row reference
                    del row
            parameters[1].filter.list = jurisdictions
        return

    def updateMessages(self, parameters):
        """Adjust validation messages after internal validation. No custom
        messages for this tool."""
        return

    def execute(self, parameters, messages):
        """Delegate execution to the implementation class."""
        tool = ExportInputDataTool.ExportInputDataTool()
        tool.runExportInputDataTool(parameters, messages)
        return
class FlagBadDataUsingRange(object):
    """Toolbox tool that uses a reviewed range to identify and flag bad input data."""

    def __init__(self):
        """Tool definition; the class name doubles as the tool name."""
        self.label = 'Flag Bad Data Using Range'
        self.description = 'Use reviewed range to identify and flag bad input data'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Declare the tool's input parameters."""
        # input geodatabase (file or enterprise)
        gdb_param = arcpy.Parameter(
            displayName='Geodatabase',
            name='geodatabase',
            datatype='DEWorkspace',
            parameterType='Required',
            direction='Input')
        gdb_param.filter.list = ['Local Database', 'Remote Database']
        # ID of the reviewed range map to check against
        range_map_id_param = arcpy.Parameter(
            displayName='Range Map ID',
            name='range_map_id',
            datatype='GPLong',
            parameterType='Required',
            direction='Input')
        return [gdb_param, range_map_id_param]

    def isLicensed(self):
        """Report whether the tool is licensed to run."""
        return True

    def updateParameters(self, parameters):
        """Adjust parameter values/properties before internal validation; runs
        whenever a parameter changes. No dynamic behaviour for this tool."""
        return

    def updateMessages(self, parameters):
        """Adjust validation messages after internal validation. No custom
        messages for this tool."""
        return

    def execute(self, parameters, messages):
        """Delegate execution to the implementation class."""
        tool = FlagBadDataUsingRangeTool.FlagBadDataUsingRangeTool()
        tool.runFlagBadDataUsingRangeTool(parameters, messages)
        return
class DeleteRangeMap(object):
    """Toolbox tool that deletes a Range Map and its related records from the EBAR geodatabase."""

    def __init__(self):
        """Tool definition; the class name doubles as the tool name."""
        self.label = 'Delete Range Map'
        self.description = 'Delete Range Map and related records from the EBAR geodatabase'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Declare the tool's input parameters."""
        # input geodatabase (file or enterprise)
        gdb_param = arcpy.Parameter(
            displayName='Geodatabase',
            name='geodatabase',
            datatype='DEWorkspace',
            parameterType='Required',
            direction='Input')
        gdb_param.filter.list = ['Local Database', 'Remote Database']
        # ID of the range map to delete
        range_map_id_param = arcpy.Parameter(
            displayName='Range Map ID',
            name='range_map_id',
            datatype='GPLong',
            parameterType='Required',
            direction='Input')
        return [gdb_param, range_map_id_param]

    def isLicensed(self):
        """Report whether the tool is licensed to run."""
        return True

    def updateParameters(self, parameters):
        """Adjust parameter values/properties before internal validation; runs
        whenever a parameter changes. No dynamic behaviour for this tool."""
        return

    def updateMessages(self, parameters):
        """Adjust validation messages after internal validation. No custom
        messages for this tool."""
        return

    def execute(self, parameters, messages):
        """Delegate execution to the implementation class."""
        tool = DeleteRangeMapTool.DeleteRangeMapTool()
        tool.runDeleteRangeMapTool(parameters, messages)
        return
class ImportVisits(object):
    """Toolbox tool that imports visits and relates them to the appropriate
    InputPoint/Line/Polygon based on SFID and Subnation."""

    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = 'Import Visits'
        self.description = 'Imports visits and relates them to the appropriate InputPoint/Line/Polygon based on ' + \
            'SFID and Subnation'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Define parameter definitions."""
        # input geodatabase (file or enterprise)
        param_geodatabase = arcpy.Parameter(
            displayName='Geodatabase',
            name='geodatabase',
            datatype='DEWorkspace',
            parameterType='Required',
            direction='Input')
        param_geodatabase.filter.list = ['Local Database', 'Remote Database']
        # delimited text file holding the raw visit records
        param_raw_data_file = arcpy.Parameter(
            displayName='Raw Data File',
            name='raw_data_file',
            datatype='DEFile',
            parameterType='Required',
            direction='Input')
        param_raw_data_file.filter.list = ['txt', 'csv']
        # subnation the visits belong to; picklist populated in updateParameters
        param_subnation = arcpy.Parameter(
            displayName='Subnation',
            name='subnation',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        return [param_geodatabase, param_raw_data_file, param_subnation]

    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True

    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal validation is performed. This method is
        called whenever a parameter has been changed."""
        # Build the Subnation picklist: Canadian jurisdictions only, with NL
        # split into NF and LB (so the lumped AC and NL entries, plus the US
        # and MX placeholders, are excluded).
        if parameters[0].altered and parameters[0].value:
            param_geodatabase = parameters[0].valueAsText
            subnation_list = []
            with arcpy.da.SearchCursor(param_geodatabase + '/Jurisdiction', ['JurisdictionName'],
                                       "JurisdictionAbbreviation NOT IN ('AC', 'NL','US', 'MX')",
                                       sql_clause=(None, 'ORDER BY JurisdictionName')) as cursor:
                for row in EBARUtils.searchCursor(cursor):
                    subnation_list.append(row['JurisdictionName'])
                if len(subnation_list) > 0:
                    # release the last cursor row reference
                    del row
            parameters[2].filter.list = subnation_list
        return

    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool parameter. This method is called
        after internal validation."""
        # (fixed: the original docstring contained stray quote characters from a broken concatenation)
        return

    def execute(self, parameters, messages):
        """The source code of the tool."""
        iv = ImportVisitsTool.ImportVisitsTool()
        iv.runImportVisitsTool(parameters, messages)
        return
class SummarizeDownloads(object):
    """Toolbox tool that summarizes downloads by month."""

    def __init__(self):
        """Tool definition; the class name doubles as the tool name."""
        self.label = 'Summarize Downloads'
        self.description = 'Summarize downloads by month'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Declare the tool's input parameters (none for this tool)."""
        return []

    def isLicensed(self):
        """Report whether the tool is licensed to run."""
        return True

    def updateParameters(self, parameters):
        """Adjust parameter values/properties before internal validation; runs
        whenever a parameter changes. No dynamic behaviour for this tool."""
        return

    def updateMessages(self, parameters):
        """Adjust validation messages after internal validation. No custom
        messages for this tool."""
        return

    def execute(self, parameters, messages):
        """Delegate execution to the implementation class."""
        tool = SummarizeDownloadsTool.SummarizeDownloadsTool()
        tool.runSummarizeDownloadsTool(parameters, messages)
        return
class PublishRangeMap(object):
    """Toolbox tool that publishes one Range Map as JPG, PDF and GIS Data Zip."""

    def __init__(self):
        """Tool definition; the class name doubles as the tool name."""
        self.label = 'Publish Range Map'
        self.description = 'Publish one Range Map as JPG, PDF and GIS Data Zip'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Declare the tool's input parameters."""
        # ID of the range map to publish
        range_map_id_param = arcpy.Parameter(
            displayName='Range Map ID',
            name='range_map_id',
            datatype='GPString',
            parameterType='Required',
            direction='Input')
        # whether to also produce the GIS data zip (on by default)
        spatial_param = arcpy.Parameter(
            displayName='Output GIS Data Zip',
            name='spatial',
            datatype='GPBoolean',
            parameterType='Required',
            direction='Input')
        spatial_param.value = 'true'
        return [range_map_id_param, spatial_param]

    def isLicensed(self):
        """Report whether the tool is licensed to run."""
        return True

    def updateParameters(self, parameters):
        """Adjust parameter values/properties before internal validation; runs
        whenever a parameter changes. No dynamic behaviour for this tool."""
        return

    def updateMessages(self, parameters):
        """Adjust validation messages after internal validation. No custom
        messages for this tool."""
        return

    def execute(self, parameters, messages):
        """Delegate execution to the implementation class."""
        tool = PublishRangeMapTool.PublishRangeMapTool()
        tool.runPublishRangeMapTool(parameters, messages)
        return
class PublishRangeMapSets(object):
    """Toolbox tool that creates Zip sets of PDFs and Spatial Data per Category/Taxa."""

    def __init__(self):
        """Tool definition; the class name doubles as the tool name."""
        self.label = 'Publish Range Map Sets'
        self.description = 'Create Zip sets of PDFs and Spatial Data per Category/Taxa'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Declare the tool's input parameters."""
        # optional category filter
        category_param = arcpy.Parameter(
            displayName='Category',
            name='category',
            datatype='GPString',
            parameterType='Optional',
            direction='Input')
        # optional taxa-group filter
        taxagroup_param = arcpy.Parameter(
            displayName='Taxa Group',
            name='taxagroup',
            datatype='GPString',
            parameterType='Optional',
            direction='Input')
        return [category_param, taxagroup_param]

    def isLicensed(self):
        """Report whether the tool is licensed to run."""
        return True

    def updateParameters(self, parameters):
        """Adjust parameter values/properties before internal validation; runs
        whenever a parameter changes. No dynamic behaviour for this tool."""
        return

    def updateMessages(self, parameters):
        """Adjust validation messages after internal validation. No custom
        messages for this tool."""
        return

    def execute(self, parameters, messages):
        """Delegate execution to the implementation class."""
        tool = PublishRangeMapSetsTool.PublishRangeMapSetsTool()
        tool.runPublishRangeMapSetsTool(parameters, messages)
        return
| 38.351351
| 127
| 0.628568
| 4,398
| 45,408
| 6.399045
| 0.096407
| 0.02736
| 0.048858
| 0.057208
| 0.732402
| 0.723448
| 0.711758
| 0.700316
| 0.689834
| 0.686494
| 0
| 0.001879
| 0.285082
| 45,408
| 1,183
| 128
| 38.38377
| 0.865016
| 0.250793
| 0
| 0.752646
| 0
| 0.001323
| 0.158658
| 0.008336
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136243
| false
| 0
| 0.063492
| 0
| 0.335979
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bc57d3150c14090ddbb99264ce64be5a86d9f1ae
| 63,337
|
py
|
Python
|
fn_microsoft_security_graph/fn_microsoft_security_graph/util/customize.py
|
devsuds/resilient-community-apps
|
ce0b087a160dd1c2f86f8c261630b46ce6948ca2
|
[
"MIT"
] | null | null | null |
fn_microsoft_security_graph/fn_microsoft_security_graph/util/customize.py
|
devsuds/resilient-community-apps
|
ce0b087a160dd1c2f86f8c261630b46ce6948ca2
|
[
"MIT"
] | null | null | null |
fn_microsoft_security_graph/fn_microsoft_security_graph/util/customize.py
|
devsuds/resilient-community-apps
|
ce0b087a160dd1c2f86f8c261630b46ce6948ca2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Generate the Resilient customizations required for fn_microsoft_security_graph"""
from __future__ import print_function
from resilient_circuits.util import *
def codegen_reload_data():
    """Return the codegen parameters used to generate the
    fn_microsoft_security_graph package."""
    return {
        "package": u"fn_microsoft_security_graph",
        "incident_fields": [u"microsoft_security_graph_alert_id"],
        "action_fields": [u"microsoft_security_graph_alert_assignedto", u"microsoft_security_graph_alert_closeddatetime", u"microsoft_security_graph_alert_comment", u"microsoft_security_graph_alert_feedback", u"microsoft_security_graph_alert_status", u"microsoft_security_graph_alert_tags", u"microsoft_security_graph_query_end_datetime", u"microsoft_security_graph_query_start_datetime"],
        "function_params": [u"microsoft_security_graph_alert_data", u"microsoft_security_graph_alert_id", u"microsoft_security_graph_alert_search_query"],
        "datatables": [],
        "message_destinations": [u"microsoft_security_graph_message_destination"],
        "functions": [u"microsoft_security_graph_alert_search", u"microsoft_security_graph_get_alert_details", u"microsoft_security_graph_update_alert"],
        "phases": [],
        "automatic_tasks": [],
        "scripts": [],
        "workflows": [u"example_microsoft_security_graph_alert_search", u"example_microsoft_security_graph_get_alert_details", u"example_microsoft_security_graph_resolve_alert", u"example_microsoft_security_graph_update_alert"],
        "actions": [u"Example: Microsoft Security Graph Alert Search", u"Example: Microsoft Security Graph Get Details", u"Example: Microsoft Security Graph Resolve Alert", u"Example: Microsoft Security Graph Update Alert"],
    }
def customization_data(client=None):
"""Produce any customization definitions (types, fields, message destinations, etc)
that should be installed by `resilient-circuits customize`
"""
# This import data contains:
# Incident fields:
# microsoft_security_graph_alert_id
# Action fields:
# microsoft_security_graph_alert_assignedto
# microsoft_security_graph_alert_closeddatetime
# microsoft_security_graph_alert_comment
# microsoft_security_graph_alert_feedback
# microsoft_security_graph_alert_status
# microsoft_security_graph_alert_tags
# microsoft_security_graph_query_end_datetime
# microsoft_security_graph_query_start_datetime
# Function inputs:
# microsoft_security_graph_alert_data
# microsoft_security_graph_alert_id
# microsoft_security_graph_alert_search_query
# Message Destinations:
# microsoft_security_graph_message_destination
# Functions:
# microsoft_security_graph_alert_search
# microsoft_security_graph_get_alert_details
# microsoft_security_graph_update_alert
# Workflows:
# example_microsoft_security_graph_alert_search
# example_microsoft_security_graph_get_alert_details
# example_microsoft_security_graph_resolve_alert
# example_microsoft_security_graph_update_alert
# Rules:
# Example: Microsoft Security Graph Alert Search
# Example: Microsoft Security Graph Get Details
# Example: Microsoft Security Graph Resolve Alert
# Example: Microsoft Security Graph Update Alert
yield ImportDefinition(u"""
eyJ0YXNrX29yZGVyIjogW10sICJ3b3JrZmxvd3MiOiBbeyJ1dWlkIjogImI4NWU1ZTk3LTQ4YmQt
NGZmMC05NGZiLTM5MDVhZWNmNDBmMCIsICJkZXNjcmlwdGlvbiI6ICJTZWFyY2hlcyB0aGUgTWlj
cm9zb2Z0IFNlY3VyaXR5IEdyYXBoIGFsZXJ0cyBmb3IgYWxlcnRzIHRoYXQgZml0IHRoZSBzZWFy
Y2gvZmlsdGVyIGNyaXRlcmlhLiIsICJvYmplY3RfdHlwZSI6ICJhcnRpZmFjdCIsICJleHBvcnRf
a2V5IjogImV4YW1wbGVfbWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X3NlYXJjaCIsICJ3
b3JrZmxvd19pZCI6IDEzMywgImxhc3RfbW9kaWZpZWRfYnkiOiAiYWRtaW5AY28zc3lzLmNvbSIs
ICJjb250ZW50IjogeyJ4bWwiOiAiPD94bWwgdmVyc2lvbj1cIjEuMFwiIGVuY29kaW5nPVwiVVRG
LThcIj8+PGRlZmluaXRpb25zIHhtbG5zPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8y
MDEwMDUyNC9NT0RFTFwiIHhtbG5zOmJwbW5kaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQ
TU4vMjAxMDA1MjQvRElcIiB4bWxuczpvbWdkYz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RE
LzIwMTAwNTI0L0RDXCIgeG1sbnM6b21nZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8y
MDEwMDUyNC9ESVwiIHhtbG5zOnJlc2lsaWVudD1cImh0dHA6Ly9yZXNpbGllbnQuaWJtLmNvbS9i
cG1uXCIgeG1sbnM6eHNkPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWFcIiB4bWxu
czp4c2k9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYS1pbnN0YW5jZVwiIHRhcmdl
dE5hbWVzcGFjZT1cImh0dHA6Ly93d3cuY2FtdW5kYS5vcmcvdGVzdFwiPjxwcm9jZXNzIGlkPVwi
ZXhhbXBsZV9taWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRfc2VhcmNoXCIgaXNFeGVjdXRh
YmxlPVwidHJ1ZVwiIG5hbWU9XCJFeGFtcGxlOiBNaWNyb3NvZnQgU2VjdXJpdHkgR3JhcGggQWxl
cnQgU2VhcmNoXCI+PGRvY3VtZW50YXRpb24+U2VhcmNoZXMgdGhlIE1pY3Jvc29mdCBTZWN1cml0
eSBHcmFwaCBhbGVydHMgZm9yIGFsZXJ0cyB0aGF0IGZpdCB0aGUgc2VhcmNoL2ZpbHRlciBjcml0
ZXJpYS48L2RvY3VtZW50YXRpb24+PHN0YXJ0RXZlbnQgaWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1c
Ij48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzFlZWRjdno8L291dGdvaW5nPjwvc3RhcnRFdmVudD48
c2VydmljZVRhc2sgaWQ9XCJTZXJ2aWNlVGFza18xOXUzN3JzXCIgbmFtZT1cIk1pY3Jvc29mdCBT
ZWN1cml0eSBHcmFwaCBBbGVydCBTZS4uLlwiIHJlc2lsaWVudDp0eXBlPVwiZnVuY3Rpb25cIj48
ZXh0ZW5zaW9uRWxlbWVudHM+PHJlc2lsaWVudDpmdW5jdGlvbiB1dWlkPVwiYzk2NjQ2NmYtYmM5
Ni00ODRhLWIzOGItZjY4ZTVmNDgwMzI3XCI+e1wiaW5wdXRzXCI6e30sXCJwb3N0X3Byb2Nlc3Np
bmdfc2NyaXB0XCI6XCJhbGVydHMgPSByZXN1bHRzLmNvbnRlbnQudmFsdWVcXG5ub3RlID0gXFxc
IlRoZXJlIGFyZSAmbHQ7YiZndDt7fSZsdDsvYiZndDsgYWxlcnRzIGJhc2VkIG9uIHRoZSBhcnRp
ZmFjdCBvZiB2YWx1ZSAmbHQ7YiZndDt7fSZsdDsvYiZndDsuXFxcIi5mb3JtYXQoc3RyKGxlbihh
bGVydHMpKSwgYXJ0aWZhY3QudmFsdWUpXFxuXFxuaWYgbGVuKGFsZXJ0cykgJmd0OyAwOlxcbiAg
bm90ZSA9IG5vdGUgKyBcXFwiJmx0O2JyJmd0OyZsdDtiJmd0O0FsZXJ0IGlkczombHQ7L2ImZ3Q7
XFxcIlxcbiAgZm9yIGFsZXJ0IGluIGFsZXJ0czpcXG4gICAgbm90ZSA9IG5vdGUgKyBcXFwiJmx0
O2JyJmd0Oy0ge31cXFwiLmZvcm1hdChhbGVydC5pZClcXG5cXG5pbmNpZGVudC5hZGROb3RlKGhl
bHBlci5jcmVhdGVSaWNoVGV4dChub3RlKSlcIixcInByZV9wcm9jZXNzaW5nX3NjcmlwdFwiOlwi
aW1wb3J0IGphdmEudXRpbC5EYXRlIGFzIERhdGVcXG5cXG5zZWFyY2ggPSBcXFwiZmlsdGVyPVxc
XCJcXG5jb25qdW5jdGlvbiA9IEZhbHNlXFxuXFxuaWYgcnVsZS5wcm9wZXJ0aWVzLm1pY3Jvc29m
dF9zZWN1cml0eV9ncmFwaF9xdWVyeV9zdGFydF9kYXRldGltZTpcXG4gIHN0YXJ0ID0gRGF0ZShy
dWxlLnByb3BlcnRpZXMubWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX3F1ZXJ5X3N0YXJ0X2RhdGV0
aW1lKVxcbiAgc3RhcnRfdHMgPSBzdHIoc3RhcnQudG9JbnN0YW50KCkpXFxuICBzdGFydF9maWx0
ZXIgPSBcXFwiY3JlYXRlZERhdGVUaW1lJTIwZ2UlMjB7fVxcXCIuZm9ybWF0KHN0YXJ0X3RzKVxc
biAgc2VhcmNoID0gc2VhcmNoICsgc3RhcnRfZmlsdGVyXFxuICBjb25qdW5jdGlvbiA9IFRydWVc
XG5cXG5pZiBydWxlLnByb3BlcnRpZXMubWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX3F1ZXJ5X2Vu
ZF9kYXRldGltZTpcXG4gIGVuZCA9IERhdGUocnVsZS5wcm9wZXJ0aWVzLm1pY3Jvc29mdF9zZWN1
cml0eV9ncmFwaF9xdWVyeV9lbmRfZGF0ZXRpbWUpXFxuICBlbmRfdHMgPSBzdHIoZW5kLnRvSW5z
dGFudCgpKVxcbiAgZW5kX2ZpbHRlciA9IFxcXCJjcmVhdGVkRGF0ZVRpbWUlMjBsZSUyMHt9XFxc
Ii5mb3JtYXQoZW5kX3RzKVxcbiAgaWYgY29uanVuY3Rpb246IHNlYXJjaCA9IHNlYXJjaCArIFxc
XCIlMjBhbmQlMjBcXFwiXFxuICBzZWFyY2ggPSBzZWFyY2ggKyBlbmRfZmlsdGVyXFxuICBjb25q
dW5jdGlvbiA9IFRydWVcXG5cXG5pZiBhcnRpZmFjdC50eXBlID09IFxcXCJVc2VyIEFjY291bnRc
XFwiOlxcbiAgYXJ0aWZhY3RfZmlsdGVyID0gXFxcInVzZXJTdGF0ZXMvYW55KHVzZXI6JTIwdXNl
ci9hY2NvdW50TmFtZSUyMGVxJTIwJ3t9JylcXFwiLmZvcm1hdChhcnRpZmFjdC52YWx1ZSlcXG4g
IGlmIGNvbmp1bmN0aW9uOiBzZWFyY2ggPSBzZWFyY2ggKyBcXFwiJTIwYW5kJTIwXFxcIlxcbiAg
c2VhcmNoID0gc2VhcmNoICsgYXJ0aWZhY3RfZmlsdGVyXFxuICBjb25qdW5jdGlvbiA9IFRydWVc
XG5cXG5cXG5pbnB1dHMubWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X3NlYXJjaF9xdWVy
eSA9IHNlYXJjaFwiLFwicmVzdWx0X25hbWVcIjpcIlwifTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwv
ZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18xZWVkY3Z6PC9pbmNvbWlu
Zz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzE0bmx0NHM8L291dGdvaW5nPjwvc2VydmljZVRhc2s+
PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18xNG5sdDRzXCIgc291cmNlUmVmPVwiU2Vy
dmljZVRhc2tfMTl1Mzdyc1wiIHRhcmdldFJlZj1cIkVuZEV2ZW50XzB2OXVpenpcIi8+PGVuZEV2
ZW50IGlkPVwiRW5kRXZlbnRfMHY5dWl6elwiPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMTRubHQ0
czwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzFl
ZWRjdnpcIiBzb3VyY2VSZWY9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiB0YXJnZXRSZWY9XCJTZXJ2
aWNlVGFza18xOXUzN3JzXCIvPjx0ZXh0QW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9uXzFr
eHhpeXRcIj48dGV4dD5TdGFydCB5b3VyIHdvcmtmbG93IGhlcmU8L3RleHQ+PC90ZXh0QW5ub3Rh
dGlvbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgc291cmNlUmVmPVwi
U3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwi
Lz48L3Byb2Nlc3M+PGJwbW5kaTpCUE1ORGlhZ3JhbSBpZD1cIkJQTU5EaWFncmFtXzFcIj48YnBt
bmRpOkJQTU5QbGFuZSBicG1uRWxlbWVudD1cInVuZGVmaW5lZFwiIGlkPVwiQlBNTlBsYW5lXzFc
Ij48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIGlk
PVwiU3RhcnRFdmVudF8xNTVhc3htX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdp
ZHRoPVwiMzZcIiB4PVwiMTYyXCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6
Qm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjE1N1wiIHk9XCIyMjNcIi8+PC9i
cG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1u
RWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzFr
eHhpeXRfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBcIiB4PVwi
OTlcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5F
bGVtZW50PVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OF9k
aVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTY5XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9
XCIyMjBcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIxNTNcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50
XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxl
bWVudD1cIlNlcnZpY2VUYXNrXzE5dTM3cnNcIiBpZD1cIlNlcnZpY2VUYXNrXzE5dTM3cnNfZGlc
Ij48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiMjc4XCIgeT1c
IjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1c
IlNlcXVlbmNlRmxvd18xNG5sdDRzXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMTRubHQ0c19kaVwiPjxv
bWdkaTp3YXlwb2ludCB4PVwiMzc4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZc
Ii8+PG9tZ2RpOndheXBvaW50IHg9XCI0ODBcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1c
IjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lk
dGg9XCI5MFwiIHg9XCIzODRcIiB5PVwiMTg0LjVcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBt
bmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5kRXZlbnRfMHY5
dWl6elwiIGlkPVwiRW5kRXZlbnRfMHY5dWl6el9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwi
MzZcIiB3aWR0aD1cIjM2XCIgeD1cIjQ4MFwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+
PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwiNDUzXCIgeT1cIjIy
N1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVk
Z2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMWVlZGN2elwiIGlkPVwiU2VxdWVuY2VGbG93
XzFlZWRjdnpfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhzaTp0eXBlPVwib21nZGM6
UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMjc4XCIgeHNpOnR5cGU9XCJv
bWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBo
ZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyMzhcIiB5PVwiMTg0LjVcIi8+PC9icG1uZGk6
QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjwvYnBtbmRpOkJQTU5QbGFuZT48L2JwbW5kaTpC
UE1ORGlhZ3JhbT48L2RlZmluaXRpb25zPiIsICJ3b3JrZmxvd19pZCI6ICJleGFtcGxlX21pY3Jv
c29mdF9zZWN1cml0eV9ncmFwaF9hbGVydF9zZWFyY2giLCAidmVyc2lvbiI6IDR9LCAibGFzdF9t
b2RpZmllZF90aW1lIjogMTU1MDYyOTY3MDU5NSwgImNyZWF0b3JfaWQiOiAiYWRtaW5AY28zc3lz
LmNvbSIsICJhY3Rpb25zIjogW10sICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJleGFtcGxlX21pY3Jv
c29mdF9zZWN1cml0eV9ncmFwaF9hbGVydF9zZWFyY2giLCAibmFtZSI6ICJFeGFtcGxlOiBNaWNy
b3NvZnQgU2VjdXJpdHkgR3JhcGggQWxlcnQgU2VhcmNoIn0sIHsidXVpZCI6ICJiMTUwNDFiZS0z
NDljLTRiOTYtYmY2OS05YTMwZGIyY2UzMjMiLCAiZGVzY3JpcHRpb24iOiAiVXBkYXRlcyBhbGVy
dCBmaWVsZHMgYmFzZWQgb24gaW5wdXQgcHJvdmlkZWQgaW4gdGhlIHBvcHVwIG1vZHVsZSB3aGVu
IHRoZSBydWxlIGlzIHRyaWdnZXJlZC4gXG5cbkZpZWxkcyB0aGF0IGNhbiBiZSB1cGRhdGVkOlxu
YXNzaWduZWRUb1xuY2xvc2VkRGF0ZVRpbWVcbmNvbW1lbnRzXG5mZWVkYmFja1xuc3RhdHVzXG50
YWdzIiwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgImV4cG9ydF9rZXkiOiAiZXhhbXBsZV9t
aWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfdXBkYXRlX2FsZXJ0IiwgIndvcmtmbG93X2lkIjogMTMy
LCAibGFzdF9tb2RpZmllZF9ieSI6ICJhZG1pbkBjbzNzeXMuY29tIiwgImNvbnRlbnQiOiB7Inht
bCI6ICI8P3htbCB2ZXJzaW9uPVwiMS4wXCIgZW5jb2Rpbmc9XCJVVEYtOFwiPz48ZGVmaW5pdGlv
bnMgeG1sbnM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L01PREVMXCIg
eG1sbnM6YnBtbmRpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9ESVwi
IHhtbG5zOm9tZ2RjPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRENcIiB4
bWxuczpvbWdkaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RJXCIgeG1s
bnM6cmVzaWxpZW50PVwiaHR0cDovL3Jlc2lsaWVudC5pYm0uY29tL2JwbW5cIiB4bWxuczp4c2Q9
XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYVwiIHhtbG5zOnhzaT1cImh0dHA6Ly93
d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlXCIgdGFyZ2V0TmFtZXNwYWNlPVwiaHR0
cDovL3d3dy5jYW11bmRhLm9yZy90ZXN0XCI+PHByb2Nlc3MgaWQ9XCJleGFtcGxlX21pY3Jvc29m
dF9zZWN1cml0eV9ncmFwaF91cGRhdGVfYWxlcnRcIiBpc0V4ZWN1dGFibGU9XCJ0cnVlXCIgbmFt
ZT1cIkV4YW1wbGU6IE1pY3Jvc29mdCBTZWN1cml0eSBHcmFwaCBVcGRhdGUgQWxlcnRcIj48ZG9j
dW1lbnRhdGlvbj48IVtDREFUQVtVcGRhdGVzIGFsZXJ0IGZpZWxkcyBiYXNlZCBvbiBpbnB1dCBw
cm92aWRlZCBpbiB0aGUgcG9wdXAgbW9kdWxlIHdoZW4gdGhlIHJ1bGUgaXMgdHJpZ2dlcmVkLiBc
blxuRmllbGRzIHRoYXQgY2FuIGJlIHVwZGF0ZWQ6XG5hc3NpZ25lZFRvXG5jbG9zZWREYXRlVGlt
ZVxuY29tbWVudHNcbmZlZWRiYWNrXG5zdGF0dXNcbnRhZ3NdXT48L2RvY3VtZW50YXRpb24+PHN0
YXJ0RXZlbnQgaWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIj48b3V0Z29pbmc+U2VxdWVuY2VGbG93
XzB2ZGp6YjY8L291dGdvaW5nPjwvc3RhcnRFdmVudD48c2VydmljZVRhc2sgaWQ9XCJTZXJ2aWNl
VGFza18xNDhkaWt3XCIgbmFtZT1cIk1pY3Jvc29mdCBTZWN1cml0eSBHcmFwaCBVcGRhdGUgQS4u
LlwiIHJlc2lsaWVudDp0eXBlPVwiZnVuY3Rpb25cIj48ZXh0ZW5zaW9uRWxlbWVudHM+PHJlc2ls
aWVudDpmdW5jdGlvbiB1dWlkPVwiOGU2NzUwODUtODIwZS00ZTVhLTk4M2EtMzk0M2I1NThiYTI2
XCI+e1wiaW5wdXRzXCI6e1wiZjI1ZDVhMGYtN2YzYS00NjE1LWI3ZTMtNDEzNThlMjk4OWI0XCI6
e1wiaW5wdXRfdHlwZVwiOlwic3RhdGljXCIsXCJzdGF0aWNfaW5wdXRcIjp7XCJtdWx0aXNlbGVj
dF92YWx1ZVwiOltdLFwidGV4dF9jb250ZW50X3ZhbHVlXCI6e1wiZm9ybWF0XCI6XCJ0ZXh0XCIs
XCJjb250ZW50XCI6XCJ7XFxuICAgICAgICBcXFwidmVuZG9ySW5mb3JtYXRpb25cXFwiOiB7XFxu
ICAgICAgICAgICAgXFxcInByb3ZpZGVyXFxcIjogXFxcIlN0cmluZ1xcXCIsXFxuICAgICAgICAg
ICAgXFxcInZlbmRvclxcXCI6IFxcXCJTdHJpbmdcXFwiXFxuICAgICAgICB9XFxuICAgIH1cIn19
fX0sXCJwb3N0X3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCJpbmNpZGVudC5hZGROb3RlKGhlbHBlci5j
cmVhdGVSaWNoVGV4dChcXFwiQWxlcnQgdXBkYXRlZDpcXFxcbnt9XFxcIi5mb3JtYXQocmVzdWx0
cy5jb250ZW50KSkpXFxuXCIsXCJwcmVfcHJvY2Vzc2luZ19zY3JpcHRcIjpcImltcG9ydCBqYXZh
LnV0aWwuRGF0ZSBhcyBEYXRlXFxuXFxuaW5wdXRzLm1pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9h
bGVydF9pZCA9IGluY2lkZW50LnByb3BlcnRpZXMubWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2Fs
ZXJ0X2lkXFxuXFxuYXNzaWduZWRUbyA9IHJ1bGUucHJvcGVydGllcy5taWNyb3NvZnRfc2VjdXJp
dHlfZ3JhcGhfYWxlcnRfYXNzaWduZWR0byBpZiBydWxlLnByb3BlcnRpZXMubWljcm9zb2Z0X3Nl
Y3VyaXR5X2dyYXBoX2FsZXJ0X2Fzc2lnbmVkdG8gZWxzZSBcXFwiXFxcIlxcbmlmIHJ1bGUucHJv
cGVydGllcy5taWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRfY2xvc2VkZGF0ZXRpbWU6XFxu
ICB0aW1lX3N0YW1wID0gcnVsZS5wcm9wZXJ0aWVzLm1pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9h
bGVydF9jbG9zZWRkYXRldGltZVxcbiAgZXBvY2hfdGltZSA9IERhdGUodGltZV9zdGFtcClcXG4g
IGNsb3NlZERhdGVUaW1lID0gXFxcIlxcXFxcXFwiY2xvc2VkRGF0ZVRpbWVcXFxcXFxcIjogXFxc
XFxcXCJ7MH1cXFxcXFxcIixcXFwiLmZvcm1hdChzdHIoZXBvY2hfdGltZS50b0luc3RhbnQoKSkp
XFxuZWxzZTpcXG4gIGNsb3NlZERhdGVUaW1lID0gXFxcIlxcXCJcXG5jb21tZW50ID0gcnVsZS5w
cm9wZXJ0aWVzLm1pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9hbGVydF9jb21tZW50IGlmIHJ1bGUu
cHJvcGVydGllcy5taWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRfY29tbWVudCBlbHNlIFxc
XCJcXFwiXFxuaWYgcnVsZS5wcm9wZXJ0aWVzLm1pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9hbGVy
dF9mZWVkYmFjazpcXG4gIGZlZWRiYWNrID0gcnVsZS5wcm9wZXJ0aWVzLm1pY3Jvc29mdF9zZWN1
cml0eV9ncmFwaF9hbGVydF9mZWVkYmFja1xcbmVsaWYgd29ya2Zsb3cucHJvcGVydGllcy5tc2df
YWxlcnRfZGV0YWlscy5jb250ZW50LmZlZWRiYWNrOlxcbiAgZmVlZGJhY2sgPSB3b3JrZmxvdy5w
cm9wZXJ0aWVzLm1zZ19hbGVydF9kZXRhaWxzLmNvbnRlbnQuZmVlZGJhY2tcXG5lbHNlOlxcbiAg
ZmVlZGJhY2sgPSBcXFwiXFxcIlxcbnN0YXR1cyA9IHJ1bGUucHJvcGVydGllcy5taWNyb3NvZnRf
c2VjdXJpdHlfZ3JhcGhfYWxlcnRfc3RhdHVzIGlmIHJ1bGUucHJvcGVydGllcy5taWNyb3NvZnRf
c2VjdXJpdHlfZ3JhcGhfYWxlcnRfc3RhdHVzIGVsc2Ugd29ya2Zsb3cucHJvcGVydGllcy5tc2df
YWxlcnRfZGV0YWlscy5jb250ZW50LnN0YXR1c1xcbnRhZ3MgPSBydWxlLnByb3BlcnRpZXMubWlj
cm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X3RhZ3MgaWYgcnVsZS5wcm9wZXJ0aWVzLm1pY3Jv
c29mdF9zZWN1cml0eV9ncmFwaF9hbGVydF90YWdzIGVsc2UgXFxcIlxcXCJcXG5cXG5wcm92aWRl
ciA9IHdvcmtmbG93LnByb3BlcnRpZXMubXNnX2FsZXJ0X2RldGFpbHMuY29udGVudC52ZW5kb3JJ
bmZvcm1hdGlvbi5wcm92aWRlclxcbnZlbmRvciA9IHdvcmtmbG93LnByb3BlcnRpZXMubXNnX2Fs
ZXJ0X2RldGFpbHMuY29udGVudC52ZW5kb3JJbmZvcm1hdGlvbi52ZW5kb3JcXG5cXG5pZiB3b3Jr
Zmxvdy5wcm9wZXJ0aWVzLm1zZ19hbGVydF9kZXRhaWxzLmNvbnRlbnQuY29tbWVudHM6XFxuICBh
bGxfY29tbWVudHMgPSBsaXN0KHdvcmtmbG93LnByb3BlcnRpZXMubXNnX2FsZXJ0X2RldGFpbHMu
Y29udGVudC5jb21tZW50cylcXG5lbHNlOlxcbiAgYWxsX2NvbW1lbnRzID0gW1xcXCJcXFwiXVxc
bmFsbF9jb21tZW50cyA9IGFsbF9jb21tZW50cyArIFtjb21tZW50XVxcblxcbmlmIHdvcmtmbG93
LnByb3BlcnRpZXMubXNnX2FsZXJ0X2RldGFpbHMuY29udGVudC50YWdzOlxcbiAgYWxsX3RhZ3Mg
PSB3b3JrZmxvdy5wcm9wZXJ0aWVzLm1zZ19hbGVydF9kZXRhaWxzLmNvbnRlbnQudGFnc1xcbmVs
c2U6XFxuICBhbGxfdGFncyA9IFtcXFwiXFxcIl1cXG5hbGxfdGFncyA9IGFsbF90YWdzICsgW3Rh
Z3NdXFxuI1tcXFwiezV9XFxcIl0gICAgXFxuZGF0YSA9IHUnJyd7e1xcbiAgICAgICAgXFxcImFz
c2lnbmVkVG9cXFwiOiBcXFwiezB9XFxcIixcXG4gICAgICAgIHsxfVxcbiAgICAgICAgXFxcImNv
bW1lbnRzXFxcIjogezJ9LFxcbiAgICAgICAgXFxcImZlZWRiYWNrXFxcIjogXFxcInszfVxcXCIs
XFxuICAgICAgICBcXFwic3RhdHVzXFxcIjogXFxcIns0fVxcXCIsXFxuICAgICAgICBcXFwidGFn
c1xcXCI6IHs1fSxcXG4gICAgICAgIFxcXCJ2ZW5kb3JJbmZvcm1hdGlvblxcXCI6XFxuICAgICAg
ICB7e1xcbiAgICAgICAgICAgIFxcXCJwcm92aWRlclxcXCI6IFxcXCJ7Nn1cXFwiLFxcbiAgICAg
ICAgICAgIFxcXCJ2ZW5kb3JcXFwiOiBcXFwiezd9XFxcIlxcbiAgICAgICAgfX1cXG4gICAgfX0n
JycuZm9ybWF0KGFzc2lnbmVkVG8sIGNsb3NlZERhdGVUaW1lLCBhbGxfY29tbWVudHMsIGZlZWRi
YWNrLCBzdGF0dXMsIGFsbF90YWdzLCBwcm92aWRlciwgdmVuZG9yKVxcblxcbmlucHV0cy5taWNy
b3NvZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRfZGF0YSA9IGRhdGFcIixcInJlc3VsdF9uYW1lXCI6
XCJcIn08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5T
ZXF1ZW5jZUZsb3dfMTl6dXk0dzwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xanN1
dGU1PC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxlbmRFdmVudCBpZD1cIkVuZEV2ZW50XzF0bDBk
cG1cIj48aW5jb21pbmc+U2VxdWVuY2VGbG93XzFqc3V0ZTU8L2luY29taW5nPjwvZW5kRXZlbnQ+
PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18xanN1dGU1XCIgc291cmNlUmVmPVwiU2Vy
dmljZVRhc2tfMTQ4ZGlrd1wiIHRhcmdldFJlZj1cIkVuZEV2ZW50XzF0bDBkcG1cIi8+PHNlcnZp
Y2VUYXNrIGlkPVwiU2VydmljZVRhc2tfMDQxaGtqMVwiIG5hbWU9XCJNaWNyb3NvZnQgU2VjdXJp
dHkgR3JhcGggR2V0IEFsZXIuLi5cIiByZXNpbGllbnQ6dHlwZT1cImZ1bmN0aW9uXCI+PGV4dGVu
c2lvbkVsZW1lbnRzPjxyZXNpbGllbnQ6ZnVuY3Rpb24gdXVpZD1cImMwNDc2YjZkLTlkNTctNGE5
OC1iNzRhLWE4NjdiZGIzZjAzOVwiPntcImlucHV0c1wiOnt9LFwicHJlX3Byb2Nlc3Npbmdfc2Ny
aXB0XCI6XCJpbnB1dHMubWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X2lkID0gaW5jaWRl
bnQucHJvcGVydGllcy5taWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRfaWRcIixcInJlc3Vs
dF9uYW1lXCI6XCJtc2dfYWxlcnRfZGV0YWlsc1wifTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0
ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18wdmRqemI2PC9pbmNvbWluZz48
b3V0Z29pbmc+U2VxdWVuY2VGbG93XzE5enV5NHc8L291dGdvaW5nPjwvc2VydmljZVRhc2s+PHNl
cXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18wdmRqemI2XCIgc291cmNlUmVmPVwiU3RhcnRF
dmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiU2VydmljZVRhc2tfMDQxaGtqMVwiLz48c2VxdWVu
Y2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzE5enV5NHdcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFz
a18wNDFoa2oxXCIgdGFyZ2V0UmVmPVwiU2VydmljZVRhc2tfMTQ4ZGlrd1wiLz48dGV4dEFubm90
YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3Jr
ZmxvdyBoZXJlPC90ZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2Np
YXRpb25fMXNldWo0OFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJl
Zj1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdy
YW0gaWQ9XCJCUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9XCJ1
bmRlZmluZWRcIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1l
bnQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwiPjxv
bWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIxODhc
Ii8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9XCI5
MFwiIHg9XCIxNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1O
U2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4
aXl0XCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWln
aHQ9XCIzMFwiIHdpZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1O
U2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFzZXVqNDhc
IiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE2OVwi
IHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwi
MTUzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTkVk
Z2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18xNDhkaWt3XCIg
aWQ9XCJTZXJ2aWNlVGFza18xNDhkaWt3X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwi
IHdpZHRoPVwiMTAwXCIgeD1cIjQ3NlwiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxi
cG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5kRXZlbnRfMXRsMGRwbVwiIGlkPVwiRW5k
RXZlbnRfMXRsMGRwbV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2
XCIgeD1cIjY4OFwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBo
ZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwiNjYxXCIgeT1cIjIyN1wiLz48L2JwbW5kaTpC
UE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9
XCJTZXF1ZW5jZUZsb3dfMWpzdXRlNVwiIGlkPVwiU2VxdWVuY2VGbG93XzFqc3V0ZTVfZGlcIj48
b21nZGk6d2F5cG9pbnQgeD1cIjU3NlwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2
XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNjg4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9
XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdp
ZHRoPVwiOTBcIiB4PVwiNTg3XCIgeT1cIjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1u
ZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18w
NDFoa2oxXCIgaWQ9XCJTZXJ2aWNlVGFza18wNDFoa2oxX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWln
aHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIgeD1cIjI3N1wiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBN
TlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMHZkanpi
NlwiIGlkPVwiU2VxdWVuY2VGbG93XzB2ZGp6YjZfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5
OFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4
PVwiMjc3XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1O
TGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyMzcuNVwi
IHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6
QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMTl6dXk0d1wiIGlkPVwiU2VxdWVu
Y2VGbG93XzE5enV5NHdfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjM3N1wiIHhzaTp0eXBlPVwi
b21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNDc2XCIgeHNpOnR5
cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJv
dW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI0MjYuNVwiIHk9XCIxODRcIi8+PC9i
cG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjwvYnBtbmRpOkJQTU5QbGFuZT48L2Jw
bW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25zPiIsICJ3b3JrZmxvd19pZCI6ICJleGFtcGxl
X21pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF91cGRhdGVfYWxlcnQiLCAidmVyc2lvbiI6IDM5fSwg
Imxhc3RfbW9kaWZpZWRfdGltZSI6IDE1NTA2ODQxNTAwNTgsICJjcmVhdG9yX2lkIjogImFkbWlu
QGNvM3N5cy5jb20iLCAiYWN0aW9ucyI6IFtdLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBs
ZV9taWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfdXBkYXRlX2FsZXJ0IiwgIm5hbWUiOiAiRXhhbXBs
ZTogTWljcm9zb2Z0IFNlY3VyaXR5IEdyYXBoIFVwZGF0ZSBBbGVydCJ9LCB7InV1aWQiOiAiZTE3
ZjRhNmEtNDAxOC00ODBiLThkY2ItYzM1YjZhNzY0Njk2IiwgImRlc2NyaXB0aW9uIjogIlJldHVy
bnMgYWxsIGRldGFpbHMgb2YgYW4gYWxlcnQgYW5kIGFkZHMgYXJ0aWZhY3RzIG9mIGNlcnRhaW4g
dHlwZXMgYmFzZWQgb24gdGhlIHJlc3VsdHMuIiwgIm9iamVjdF90eXBlIjogImluY2lkZW50Iiwg
ImV4cG9ydF9rZXkiOiAiZXhhbXBsZV9taWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfZ2V0X2FsZXJ0
X2RldGFpbHMiLCAid29ya2Zsb3dfaWQiOiAxMzAsICJsYXN0X21vZGlmaWVkX2J5IjogImFkbWlu
QGNvM3N5cy5jb20iLCAiY29udGVudCI6IHsieG1sIjogIjw/eG1sIHZlcnNpb249XCIxLjBcIiBl
bmNvZGluZz1cIlVURi04XCI/PjxkZWZpbml0aW9ucyB4bWxucz1cImh0dHA6Ly93d3cub21nLm9y
Zy9zcGVjL0JQTU4vMjAxMDA1MjQvTU9ERUxcIiB4bWxuczpicG1uZGk9XCJodHRwOi8vd3d3Lm9t
Zy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L0RJXCIgeG1sbnM6b21nZGM9XCJodHRwOi8vd3d3Lm9t
Zy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9EQ1wiIHhtbG5zOm9tZ2RpPVwiaHR0cDovL3d3dy5vbWcu
b3JnL3NwZWMvREQvMjAxMDA1MjQvRElcIiB4bWxuczpyZXNpbGllbnQ9XCJodHRwOi8vcmVzaWxp
ZW50LmlibS5jb20vYnBtblwiIHhtbG5zOnhzZD1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1M
U2NoZW1hXCIgeG1sbnM6eHNpPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5z
dGFuY2VcIiB0YXJnZXROYW1lc3BhY2U9XCJodHRwOi8vd3d3LmNhbXVuZGEub3JnL3Rlc3RcIj48
cHJvY2VzcyBpZD1cImV4YW1wbGVfbWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2dldF9hbGVydF9k
ZXRhaWxzXCIgaXNFeGVjdXRhYmxlPVwidHJ1ZVwiIG5hbWU9XCJFeGFtcGxlOiBNaWNyb3NvZnQg
U2VjdXJpdHkgR3JhcGggR2V0IEFsZXJ0IERldGFpbHNcIj48ZG9jdW1lbnRhdGlvbj5SZXR1cm5z
IGFsbCBkZXRhaWxzIG9mIGFuIGFsZXJ0IGFuZCBhZGRzIGFydGlmYWN0cyBvZiBjZXJ0YWluIHR5
cGVzIGJhc2VkIG9uIHRoZSByZXN1bHRzLjwvZG9jdW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1c
IlN0YXJ0RXZlbnRfMTU1YXN4bVwiPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMDJ1ank0Yjwvb3V0
Z29pbmc+PC9zdGFydEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzFxcnEyejJc
IiBuYW1lPVwiTWljcm9zb2Z0IFNlY3VyaXR5IEdyYXBoIEdldCBBbGVyLi4uXCIgcmVzaWxpZW50
OnR5cGU9XCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9u
IHV1aWQ9XCJjMDQ3NmI2ZC05ZDU3LTRhOTgtYjc0YS1hODY3YmRiM2YwMzlcIj57XCJpbnB1dHNc
Ijp7fSxcInBvc3RfcHJvY2Vzc2luZ19zY3JpcHRcIjpcInVzZXJfc3RhdGVzID0gcmVzdWx0cy5j
b250ZW50LnVzZXJTdGF0ZXNcXG5cXG5mb3Igc3RhdGUgaW4gdXNlcl9zdGF0ZXM6XFxuICBpZiBz
dGF0ZS5sb2dvbklwOlxcbiAgICBpbmNpZGVudC5hZGRBcnRpZmFjdChcXFwiSVAgQWRkcmVzc1xc
XCIsIHN0YXRlLmxvZ29uSXAsIFxcXCJcXFwiKVxcblxcbiAgaWYgc3RhdGUuYWNjb3VudE5hbWU6
XFxuICAgIGluY2lkZW50LmFkZEFydGlmYWN0KFxcXCJVc2VyIEFjY291bnRcXFwiLCBzdGF0ZS5h
Y2NvdW50TmFtZSwgXFxcIlxcXCIpXFxuICBcXG4gIGlmIHN0YXRlLnVzZXJQcmluY2lwYWxOYW1l
OlxcbiAgICBpbmNpZGVudC5hZGRBcnRpZmFjdChcXFwiVXNlciBBY2NvdW50XFxcIiwgc3RhdGUu
dXNlclByaW5jaXBhbE5hbWUsIFxcXCJcXFwiKVxcblwiLFwicHJlX3Byb2Nlc3Npbmdfc2NyaXB0
XCI6XCJpbnB1dHMubWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X2lkID0gaW5jaWRlbnQu
cHJvcGVydGllcy5taWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRfaWRcIixcInJlc3VsdF9u
YW1lXCI6XCJcIn08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNv
bWluZz5TZXF1ZW5jZUZsb3dfMDJ1ank0YjwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxv
d18wd3lsOWJ6PC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1
ZW5jZUZsb3dfMDJ1ank0YlwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdl
dFJlZj1cIlNlcnZpY2VUYXNrXzFxcnEyejJcIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMWtv
cWRia1wiPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMHd5bDliejwvaW5jb21pbmc+PC9lbmRFdmVu
dD48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzB3eWw5YnpcIiBzb3VyY2VSZWY9XCJT
ZXJ2aWNlVGFza18xcXJxMnoyXCIgdGFyZ2V0UmVmPVwiRW5kRXZlbnRfMWtvcWRia1wiLz48dGV4
dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91
ciB3b3JrZmxvdyBoZXJlPC90ZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwi
QXNzb2NpYXRpb25fMXNldWo0OFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRh
cmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBN
TkRpYWdyYW0gaWQ9XCJCUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1l
bnQ9XCJ1bmRlZmluZWRcIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBt
bkVsZW1lbnQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9k
aVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9
XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lk
dGg9XCI5MFwiIHg9XCIxNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5k
aTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlv
bl8xa3h4aXl0XCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5k
cyBoZWlnaHQ9XCIzMFwiIHdpZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5k
aTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFz
ZXVqNDhcIiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1c
IjE2OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2lu
dCB4PVwiMTUzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6
QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18xcXJx
MnoyXCIgaWQ9XCJTZXJ2aWNlVGFza18xcXJxMnoyX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9
XCI4MFwiIHdpZHRoPVwiMTAwXCIgeD1cIjMwNlwiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNo
YXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMDJ1ank0Ylwi
IGlkPVwiU2VxdWVuY2VGbG93XzAydWp5NGJfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwi
IHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwi
MzA2XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFi
ZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyNTJcIiB5PVwi
MTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5T
aGFwZSBicG1uRWxlbWVudD1cIkVuZEV2ZW50XzFrb3FkYmtcIiBpZD1cIkVuZEV2ZW50XzFrb3Fk
YmtfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI1NTNc
IiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNc
IiB3aWR0aD1cIjBcIiB4PVwiNTcxXCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9i
cG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZs
b3dfMHd5bDlielwiIGlkPVwiU2VxdWVuY2VGbG93XzB3eWw5YnpfZGlcIj48b21nZGk6d2F5cG9p
bnQgeD1cIjQwNlwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3
YXlwb2ludCB4PVwiNTUzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJw
bW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9
XCI0NzkuNVwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdl
PjwvYnBtbmRpOkJQTU5QbGFuZT48L2JwbW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25zPiIs
ICJ3b3JrZmxvd19pZCI6ICJleGFtcGxlX21pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9nZXRfYWxl
cnRfZGV0YWlscyIsICJ2ZXJzaW9uIjogMn0sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTUwNjI5
NjAyNjc0LCAiY3JlYXRvcl9pZCI6ICJhZG1pbkBjbzNzeXMuY29tIiwgImFjdGlvbnMiOiBbXSwg
InByb2dyYW1tYXRpY19uYW1lIjogImV4YW1wbGVfbWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2dl
dF9hbGVydF9kZXRhaWxzIiwgIm5hbWUiOiAiRXhhbXBsZTogTWljcm9zb2Z0IFNlY3VyaXR5IEdy
YXBoIEdldCBBbGVydCBEZXRhaWxzIn0sIHsidXVpZCI6ICI0MTk4YTU5Zi04ZDJkLTQ5ZjUtYmU5
NC1hOTFhZmVkMjZkMmUiLCAiZGVzY3JpcHRpb24iOiAiU2V0cyB0aGUgTWljcm9zb2Z0IFNlY3Vy
aXR5IEdyYXBoIGFsZXJ0IHN0YXR1cyB0byBSZXNvbHZlZCBhbmQgc2V0cyB0aGUgY2xvc2VkRGF0
ZVRpbWUgZmllbGQgdG8gdGhlIGN1cnJlbnQgZGF0ZSB0aW1lLiIsICJvYmplY3RfdHlwZSI6ICJp
bmNpZGVudCIsICJleHBvcnRfa2V5IjogImV4YW1wbGVfbWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBo
X3Jlc29sdmVfYWxlcnQiLCAid29ya2Zsb3dfaWQiOiAxMzEsICJsYXN0X21vZGlmaWVkX2J5Ijog
ImFkbWluQGNvM3N5cy5jb20iLCAiY29udGVudCI6IHsieG1sIjogIjw/eG1sIHZlcnNpb249XCIx
LjBcIiBlbmNvZGluZz1cIlVURi04XCI/PjxkZWZpbml0aW9ucyB4bWxucz1cImh0dHA6Ly93d3cu
b21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvTU9ERUxcIiB4bWxuczpicG1uZGk9XCJodHRwOi8v
d3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L0RJXCIgeG1sbnM6b21nZGM9XCJodHRwOi8v
d3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9EQ1wiIHhtbG5zOm9tZ2RpPVwiaHR0cDovL3d3
dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRElcIiB4bWxuczpyZXNpbGllbnQ9XCJodHRwOi8v
cmVzaWxpZW50LmlibS5jb20vYnBtblwiIHhtbG5zOnhzZD1cImh0dHA6Ly93d3cudzMub3JnLzIw
MDEvWE1MU2NoZW1hXCIgeG1sbnM6eHNpPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hl
bWEtaW5zdGFuY2VcIiB0YXJnZXROYW1lc3BhY2U9XCJodHRwOi8vd3d3LmNhbXVuZGEub3JnL3Rl
c3RcIj48cHJvY2VzcyBpZD1cImV4YW1wbGVfbWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX3Jlc29s
dmVfYWxlcnRcIiBpc0V4ZWN1dGFibGU9XCJ0cnVlXCIgbmFtZT1cIkV4YW1wbGU6IE1pY3Jvc29m
dCBTZWN1cml0eSBHcmFwaCBSZXNvbHZlIEFsZXJ0XCI+PGRvY3VtZW50YXRpb24+U2V0cyB0aGUg
TWljcm9zb2Z0IFNlY3VyaXR5IEdyYXBoIGFsZXJ0IHN0YXR1cyB0byBSZXNvbHZlZCBhbmQgc2V0
cyB0aGUgY2xvc2VkRGF0ZVRpbWUgZmllbGQgdG8gdGhlIGN1cnJlbnQgZGF0ZSB0aW1lLjwvZG9j
dW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiPjxvdXRnb2lu
Zz5TZXF1ZW5jZUZsb3dfMHFvbzY2cjwvb3V0Z29pbmc+PC9zdGFydEV2ZW50PjxzZXJ2aWNlVGFz
ayBpZD1cIlNlcnZpY2VUYXNrXzFwNW53dGNcIiBuYW1lPVwiTWljcm9zb2Z0IFNlY3VyaXR5IEdy
YXBoIFVwZGF0ZSBBLi4uXCIgcmVzaWxpZW50OnR5cGU9XCJmdW5jdGlvblwiPjxleHRlbnNpb25F
bGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCI4ZTY3NTA4NS04MjBlLTRlNWEtOTgz
YS0zOTQzYjU1OGJhMjZcIj57XCJpbnB1dHNcIjp7XCJmMjVkNWEwZi03ZjNhLTQ2MTUtYjdlMy00
MTM1OGUyOTg5YjRcIjp7XCJpbnB1dF90eXBlXCI6XCJzdGF0aWNcIixcInN0YXRpY19pbnB1dFwi
OntcIm11bHRpc2VsZWN0X3ZhbHVlXCI6W10sXCJ0ZXh0X2NvbnRlbnRfdmFsdWVcIjp7XCJmb3Jt
YXRcIjpcInRleHRcIixcImNvbnRlbnRcIjpcIntcXG4gICAgICAgIFxcXCJzdGF0dXNcXFwiOiBc
XFwicmVzb2x2ZWRcXFwiLFxcbiAgICAgICAgXFxcInZlbmRvckluZm9ybWF0aW9uXFxcIjoge1xc
biAgICAgICAgICAgIFxcXCJwcm92aWRlclxcXCI6IFxcXCJTdHJpbmdcXFwiLFxcbiAgICAgICAg
ICAgIFxcXCJ2ZW5kb3JcXFwiOiBcXFwiU3RyaW5nXFxcIlxcbiAgICAgICAgfVxcbiAgICB9XCJ9
fX19LFwicG9zdF9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiaW5jaWRlbnQuYWRkTm90ZShoZWxwZXIu
Y3JlYXRlUmljaFRleHQoXFxcIkFsZXJ0IHVwZGF0ZWQ6XFxcXG57fVxcXCIuZm9ybWF0KHJlc3Vs
dHMuY29udGVudCkpKVxcblwiLFwicHJlX3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCJpbXBvcnQgamF2
YS51dGlsLkRhdGUgYXMgRGF0ZVxcblxcbmVwb2NoX3RpbWUgPSBEYXRlKClcXG5jbG9zZWREYXRl
VGltZSA9IHN0cihlcG9jaF90aW1lLnRvSW5zdGFudCgpKVxcblxcbnByb3ZpZGVyID0gd29ya2Zs
b3cucHJvcGVydGllcy5tc2dfYWxlcnRfZGV0YWlscy5jb250ZW50LnZlbmRvckluZm9ybWF0aW9u
LnByb3ZpZGVyXFxudmVuZG9yID0gd29ya2Zsb3cucHJvcGVydGllcy5tc2dfYWxlcnRfZGV0YWls
cy5jb250ZW50LnZlbmRvckluZm9ybWF0aW9uLnZlbmRvclxcbiAgICBcXG5kYXRhID0gJycne3tc
XG4gICAgICAgIFxcXCJjbG9zZWREYXRlVGltZVxcXCI6IFxcXCJ7MH1cXFwiLFxcbiAgICAgICAg
XFxcInN0YXR1c1xcXCI6IFxcXCJ7MX1cXFwiLFxcbiAgICAgICAgXFxcInZlbmRvckluZm9ybWF0
aW9uXFxcIjpcXG4gICAgICAgIHt7XFxuICAgICAgICAgICAgXFxcInByb3ZpZGVyXFxcIjogXFxc
InsyfVxcXCIsXFxuICAgICAgICAgICAgXFxcInZlbmRvclxcXCI6IFxcXCJ7M31cXFwiXFxuICAg
ICAgICB9fVxcbiAgICB9fScnJy5mb3JtYXQoY2xvc2VkRGF0ZVRpbWUsIFxcXCJyZXNvbHZlZFxc
XCIsIHByb3ZpZGVyLCB2ZW5kb3IpXFxuXFxuaW5wdXRzLm1pY3Jvc29mdF9zZWN1cml0eV9ncmFw
aF9hbGVydF9kYXRhID0gZGF0YVxcbmlucHV0cy5taWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfYWxl
cnRfaWQgPSBpbmNpZGVudC5wcm9wZXJ0aWVzLm1pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9hbGVy
dF9pZFwiLFwicmVzdWx0X25hbWVcIjpcIlwifTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5z
aW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18xOXd5azcwPC9pbmNvbWluZz48b3V0
Z29pbmc+U2VxdWVuY2VGbG93XzBqMnB0aTk8L291dGdvaW5nPjwvc2VydmljZVRhc2s+PGVuZEV2
ZW50IGlkPVwiRW5kRXZlbnRfMXd5OTc2MVwiPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMGoycHRp
OTwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzBq
MnB0aTlcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18xcDVud3RjXCIgdGFyZ2V0UmVmPVwiRW5k
RXZlbnRfMXd5OTc2MVwiLz48c2VydmljZVRhc2sgaWQ9XCJTZXJ2aWNlVGFza18xc3JhdGZoXCIg
bmFtZT1cIk1pY3Jvc29mdCBTZWN1cml0eSBHcmFwaCBHZXQgQWxlci4uLlwiIHJlc2lsaWVudDp0
eXBlPVwiZnVuY3Rpb25cIj48ZXh0ZW5zaW9uRWxlbWVudHM+PHJlc2lsaWVudDpmdW5jdGlvbiB1
dWlkPVwiYzA0NzZiNmQtOWQ1Ny00YTk4LWI3NGEtYTg2N2JkYjNmMDM5XCI+e1wiaW5wdXRzXCI6
e30sXCJwcmVfcHJvY2Vzc2luZ19zY3JpcHRcIjpcImlucHV0cy5taWNyb3NvZnRfc2VjdXJpdHlf
Z3JhcGhfYWxlcnRfaWQgPSBpbmNpZGVudC5wcm9wZXJ0aWVzLm1pY3Jvc29mdF9zZWN1cml0eV9n
cmFwaF9hbGVydF9pZFwiLFwicmVzdWx0X25hbWVcIjpcIm1zZ19hbGVydF9kZXRhaWxzXCJ9PC9y
ZXNpbGllbnQ6ZnVuY3Rpb24+PC9leHRlbnNpb25FbGVtZW50cz48aW5jb21pbmc+U2VxdWVuY2VG
bG93XzBxb282NnI8L2luY29taW5nPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMTl3eWs3MDwvb3V0
Z29pbmc+PC9zZXJ2aWNlVGFzaz48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzBxb282
NnJcIiBzb3VyY2VSZWY9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiB0YXJnZXRSZWY9XCJTZXJ2aWNl
VGFza18xc3JhdGZoXCIvPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMTl3eWs3MFwi
IHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNrXzFzcmF0ZmhcIiB0YXJnZXRSZWY9XCJTZXJ2aWNlVGFz
a18xcDVud3RjXCIvPjx0ZXh0QW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRc
Ij48dGV4dD5TdGFydCB5b3VyIHdvcmtmbG93IGhlcmU8L3RleHQ+PC90ZXh0QW5ub3RhdGlvbj48
YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgc291cmNlUmVmPVwiU3RhcnRF
dmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwiLz48L3By
b2Nlc3M+PGJwbW5kaTpCUE1ORGlhZ3JhbSBpZD1cIkJQTU5EaWFncmFtXzFcIj48YnBtbmRpOkJQ
TU5QbGFuZSBicG1uRWxlbWVudD1cInVuZGVmaW5lZFwiIGlkPVwiQlBNTlBsYW5lXzFcIj48YnBt
bmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIGlkPVwiU3Rh
cnRFdmVudF8xNTVhc3htX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwi
MzZcIiB4PVwiMTYyXCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRz
IGhlaWdodD1cIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjE1N1wiIHk9XCIyMjNcIi8+PC9icG1uZGk6
QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVu
dD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRf
ZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBcIiB4PVwiOTlcIiB5
PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50
PVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OF9kaVwiPjxv
bWdkaTp3YXlwb2ludCB4PVwiMTY5XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMjBc
Ii8+PG9tZ2RpOndheXBvaW50IHg9XCIxNTNcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1c
IjI1NFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1c
IlNlcnZpY2VUYXNrXzFwNW53dGNcIiBpZD1cIlNlcnZpY2VUYXNrXzFwNW53dGNfZGlcIj48b21n
ZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiNjQ0XCIgeT1cIjE2Nlwi
Lz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJFbmRF
dmVudF8xd3k5NzYxXCIgaWQ9XCJFbmRFdmVudF8xd3k5NzYxX2RpXCI+PG9tZ2RjOkJvdW5kcyBo
ZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZcIiB4PVwiOTA0XCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQ
TU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCI5MFwiIHg9XCI4Nzdc
IiB5PVwiMjI3XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5k
aTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18wajJwdGk5XCIgaWQ9XCJTZXF1
ZW5jZUZsb3dfMGoycHRpOV9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiNzQ0XCIgeHNpOnR5cGU9
XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI5MDRcIiB4c2k6
dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6
Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCI5MFwiIHg9XCI3NzlcIiB5PVwiMTg0XCIvPjwv
YnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1u
RWxlbWVudD1cIlNlcnZpY2VUYXNrXzFzcmF0ZmhcIiBpZD1cIlNlcnZpY2VUYXNrXzFzcmF0Zmhf
ZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiMzU1XCIg
eT1cIjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVu
dD1cIlNlcXVlbmNlRmxvd18wcW9vNjZyXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMHFvbzY2cl9kaVwi
PjxvbWdkaTp3YXlwb2ludCB4PVwiMTk4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIy
MDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIzNTVcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIg
eT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIg
d2lkdGg9XCIwXCIgeD1cIjI3Ni41XCIgeT1cIjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9i
cG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxv
d18xOXd5azcwXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMTl3eWs3MF9kaVwiPjxvbWdkaTp3YXlwb2lu
dCB4PVwiNDU1XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndh
eXBvaW50IHg9XCI2NDRcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBt
bmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1c
IjU0OS41XCIgeT1cIjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+
PC9icG1uZGk6QlBNTlBsYW5lPjwvYnBtbmRpOkJQTU5EaWFncmFtPjwvZGVmaW5pdGlvbnM+Iiwg
IndvcmtmbG93X2lkIjogImV4YW1wbGVfbWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX3Jlc29sdmVf
YWxlcnQiLCAidmVyc2lvbiI6IDN9LCAibGFzdF9tb2RpZmllZF90aW1lIjogMTU1MDYzMTIwNDY1
NCwgImNyZWF0b3JfaWQiOiAiYWRtaW5AY28zc3lzLmNvbSIsICJhY3Rpb25zIjogW10sICJwcm9n
cmFtbWF0aWNfbmFtZSI6ICJleGFtcGxlX21pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9yZXNvbHZl
X2FsZXJ0IiwgIm5hbWUiOiAiRXhhbXBsZTogTWljcm9zb2Z0IFNlY3VyaXR5IEdyYXBoIFJlc29s
dmUgQWxlcnQifV0sICJhY3Rpb25zIjogW3sibG9naWNfdHlwZSI6ICJhbGwiLCAibmFtZSI6ICJF
eGFtcGxlOiBNaWNyb3NvZnQgU2VjdXJpdHkgR3JhcGggQWxlcnQgU2VhcmNoIiwgInZpZXdfaXRl
bXMiOiBbeyJzaG93X2lmIjogbnVsbCwgImZpZWxkX3R5cGUiOiAiYWN0aW9uaW52b2NhdGlvbiIs
ICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29u
dGVudCI6ICJiYjNmMTQxMS1hMTMyLTRiYmMtOGY1ZS1lYWRhZmUyNTM5NzQiLCAic3RlcF9sYWJl
bCI6IG51bGx9LCB7InNob3dfaWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJhY3Rpb25pbnZvY2F0
aW9uIiwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIs
ICJjb250ZW50IjogIjNhOTFkNjJmLTk3ZTctNGQ1YS1iMjkzLTYwODU5OGUxYzlkZiIsICJzdGVw
X2xhYmVsIjogbnVsbH1dLCAidHlwZSI6IDEsICJ3b3JrZmxvd3MiOiBbImV4YW1wbGVfbWljcm9z
b2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X3NlYXJjaCJdLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZh
Y3QiLCAidGltZW91dF9zZWNvbmRzIjogODY0MDAsICJ1dWlkIjogIjMwODNlYjBkLTMzMWQtNGM5
OC1iNzk3LWJhOWE3N2UzMzVmYyIsICJhdXRvbWF0aW9ucyI6IFtdLCAiZXhwb3J0X2tleSI6ICJF
eGFtcGxlOiBNaWNyb3NvZnQgU2VjdXJpdHkgR3JhcGggQWxlcnQgU2VhcmNoIiwgImNvbmRpdGlv
bnMiOiBbeyJ0eXBlIjogbnVsbCwgImV2YWx1YXRpb25faWQiOiBudWxsLCAiZmllbGRfbmFtZSI6
ICJhcnRpZmFjdC50eXBlIiwgIm1ldGhvZCI6ICJpbiIsICJ2YWx1ZSI6IFsiVXNlciBBY2NvdW50
Il19XSwgImlkIjogMTU5LCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbXX0sIHsibG9naWNfdHlw
ZSI6ICJhbGwiLCAibmFtZSI6ICJFeGFtcGxlOiBNaWNyb3NvZnQgU2VjdXJpdHkgR3JhcGggR2V0
IERldGFpbHMiLCAidmlld19pdGVtcyI6IFtdLCAidHlwZSI6IDEsICJ3b3JrZmxvd3MiOiBbImV4
YW1wbGVfbWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2dldF9hbGVydF9kZXRhaWxzIl0sICJvYmpl
Y3RfdHlwZSI6ICJpbmNpZGVudCIsICJ0aW1lb3V0X3NlY29uZHMiOiA4NjQwMCwgInV1aWQiOiAi
NmEyMmI0MGUtMGJjMS00MTlkLTg2YTYtNDA4MTc2NGY5OTBhIiwgImF1dG9tYXRpb25zIjogW10s
ICJleHBvcnRfa2V5IjogIkV4YW1wbGU6IE1pY3Jvc29mdCBTZWN1cml0eSBHcmFwaCBHZXQgRGV0
YWlscyIsICJjb25kaXRpb25zIjogW10sICJpZCI6IDE2MCwgIm1lc3NhZ2VfZGVzdGluYXRpb25z
IjogW119LCB7ImxvZ2ljX3R5cGUiOiAiYWxsIiwgIm5hbWUiOiAiRXhhbXBsZTogTWljcm9zb2Z0
IFNlY3VyaXR5IEdyYXBoIFJlc29sdmUgQWxlcnQiLCAidmlld19pdGVtcyI6IFtdLCAidHlwZSI6
IDAsICJ3b3JrZmxvd3MiOiBbImV4YW1wbGVfbWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX3Jlc29s
dmVfYWxlcnQiXSwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgInRpbWVvdXRfc2Vjb25kcyI6
IDg2NDAwLCAidXVpZCI6ICI2YmY3MDQzNC1iNTc4LTQ2MDItYjY2MC1hNGJjODMyY2EwMDMiLCAi
YXV0b21hdGlvbnMiOiBbXSwgImV4cG9ydF9rZXkiOiAiRXhhbXBsZTogTWljcm9zb2Z0IFNlY3Vy
aXR5IEdyYXBoIFJlc29sdmUgQWxlcnQiLCAiY29uZGl0aW9ucyI6IFt7InR5cGUiOiBudWxsLCAi
ZXZhbHVhdGlvbl9pZCI6IG51bGwsICJmaWVsZF9uYW1lIjogImluY2lkZW50LnByb3BlcnRpZXMu
bWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X2lkIiwgIm1ldGhvZCI6ICJoYXNfYV92YWx1
ZSIsICJ2YWx1ZSI6IG51bGx9LCB7InR5cGUiOiBudWxsLCAiZXZhbHVhdGlvbl9pZCI6IG51bGws
ICJmaWVsZF9uYW1lIjogImluY2lkZW50LnBsYW5fc3RhdHVzIiwgIm1ldGhvZCI6ICJjaGFuZ2Vk
X3RvIiwgInZhbHVlIjogIkNsb3NlZCJ9XSwgImlkIjogMTYxLCAibWVzc2FnZV9kZXN0aW5hdGlv
bnMiOiBbXX0sIHsibG9naWNfdHlwZSI6ICJhbGwiLCAibmFtZSI6ICJFeGFtcGxlOiBNaWNyb3Nv
ZnQgU2VjdXJpdHkgR3JhcGggVXBkYXRlIEFsZXJ0IiwgInZpZXdfaXRlbXMiOiBbeyJzaG93X2lm
IjogbnVsbCwgImZpZWxkX3R5cGUiOiAiYWN0aW9uaW52b2NhdGlvbiIsICJzaG93X2xpbmtfaGVh
ZGVyIjogZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICJkOTU4NDZi
ZC05NmNkLTQ1YWItYTljYi01YThiOTZmYWRkNGQiLCAic3RlcF9sYWJlbCI6IG51bGx9LCB7InNo
b3dfaWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJhY3Rpb25pbnZvY2F0aW9uIiwgInNob3dfbGlu
a19oZWFkZXIiOiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogImM4
OWViNGIxLTg1MDItNDc0Ny1iNGRjLWViNTllNjk4NzI4YSIsICJzdGVwX2xhYmVsIjogbnVsbH0s
IHsic2hvd19pZiI6IG51bGwsICJmaWVsZF90eXBlIjogImFjdGlvbmludm9jYXRpb24iLCAic2hv
d19saW5rX2hlYWRlciI6IGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQi
OiAiYjVjM2FiODUtOWY1MC00NTFlLTllNmEtOTc1NmEyZGZhNmIwIiwgInN0ZXBfbGFiZWwiOiBu
dWxsfSwgeyJzaG93X2lmIjogbnVsbCwgImZpZWxkX3R5cGUiOiAiYWN0aW9uaW52b2NhdGlvbiIs
ICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29u
dGVudCI6ICJjZWZjMTk5OC0zMmQxLTQ4YWYtYTkzMi1iOTcyNzIyNDI4YWIiLCAic3RlcF9sYWJl
bCI6IG51bGx9LCB7InNob3dfaWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJhY3Rpb25pbnZvY2F0
aW9uIiwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIs
ICJjb250ZW50IjogImQ4NzQxM2RkLTFlZGYtNDVhMy04ZjRlLTMxZmIwYjA0MjYzNyIsICJzdGVw
X2xhYmVsIjogbnVsbH0sIHsic2hvd19pZiI6IG51bGwsICJmaWVsZF90eXBlIjogImFjdGlvbmlu
dm9jYXRpb24iLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91
dWlkIiwgImNvbnRlbnQiOiAiNTEwNmZlZGYtY2RhMS00M2U1LWI1YTctZGQ3ODc4MTBlMTVlIiwg
InN0ZXBfbGFiZWwiOiBudWxsfV0sICJ0eXBlIjogMSwgIndvcmtmbG93cyI6IFsiZXhhbXBsZV9t
aWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfdXBkYXRlX2FsZXJ0Il0sICJvYmplY3RfdHlwZSI6ICJp
bmNpZGVudCIsICJ0aW1lb3V0X3NlY29uZHMiOiA4NjQwMCwgInV1aWQiOiAiMDYzY2IxNjUtM2M4
MS00NzZmLTk4NmYtZjkzNWIxNWQ3MzUzIiwgImF1dG9tYXRpb25zIjogW10sICJleHBvcnRfa2V5
IjogIkV4YW1wbGU6IE1pY3Jvc29mdCBTZWN1cml0eSBHcmFwaCBVcGRhdGUgQWxlcnQiLCAiY29u
ZGl0aW9ucyI6IFtdLCAiaWQiOiAxNTgsICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFtdfV0sICJs
YXlvdXRzIjogW10sICJleHBvcnRfZm9ybWF0X3ZlcnNpb24iOiAyLCAiaWQiOiA1MiwgImluZHVz
dHJpZXMiOiBudWxsLCAicGhhc2VzIjogW10sICJhY3Rpb25fb3JkZXIiOiBbXSwgImdlb3MiOiBu
dWxsLCAibG9jYWxlIjogbnVsbCwgInNlcnZlcl92ZXJzaW9uIjogeyJtYWpvciI6IDMxLCAidmVy
c2lvbiI6ICIzMS4wLjQyNTQiLCAiYnVpbGRfbnVtYmVyIjogNDI1NCwgIm1pbm9yIjogMH0sICJ0
aW1lZnJhbWVzIjogbnVsbCwgIndvcmtzcGFjZXMiOiBbXSwgImF1dG9tYXRpY190YXNrcyI6IFtd
LCAiZnVuY3Rpb25zIjogW3siZGlzcGxheV9uYW1lIjogIk1pY3Jvc29mdCBTZWN1cml0eSBHcmFw
aCBBbGVydCBTZWFyY2giLCAiZGVzY3JpcHRpb24iOiB7ImNvbnRlbnQiOiAiU2VhcmNoIGFjcm9z
cyBNaWNyb3NvZnQgU2VjdXJpdHkgR3JhcGggZm9yIGFsZXJ0cyB3aGljaCBtYXRjaCB0aGUgY29y
cmVzcG9uZGluZyBzZWFyY2ggZmlsdGVycy4iLCAiZm9ybWF0IjogInRleHQifSwgImNyZWF0b3Ii
OiB7ImRpc3BsYXlfbmFtZSI6ICJSZXNpbGllbnQgU3lzYWRtaW4iLCAidHlwZSI6ICJ1c2VyIiwg
ImlkIjogMSwgIm5hbWUiOiAiYWRtaW5AY28zc3lzLmNvbSJ9LCAidmlld19pdGVtcyI6IFt7InNo
b3dfaWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfbGlua19oZWFk
ZXIiOiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogImQyMDEyNTEy
LTliOWUtNGE0Zi04ZWQyLWI1Mzc2YzEyZDU3OCIsICJzdGVwX2xhYmVsIjogbnVsbH1dLCAiZXhw
b3J0X2tleSI6ICJtaWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRfc2VhcmNoIiwgInV1aWQi
OiAiYzk2NjQ2NmYtYmM5Ni00ODRhLWIzOGItZjY4ZTVmNDgwMzI3IiwgImxhc3RfbW9kaWZpZWRf
YnkiOiB7ImRpc3BsYXlfbmFtZSI6ICJSZXNpbGllbnQgU3lzYWRtaW4iLCAidHlwZSI6ICJ1c2Vy
IiwgImlkIjogMSwgIm5hbWUiOiAiYWRtaW5AY28zc3lzLmNvbSJ9LCAidmVyc2lvbiI6IDEsICJ3
b3JrZmxvd3MiOiBbeyJkZXNjcmlwdGlvbiI6IG51bGwsICJvYmplY3RfdHlwZSI6ICJhcnRpZmFj
dCIsICJhY3Rpb25zIjogW10sICJuYW1lIjogIkV4YW1wbGU6IE1pY3Jvc29mdCBTZWN1cml0eSBH
cmFwaCBBbGVydCBTZWFyY2giLCAid29ya2Zsb3dfaWQiOiAxMzMsICJwcm9ncmFtbWF0aWNfbmFt
ZSI6ICJleGFtcGxlX21pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9hbGVydF9zZWFyY2giLCAidXVp
ZCI6IG51bGx9XSwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1NTAyNTgyMTQ0NDAsICJkZXN0aW5h
dGlvbl9oYW5kbGUiOiAibWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX21lc3NhZ2VfZGVzdGluYXRp
b24iLCAiaWQiOiA4MywgIm5hbWUiOiAibWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X3Nl
YXJjaCJ9LCB7ImRpc3BsYXlfbmFtZSI6ICJNaWNyb3NvZnQgU2VjdXJpdHkgR3JhcGggR2V0IEFs
ZXJ0IERldGFpbHMiLCAiZGVzY3JpcHRpb24iOiB7ImNvbnRlbnQiOiAiR2V0IHRoZSBkZXRhaWxz
IG9mIGFuIGFsZXJ0IGZyb20gdGhlIE1pY3Jvc29mdCBTZWN1cml0eSBHcmFwaCBBUEkuIiwgImZv
cm1hdCI6ICJ0ZXh0In0sICJjcmVhdG9yIjogeyJkaXNwbGF5X25hbWUiOiAiUmVzaWxpZW50IFN5
c2FkbWluIiwgInR5cGUiOiAidXNlciIsICJpZCI6IDEsICJuYW1lIjogImFkbWluQGNvM3N5cy5j
b20ifSwgInZpZXdfaXRlbXMiOiBbeyJzaG93X2lmIjogbnVsbCwgImZpZWxkX3R5cGUiOiAiX19m
dW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1
aWQiLCAiY29udGVudCI6ICJhMTg0MWYzYy1jNTEwLTQyZDctOWE0Ni1kNjM4ZmI3MWE5NzkiLCAi
c3RlcF9sYWJlbCI6IG51bGx9XSwgImV4cG9ydF9rZXkiOiAibWljcm9zb2Z0X3NlY3VyaXR5X2dy
YXBoX2dldF9hbGVydF9kZXRhaWxzIiwgInV1aWQiOiAiYzA0NzZiNmQtOWQ1Ny00YTk4LWI3NGEt
YTg2N2JkYjNmMDM5IiwgImxhc3RfbW9kaWZpZWRfYnkiOiB7ImRpc3BsYXlfbmFtZSI6ICJSZXNp
bGllbnQgU3lzYWRtaW4iLCAidHlwZSI6ICJ1c2VyIiwgImlkIjogMSwgIm5hbWUiOiAiYWRtaW5A
Y28zc3lzLmNvbSJ9LCAidmVyc2lvbiI6IDEsICJ3b3JrZmxvd3MiOiBbeyJkZXNjcmlwdGlvbiI6
IG51bGwsICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJhY3Rpb25zIjogW10sICJuYW1lIjog
IkV4YW1wbGU6IE1pY3Jvc29mdCBTZWN1cml0eSBHcmFwaCBHZXQgQWxlcnQgRGV0YWlscyIsICJ3
b3JrZmxvd19pZCI6IDEzMCwgInByb2dyYW1tYXRpY19uYW1lIjogImV4YW1wbGVfbWljcm9zb2Z0
X3NlY3VyaXR5X2dyYXBoX2dldF9hbGVydF9kZXRhaWxzIiwgInV1aWQiOiBudWxsfSwgeyJkZXNj
cmlwdGlvbiI6IG51bGwsICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJhY3Rpb25zIjogW10s
ICJuYW1lIjogIkV4YW1wbGU6IE1pY3Jvc29mdCBTZWN1cml0eSBHcmFwaCBSZXNvbHZlIEFsZXJ0
IiwgIndvcmtmbG93X2lkIjogMTMxLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9taWNy
b3NvZnRfc2VjdXJpdHlfZ3JhcGhfcmVzb2x2ZV9hbGVydCIsICJ1dWlkIjogbnVsbH0sIHsiZGVz
Y3JpcHRpb24iOiBudWxsLCAib2JqZWN0X3R5cGUiOiAiaW5jaWRlbnQiLCAiYWN0aW9ucyI6IFtd
LCAibmFtZSI6ICJFeGFtcGxlOiBNaWNyb3NvZnQgU2VjdXJpdHkgR3JhcGggVXBkYXRlIEFsZXJ0
IiwgIndvcmtmbG93X2lkIjogMTMyLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9taWNy
b3NvZnRfc2VjdXJpdHlfZ3JhcGhfdXBkYXRlX2FsZXJ0IiwgInV1aWQiOiBudWxsfV0sICJsYXN0
X21vZGlmaWVkX3RpbWUiOiAxNTUwMjU4MjE0NDQwLCAiZGVzdGluYXRpb25faGFuZGxlIjogIm1p
Y3Jvc29mdF9zZWN1cml0eV9ncmFwaF9tZXNzYWdlX2Rlc3RpbmF0aW9uIiwgImlkIjogODQsICJu
YW1lIjogIm1pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9nZXRfYWxlcnRfZGV0YWlscyJ9LCB7ImRp
c3BsYXlfbmFtZSI6ICJNaWNyb3NvZnQgU2VjdXJpdHkgR3JhcGggVXBkYXRlIEFsZXJ0IiwgImRl
c2NyaXB0aW9uIjogeyJjb250ZW50IjogIlVwZGF0ZSBhbiBhbGVydCBpbiB0aGUgTWljcm9zb2Z0
IFNlY3VyaXR5IEdyYXBoLiIsICJmb3JtYXQiOiAidGV4dCJ9LCAiY3JlYXRvciI6IHsiZGlzcGxh
eV9uYW1lIjogIlJlc2lsaWVudCBTeXNhZG1pbiIsICJ0eXBlIjogInVzZXIiLCAiaWQiOiAxLCAi
bmFtZSI6ICJhZG1pbkBjbzNzeXMuY29tIn0sICJ2aWV3X2l0ZW1zIjogW3sic2hvd19pZiI6IG51
bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNl
LCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiYTE4NDFmM2MtYzUxMC00MmQ3
LTlhNDYtZDYzOGZiNzFhOTc5IiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93X2lmIjogbnVs
bCwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2Us
ICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICJmMjVkNWEwZi03ZjNhLTQ2MTUt
YjdlMy00MTM1OGUyOTg5YjQiLCAic3RlcF9sYWJlbCI6IG51bGx9XSwgImV4cG9ydF9rZXkiOiAi
bWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX3VwZGF0ZV9hbGVydCIsICJ1dWlkIjogIjhlNjc1MDg1
LTgyMGUtNGU1YS05ODNhLTM5NDNiNTU4YmEyNiIsICJsYXN0X21vZGlmaWVkX2J5IjogeyJkaXNw
bGF5X25hbWUiOiAiUmVzaWxpZW50IFN5c2FkbWluIiwgInR5cGUiOiAidXNlciIsICJpZCI6IDEs
ICJuYW1lIjogImFkbWluQGNvM3N5cy5jb20ifSwgInZlcnNpb24iOiAxLCAid29ya2Zsb3dzIjog
W3siZGVzY3JpcHRpb24iOiBudWxsLCAib2JqZWN0X3R5cGUiOiAiaW5jaWRlbnQiLCAiYWN0aW9u
cyI6IFtdLCAibmFtZSI6ICJFeGFtcGxlOiBNaWNyb3NvZnQgU2VjdXJpdHkgR3JhcGggUmVzb2x2
ZSBBbGVydCIsICJ3b3JrZmxvd19pZCI6IDEzMSwgInByb2dyYW1tYXRpY19uYW1lIjogImV4YW1w
bGVfbWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX3Jlc29sdmVfYWxlcnQiLCAidXVpZCI6IG51bGx9
LCB7ImRlc2NyaXB0aW9uIjogbnVsbCwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgImFjdGlv
bnMiOiBbXSwgIm5hbWUiOiAiRXhhbXBsZTogTWljcm9zb2Z0IFNlY3VyaXR5IEdyYXBoIFVwZGF0
ZSBBbGVydCIsICJ3b3JrZmxvd19pZCI6IDEzMiwgInByb2dyYW1tYXRpY19uYW1lIjogImV4YW1w
bGVfbWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX3VwZGF0ZV9hbGVydCIsICJ1dWlkIjogbnVsbH1d
LCAibGFzdF9tb2RpZmllZF90aW1lIjogMTU1MDI1ODIxNDQ0MCwgImRlc3RpbmF0aW9uX2hhbmRs
ZSI6ICJtaWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfbWVzc2FnZV9kZXN0aW5hdGlvbiIsICJpZCI6
IDg1LCAibmFtZSI6ICJtaWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfdXBkYXRlX2FsZXJ0In1dLCAi
bm90aWZpY2F0aW9ucyI6IG51bGwsICJyZWd1bGF0b3JzIjogbnVsbCwgImluY2lkZW50X3R5cGVz
IjogW3siY3JlYXRlX2RhdGUiOiAxNTUwNjg3NDQ1MzU1LCAiZGVzY3JpcHRpb24iOiAiQ3VzdG9t
aXphdGlvbiBQYWNrYWdlcyAoaW50ZXJuYWwpIiwgImV4cG9ydF9rZXkiOiAiQ3VzdG9taXphdGlv
biBQYWNrYWdlcyAoaW50ZXJuYWwpIiwgImlkIjogMCwgIm5hbWUiOiAiQ3VzdG9taXphdGlvbiBQ
YWNrYWdlcyAoaW50ZXJuYWwpIiwgInVwZGF0ZV9kYXRlIjogMTU1MDY4NzQ0NTM1NSwgInV1aWQi
OiAiYmZlZWMyZDQtMzc3MC0xMWU4LWFkMzktNGEwMDA0MDQ0YWEwIiwgImVuYWJsZWQiOiBmYWxz
ZSwgInN5c3RlbSI6IGZhbHNlLCAicGFyZW50X2lkIjogbnVsbCwgImhpZGRlbiI6IGZhbHNlfV0s
ICJzY3JpcHRzIjogW10sICJ0eXBlcyI6IFtdLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbeyJ1
dWlkIjogIjQ1YWY0YWZlLTdhYzEtNDFlZC1hNWJjLWNiZGE5ODI0YmIyNyIsICJleHBvcnRfa2V5
IjogIm1pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9tZXNzYWdlX2Rlc3RpbmF0aW9uIiwgIm5hbWUi
OiAiTWljcm9zb2Z0IFNlY3VyaXR5IEdyYXBoIE1lc3NhZ2UgRGVzdGluYXRpb24iLCAiZGVzdGlu
YXRpb25fdHlwZSI6IDAsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJtaWNyb3NvZnRfc2VjdXJpdHlf
Z3JhcGhfbWVzc2FnZV9kZXN0aW5hdGlvbiIsICJleHBlY3RfYWNrIjogdHJ1ZSwgInVzZXJzIjog
WyJhZG1pbkBjbzNzeXMuY29tIl19XSwgImluY2lkZW50X2FydGlmYWN0X3R5cGVzIjogW10sICJy
b2xlcyI6IFtdLCAiZmllbGRzIjogW3sib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6IDAsICJv
cGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAiTWljcm9zb2Z0IFNlY3VyaXR5IEdyYXBoIEFs
ZXJ0IElEIiwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAicHJlZml4IjogInByb3BlcnRpZXMiLCAi
Y2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDQwMiwgInJlYWRfb25seSI6IGZhbHNlLCAidXVpZCI6
ICJiY2M3ZjJjNi03ZjFmLTRiYjctOGUyNS0zNTg2ZmU2MTFhZmMiLCAiY2hvc2VuIjogZmFsc2Us
ICJpbnB1dF90eXBlIjogInRleHQiLCAidG9vbHRpcCI6ICJJRCBvZiBhbiBhbGVydCBmcm9tIE1p
Y3Jvc29mdCBTZWN1cml0eSBHcmFwaCIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0Ijog
ZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiaW5jaWRlbnQvbWljcm9zb2Z0
X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X2lkIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJw
bGFjZWhvbGRlciI6ICJGaWVsZCBmb3IgTWljcm9zb2Z0IFNlY3VyaXR5IEdyYXBoIEFsZXJ0IElE
IiwgIm5hbWUiOiAibWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X2lkIiwgImRlcHJlY2F0
ZWQiOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAidmFsdWVzIjog
W119LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiA2LCAib3BlcmF0aW9uX3Blcm1zIjog
e30sICJ0ZXh0IjogIk1pY3Jvc29mdCBTZWN1cml0eSBHcmFwaCBBbGVydCBhc3NpZ25lZFRvIiwg
ImJsYW5rX29wdGlvbiI6IGZhbHNlLCAicHJlZml4IjogInByb3BlcnRpZXMiLCAiY2hhbmdlYWJs
ZSI6IHRydWUsICJpZCI6IDM3MSwgInJlYWRfb25seSI6IGZhbHNlLCAidXVpZCI6ICJkOTU4NDZi
ZC05NmNkLTQ1YWItYTljYi01YThiOTZmYWRkNGQiLCAiY2hvc2VuIjogZmFsc2UsICJpbnB1dF90
eXBlIjogInRleHQiLCAidG9vbHRpcCI6ICIiLCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4
dCI6IGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJleHBvcnRfa2V5IjogImFjdGlvbmludm9jYXRp
b24vbWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X2Fzc2lnbmVkdG8iLCAiaGlkZV9ub3Rp
ZmljYXRpb24iOiBmYWxzZSwgInBsYWNlaG9sZGVyIjogIiIsICJuYW1lIjogIm1pY3Jvc29mdF9z
ZWN1cml0eV9ncmFwaF9hbGVydF9hc3NpZ25lZHRvIiwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImRl
ZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAidmFsdWVzIjogW119LCB7Im9wZXJhdGlv
bnMiOiBbXSwgInR5cGVfaWQiOiA2LCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ0ZXh0IjogIk1p
Y3Jvc29mdCBTZWN1cml0eSBHcmFwaCBBbGVydCBmZWVkYmFjayIsICJibGFua19vcHRpb24iOiB0
cnVlLCAicHJlZml4IjogInByb3BlcnRpZXMiLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDM3
NywgInJlYWRfb25seSI6IGZhbHNlLCAidXVpZCI6ICJjZWZjMTk5OC0zMmQxLTQ4YWYtYTkzMi1i
OTcyNzIyNDI4YWIiLCAiY2hvc2VuIjogZmFsc2UsICJpbnB1dF90eXBlIjogInNlbGVjdCIsICJ0
b29sdGlwIjogIiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1w
bGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiYWN0aW9uaW52b2NhdGlvbi9taWNyb3NvZnRfc2Vj
dXJpdHlfZ3JhcGhfYWxlcnRfZmVlZGJhY2siLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwg
InBsYWNlaG9sZGVyIjogIiIsICJuYW1lIjogIm1pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9hbGVy
dF9mZWVkYmFjayIsICJkZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2
ZXIiOiBmYWxzZSwgInZhbHVlcyI6IFt7InV1aWQiOiAiMmExMDBkYTktMmU5NC00NmZmLWJmZTUt
MGNmZTczNDMyNDRmIiwgImRlZmF1bHQiOiBmYWxzZSwgImVuYWJsZWQiOiB0cnVlLCAidmFsdWUi
OiA0NTAsICJsYWJlbCI6ICJ1bmtub3duIiwgImhpZGRlbiI6IGZhbHNlLCAicHJvcGVydGllcyI6
IG51bGx9LCB7InV1aWQiOiAiODM3ZTU2M2YtMGU5Zi00YWU3LTk5NmMtMzQyZDkwNTc3MTZjIiwg
ImRlZmF1bHQiOiBmYWxzZSwgImVuYWJsZWQiOiB0cnVlLCAidmFsdWUiOiA0NTEsICJsYWJlbCI6
ICJ0cnVlUG9zaXRpdmUiLCAiaGlkZGVuIjogZmFsc2UsICJwcm9wZXJ0aWVzIjogbnVsbH0sIHsi
dXVpZCI6ICJjYmQyMjU1MC1jYjEzLTQyNjQtYjU2Ny05NTg4NDJjNmE1ODciLCAiZGVmYXVsdCI6
IGZhbHNlLCAiZW5hYmxlZCI6IHRydWUsICJ2YWx1ZSI6IDQ1MiwgImxhYmVsIjogImZhbHNlUG9z
aXRpdmUiLCAiaGlkZGVuIjogZmFsc2UsICJwcm9wZXJ0aWVzIjogbnVsbH0sIHsidXVpZCI6ICIz
NDIxOThlMC1hZWI2LTQzZGMtOWMzMS04YmYxZTY1MGIwODMiLCAiZGVmYXVsdCI6IGZhbHNlLCAi
ZW5hYmxlZCI6IHRydWUsICJ2YWx1ZSI6IDQ1MywgImxhYmVsIjogImJlbmlnblBvc2l0aXZlIiwg
ImhpZGRlbiI6IGZhbHNlLCAicHJvcGVydGllcyI6IG51bGx9XX0sIHsib3BlcmF0aW9ucyI6IFtd
LCAidHlwZV9pZCI6IDYsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAiTWljcm9zb2Z0
IFNlY3VyaXR5IEdyYXBoIEFsZXJ0IHN0YXR1cyIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInBy
ZWZpeCI6ICJwcm9wZXJ0aWVzIiwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAzNzgsICJyZWFk
X29ubHkiOiBmYWxzZSwgInV1aWQiOiAiZDg3NDEzZGQtMWVkZi00NWEzLThmNGUtMzFmYjBiMDQy
NjM3IiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJzZWxlY3QiLCAidG9vbHRpcCI6
ICIiLCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGVtcGxhdGVzIjog
W10sICJleHBvcnRfa2V5IjogImFjdGlvbmludm9jYXRpb24vbWljcm9zb2Z0X3NlY3VyaXR5X2dy
YXBoX2FsZXJ0X3N0YXR1cyIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xk
ZXIiOiAiIiwgIm5hbWUiOiAibWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X3N0YXR1cyIs
ICJkZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwg
InZhbHVlcyI6IFt7InV1aWQiOiAiNmMxNzUwNjgtZTJmYy00NzdlLWE4MWItZjUxNDAwMTU3Mjhm
IiwgImRlZmF1bHQiOiB0cnVlLCAiZW5hYmxlZCI6IHRydWUsICJ2YWx1ZSI6IDQ1NCwgImxhYmVs
IjogInVua25vd24iLCAiaGlkZGVuIjogZmFsc2UsICJwcm9wZXJ0aWVzIjogbnVsbH0sIHsidXVp
ZCI6ICI5MzU1MTVlOC1jYWZhLTRhNmUtYTNlOS01YTBhODM1ZmYxOTUiLCAiZGVmYXVsdCI6IGZh
bHNlLCAiZW5hYmxlZCI6IHRydWUsICJ2YWx1ZSI6IDQ1NSwgImxhYmVsIjogIm5ld0FsZXJ0Iiwg
ImhpZGRlbiI6IGZhbHNlLCAicHJvcGVydGllcyI6IG51bGx9LCB7InV1aWQiOiAiNzM4ZDc4MzIt
YjI1OS00YmZjLTk2ZGEtMjY0YzdhZjJiNjBmIiwgImRlZmF1bHQiOiBmYWxzZSwgImVuYWJsZWQi
OiB0cnVlLCAidmFsdWUiOiA0NTYsICJsYWJlbCI6ICJpblByb2dyZXNzIiwgImhpZGRlbiI6IGZh
bHNlLCAicHJvcGVydGllcyI6IG51bGx9LCB7InV1aWQiOiAiZTc1NzFjMGEtZDFjZC00ZGY1LTlk
ZjUtYTgzODY4MThiYjIwIiwgImRlZmF1bHQiOiBmYWxzZSwgImVuYWJsZWQiOiB0cnVlLCAidmFs
dWUiOiA0NTcsICJsYWJlbCI6ICJyZXNvbHZlZCIsICJoaWRkZW4iOiBmYWxzZSwgInByb3BlcnRp
ZXMiOiBudWxsfV19LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiA2LCAib3BlcmF0aW9u
X3Blcm1zIjoge30sICJ0ZXh0IjogIk1pY3Jvc29mdCBTZWN1cml0eSBHcmFwaCBBbGVydCBjbG9z
ZWREYXRlVGltZSIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6ICJwcm9wZXJ0aWVz
IiwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAzNzYsICJyZWFkX29ubHkiOiBmYWxzZSwgInV1
aWQiOiAiYzg5ZWI0YjEtODUwMi00NzQ3LWI0ZGMtZWI1OWU2OTg3MjhhIiwgImNob3NlbiI6IGZh
bHNlLCAiaW5wdXRfdHlwZSI6ICJkYXRldGltZXBpY2tlciIsICJ0b29sdGlwIjogIiIsICJpbnRl
cm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9y
dF9rZXkiOiAiYWN0aW9uaW52b2NhdGlvbi9taWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRf
Y2xvc2VkZGF0ZXRpbWUiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgInBsYWNlaG9sZGVy
IjogIiIsICJuYW1lIjogIm1pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9hbGVydF9jbG9zZWRkYXRl
dGltZSIsICJkZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBm
YWxzZSwgInZhbHVlcyI6IFtdfSwgeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogNiwgIm9w
ZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJNaWNyb3NvZnQgU2VjdXJpdHkgR3JhcGggQWxl
cnQgdGFncyIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6ICJwcm9wZXJ0aWVzIiwg
ImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAzNzUsICJyZWFkX29ubHkiOiBmYWxzZSwgInV1aWQi
OiAiNTEwNmZlZGYtY2RhMS00M2U1LWI1YTctZGQ3ODc4MTBlMTVlIiwgImNob3NlbiI6IGZhbHNl
LCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInRvb2x0aXAiOiAiIiwgImludGVybmFsIjogZmFsc2Us
ICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6ICJhY3Rp
b25pbnZvY2F0aW9uL21pY3Jvc29mdF9zZWN1cml0eV9ncmFwaF9hbGVydF90YWdzIiwgImhpZGVf
bm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJtaWNyb3Nv
ZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRfdGFncyIsICJkZXByZWNhdGVkIjogZmFsc2UsICJkZWZh
dWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgInZhbHVlcyI6IFtdfSwgeyJvcGVyYXRpb25z
IjogW10sICJ0eXBlX2lkIjogNiwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJNaWNy
b3NvZnQgU2VjdXJpdHkgR3JhcGggUXVlcnkgU3RhcnQgRGF0ZVRpbWUiLCAiYmxhbmtfb3B0aW9u
IjogZmFsc2UsICJwcmVmaXgiOiAicHJvcGVydGllcyIsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlk
IjogMzc5LCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogImJiM2YxNDExLWExMzItNGJiYy04
ZjVlLWVhZGFmZTI1Mzk3NCIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5cGUiOiAiZGF0ZXRp
bWVwaWNrZXIiLCAidG9vbHRpcCI6ICIiLCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6
IGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJleHBvcnRfa2V5IjogImFjdGlvbmludm9jYXRpb24v
bWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX3F1ZXJ5X3N0YXJ0X2RhdGV0aW1lIiwgImhpZGVfbm90
aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJtaWNyb3NvZnRf
c2VjdXJpdHlfZ3JhcGhfcXVlcnlfc3RhcnRfZGF0ZXRpbWUiLCAiZGVwcmVjYXRlZCI6IGZhbHNl
LCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJ2YWx1ZXMiOiBbXX0sIHsib3Bl
cmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6IDYsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQi
OiAiTWljcm9zb2Z0IFNlY3VyaXR5IEdyYXBoIEFsZXJ0IGNvbW1lbnQiLCAiYmxhbmtfb3B0aW9u
IjogZmFsc2UsICJwcmVmaXgiOiAicHJvcGVydGllcyIsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlk
IjogMjc1LCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogImI1YzNhYjg1LTlmNTAtNDUxZS05
ZTZhLTk3NTZhMmRmYTZiMCIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5cGUiOiAidGV4dCIs
ICJ0b29sdGlwIjogIiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0
ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiYWN0aW9uaW52b2NhdGlvbi9taWNyb3NvZnRf
c2VjdXJpdHlfZ3JhcGhfYWxlcnRfY29tbWVudCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNl
LCAicGxhY2Vob2xkZXIiOiAiIiwgIm5hbWUiOiAibWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2Fs
ZXJ0X2NvbW1lbnQiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2Vy
dmVyIjogZmFsc2UsICJ2YWx1ZXMiOiBbXX0sIHsib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6
IDYsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAiTWljcm9zb2Z0IFNlY3VyaXR5IEdy
YXBoIFF1ZXJ5IEVuZCBEYXRlVGltZSIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6
ICJwcm9wZXJ0aWVzIiwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAzODAsICJyZWFkX29ubHki
OiBmYWxzZSwgInV1aWQiOiAiM2E5MWQ2MmYtOTdlNy00ZDVhLWIyOTMtNjA4NTk4ZTFjOWRmIiwg
ImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJkYXRldGltZXBpY2tlciIsICJ0b29sdGlw
IjogIiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMi
OiBbXSwgImV4cG9ydF9rZXkiOiAiYWN0aW9uaW52b2NhdGlvbi9taWNyb3NvZnRfc2VjdXJpdHlf
Z3JhcGhfcXVlcnlfZW5kX2RhdGV0aW1lIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJw
bGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJtaWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfcXVlcnlf
ZW5kX2RhdGV0aW1lIiwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3Nl
cnZlciI6IGZhbHNlLCAidmFsdWVzIjogW119LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQi
OiAxMSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJtaWNyb3NvZnRfc2VjdXJpdHlf
Z3JhcGhfYWxlcnRfZGF0YSIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6IG51bGws
ICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMjczLCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlk
IjogImYyNWQ1YTBmLTdmM2EtNDYxNS1iN2UzLTQxMzU4ZTI5ODliNCIsICJjaG9zZW4iOiBmYWxz
ZSwgImlucHV0X3R5cGUiOiAidGV4dGFyZWEiLCAidG9vbHRpcCI6ICJKU09OIHN0cmluZyBvZiBk
YXRhIHRvIHVwZGF0ZSBhbiBhbGVydCB3aXRoLiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90
ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbeyJ1dWlkIjogIjY4M2M2MGNmLWQzOTQtNGNmYy1i
N2FkLTY2YjcyNDI3MjE4YyIsICJpZCI6IDE1LCAidGVtcGxhdGUiOiB7ImNvbnRlbnQiOiAie1xu
ICAgICAgICBcInN0YXR1c1wiOiBcInJlc29sdmVkXCIsXG4gICAgICAgIFwidmVuZG9ySW5mb3Jt
YXRpb25cIjoge1xuICAgICAgICAgICAgXCJwcm92aWRlclwiOiBcIlN0cmluZ1wiLFxuICAgICAg
ICAgICAgXCJ2ZW5kb3JcIjogXCJTdHJpbmdcIlxuICAgICAgICB9XG4gICAgfSIsICJmb3JtYXQi
OiAidGV4dCJ9LCAibmFtZSI6ICJSZXNvbHZlIEFsZXJ0In0sIHsidXVpZCI6ICI5ZmZjMDRjNy1k
YWQ2LTQ5ZjQtYTc4MS00YjcyMDViNGJmZmYiLCAiaWQiOiAxNCwgInRlbXBsYXRlIjogeyJjb250
ZW50IjogIntcbiAgICAgICAgXCJ2ZW5kb3JJbmZvcm1hdGlvblwiOiB7XG4gICAgICAgICAgICBc
InByb3ZpZGVyXCI6IFwiU3RyaW5nXCIsXG4gICAgICAgICAgICBcInZlbmRvclwiOiBcIlN0cmlu
Z1wiXG4gICAgICAgIH1cbiAgICB9IiwgImZvcm1hdCI6ICJ0ZXh0In0sICJuYW1lIjogIlZlbmRv
ciBJbmZvIE5lZWRlZCJ9XSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9taWNyb3NvZnRfc2Vj
dXJpdHlfZ3JhcGhfYWxlcnRfZGF0YSIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxh
Y2Vob2xkZXIiOiAiIiwgIm5hbWUiOiAibWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X2Rh
dGEiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFs
c2UsICJyZXF1aXJlZCI6ICJhbHdheXMiLCAidmFsdWVzIjogW119LCB7Im9wZXJhdGlvbnMiOiBb
XSwgInR5cGVfaWQiOiAxMSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJtaWNyb3Nv
ZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRfc2VhcmNoX3F1ZXJ5IiwgImJsYW5rX29wdGlvbiI6IGZh
bHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAzNzAsICJyZWFk
X29ubHkiOiBmYWxzZSwgInV1aWQiOiAiZDIwMTI1MTItOWI5ZS00YTRmLThlZDItYjUzNzZjMTJk
NTc4IiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInRvb2x0aXAiOiAi
U3RyaW5nIHRvIGZpbHRlciBhbGVydCBzZWFyY2ggcmVzdWx0cyBvbiIsICJpbnRlcm5hbCI6IGZh
bHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAi
X19mdW5jdGlvbi9taWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRfc2VhcmNoX3F1ZXJ5Iiwg
ImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICJmaWx0ZXI9YXNzaWdu
ZWRUbyBlcSAnYW5hbHlzdEBtMzY1eDU5NDY1MS5vbm1pY3Jvc29mdC5jb20nIGFuZCBzZXZlcml0
eSBlcSAnaGlnaCciLCAibmFtZSI6ICJtaWNyb3NvZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRfc2Vh
cmNoX3F1ZXJ5IiwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZl
ciI6IGZhbHNlLCAidmFsdWVzIjogW119LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAx
MSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJtaWNyb3NvZnRfc2VjdXJpdHlfZ3Jh
cGhfYWxlcnRfaWQiLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJwcmVmaXgiOiBudWxsLCAiY2hh
bmdlYWJsZSI6IHRydWUsICJpZCI6IDI3MiwgInJlYWRfb25seSI6IGZhbHNlLCAidXVpZCI6ICJh
MTg0MWYzYy1jNTEwLTQyZDctOWE0Ni1kNjM4ZmI3MWE5NzkiLCAiY2hvc2VuIjogZmFsc2UsICJp
bnB1dF90eXBlIjogInRleHQiLCAidG9vbHRpcCI6ICJJRCBvZiBhbiBhbGVydC4iLCAiaW50ZXJu
YWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJleHBvcnRf
a2V5IjogIl9fZnVuY3Rpb24vbWljcm9zb2Z0X3NlY3VyaXR5X2dyYXBoX2FsZXJ0X2lkIiwgImhp
ZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJtaWNy
b3NvZnRfc2VjdXJpdHlfZ3JhcGhfYWxlcnRfaWQiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZGVm
YXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJyZXF1aXJlZCI6ICJhbHdheXMiLCAidmFs
dWVzIjogW119XSwgIm92ZXJyaWRlcyI6IFtdLCAiZXhwb3J0X2RhdGUiOiAxNTUwNjg3NDE0OTI4
fQ==
"""
)
| 75.132859
| 402
| 0.970223
| 1,295
| 63,337
| 47.295753
| 0.73668
| 0.014156
| 0.018319
| 0.011462
| 0.030385
| 0.020605
| 0.01427
| 0.01058
| 0.01058
| 0.01058
| 0
| 0.12281
| 0.023967
| 63,337
| 843
| 403
| 75.132859
| 0.867953
| 0.024914
| 0
| 0.010038
| 1
| 0
| 0.986029
| 0.968233
| 0
| 1
| 0
| 0
| 0
| 1
| 0.002509
| false
| 0
| 0.003764
| 0
| 0.007528
| 0.001255
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bc64553455d0117c472f31755f339df30f6d7ac1
| 28
|
py
|
Python
|
neuralizer/tests/test_param_record.py
|
BeckResearchLab/Neuralizer
|
70826e684f395796752dcdb891a28b35512a9632
|
[
"MIT"
] | 1
|
2019-04-23T15:54:14.000Z
|
2019-04-23T15:54:14.000Z
|
neuralizer/tests/test_param_record.py
|
BeckResearchLab/Neuralizer
|
70826e684f395796752dcdb891a28b35512a9632
|
[
"MIT"
] | 1
|
2019-05-08T01:13:14.000Z
|
2019-05-08T01:13:14.000Z
|
neuralizer/tests/test_param_record.py
|
BeckResearchLab/Neuralizer
|
70826e684f395796752dcdb891a28b35512a9632
|
[
"MIT"
] | 1
|
2018-11-01T17:12:30.000Z
|
2018-11-01T17:12:30.000Z
|
import param_record as pr
| 7
| 25
| 0.785714
| 5
| 28
| 4.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 28
| 3
| 26
| 9.333333
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bc6e214dca62856a69f5a5ed0b27e146ddf22fc4
| 1,768
|
py
|
Python
|
tests/musictree/grandstaff/test_add_dynamics.py
|
alexgorji/music_score
|
b4176da52295361f3436826903485c5cb8054c5e
|
[
"MIT"
] | 2
|
2020-06-22T13:33:28.000Z
|
2020-12-30T15:09:00.000Z
|
tests/musictree/grandstaff/test_add_dynamics.py
|
alexgorji/music_score
|
b4176da52295361f3436826903485c5cb8054c5e
|
[
"MIT"
] | 37
|
2020-02-18T12:15:00.000Z
|
2021-12-13T20:01:14.000Z
|
tests/musictree/grandstaff/test_add_dynamics.py
|
alexgorji/music_score
|
b4176da52295361f3436826903485c5cb8054c5e
|
[
"MIT"
] | null | null | null |
import os
from musicscore.musicstream.streamvoice import SimpleFormat
from musicscore.musictree.treescoretimewise import TreeScoreTimewise
from musicxmlunittest import XMLTestCase
path = str(os.path.abspath(__file__).split('.')[0])
class Test(XMLTestCase):
def setUp(self) -> None:
self.score = TreeScoreTimewise()
def test_1(self):
# lyric
sf_1 = SimpleFormat(quarter_durations=[2, 2], midis=[0, 71])
sf_1.chords[1].add_dynamics('ff')
sf_1.to_stream_voice().add_to_score(self.score, staff_number=1)
sf_2 = SimpleFormat(quarter_durations=[2, 2], midis=[71, 0])
sf_2.chords[0].add_dynamics('p')
sf_2.to_stream_voice().add_to_score(self.score, staff_number=2)
xml_path = path + '_test_1.xml'
self.score.write(xml_path)
self.assertCompareFiles(xml_path)
def test_2(self):
sf_1 = SimpleFormat(quarter_durations=[2, 2], midis=[0, 71])
sf_1.chords[1].add_dynamics('f', placement='above')
sf_1.to_stream_voice(1).add_to_score(self.score, staff_number=1)
sf_2 = SimpleFormat(quarter_durations=[2, 2], midis=[60, 0])
sf_2.chords[0].add_dynamics('p')
sf_2.to_stream_voice(2).add_to_score(self.score, staff_number=1)
sf_1 = SimpleFormat(quarter_durations=[2, 2], midis=[0, 71])
sf_1.chords[1].add_dynamics('ff', placement='above')
sf_1.to_stream_voice(1).add_to_score(self.score, staff_number=2)
sf_2 = SimpleFormat(quarter_durations=[2, 2], midis=[60, 0])
sf_2.chords[0].add_dynamics('pp')
sf_2.to_stream_voice(2).add_to_score(self.score, staff_number=2)
xml_path = path + '_test_2.xml'
self.score.write(xml_path)
self.assertCompareFiles(xml_path)
| 38.434783
| 72
| 0.674208
| 263
| 1,768
| 4.243346
| 0.190114
| 0.072581
| 0.150538
| 0.155914
| 0.740143
| 0.737455
| 0.737455
| 0.737455
| 0.736559
| 0.735663
| 0
| 0.04805
| 0.187783
| 1,768
| 45
| 73
| 39.288889
| 0.729109
| 0.002828
| 0
| 0.323529
| 0
| 0
| 0.02385
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 1
| 0.088235
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bc90de4b136b6e4eb9ad5d415ffe8fbc4b5abd07
| 89
|
py
|
Python
|
news/main/__init__.py
|
bensammwaniki/News-site
|
ed759e3238830ac6f42adbf7ee46c767c399ec10
|
[
"MIT"
] | null | null | null |
news/main/__init__.py
|
bensammwaniki/News-site
|
ed759e3238830ac6f42adbf7ee46c767c399ec10
|
[
"MIT"
] | null | null | null |
news/main/__init__.py
|
bensammwaniki/News-site
|
ed759e3238830ac6f42adbf7ee46c767c399ec10
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
main = Blueprint('main', __name__)
from .import view,errors
| 17.8
| 34
| 0.775281
| 12
| 89
| 5.416667
| 0.666667
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134831
| 89
| 4
| 35
| 22.25
| 0.844156
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
bcadbcb35fbc51c3861f1daf922bb3f433650df9
| 152
|
py
|
Python
|
portfolio/messages.py
|
rkisdp/rkisdp.django.backend
|
771481cdeea6a101305c4819b06b839266ce6921
|
[
"MIT"
] | null | null | null |
portfolio/messages.py
|
rkisdp/rkisdp.django.backend
|
771481cdeea6a101305c4819b06b839266ce6921
|
[
"MIT"
] | null | null | null |
portfolio/messages.py
|
rkisdp/rkisdp.django.backend
|
771481cdeea6a101305c4819b06b839266ce6921
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# python imports
from __future__ import unicode_literals
# lib imports
from django.utils.translation import ugettext_lazy as _
| 21.714286
| 55
| 0.769737
| 20
| 152
| 5.5
| 0.85
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007692
| 0.144737
| 152
| 6
| 56
| 25.333333
| 0.838462
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bcf508e5b0eccf3113cd9add6d725740c45145a1
| 53
|
py
|
Python
|
src/fffs/core/__init__.py
|
awacha/fffs
|
6a3ce47c5381ae33fe02909754750ff7f0cf2b8c
|
[
"BSD-3-Clause"
] | null | null | null |
src/fffs/core/__init__.py
|
awacha/fffs
|
6a3ce47c5381ae33fe02909754750ff7f0cf2b8c
|
[
"BSD-3-Clause"
] | null | null | null |
src/fffs/core/__init__.py
|
awacha/fffs
|
6a3ce47c5381ae33fe02909754750ff7f0cf2b8c
|
[
"BSD-3-Clause"
] | null | null | null |
from .model import ParameterDefinition, ModelFunction
| 53
| 53
| 0.886792
| 5
| 53
| 9.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075472
| 53
| 1
| 53
| 53
| 0.959184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d5b8c3ff0d7c9281035ea8d37d64debf8ff81f4c
| 153
|
py
|
Python
|
src/common/__init__.py
|
PrimatElite/ml-labs
|
16221d376529f580992467b426177bbde584af2d
|
[
"MIT"
] | null | null | null |
src/common/__init__.py
|
PrimatElite/ml-labs
|
16221d376529f580992467b426177bbde584af2d
|
[
"MIT"
] | 17
|
2021-09-18T16:46:10.000Z
|
2021-12-17T19:43:46.000Z
|
src/common/__init__.py
|
PrimatElite/ml-labs
|
16221d376529f580992467b426177bbde584af2d
|
[
"MIT"
] | null | null | null |
from .image import load_image
from .object import Object, PIXELS_PER_MM
from .object_search import find_object
from .objects_packing import pack_objects
| 30.6
| 41
| 0.856209
| 24
| 153
| 5.166667
| 0.541667
| 0.16129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 153
| 4
| 42
| 38.25
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d5f953cc54419ccb6a31d5cbf3194635320cd561
| 163
|
py
|
Python
|
rsync_storage/settings_rsync_sample.py
|
g10k/sw_rsync_storage
|
a25a3fe4e640421a359f5c272bb16dcb507c6187
|
[
"MIT"
] | null | null | null |
rsync_storage/settings_rsync_sample.py
|
g10k/sw_rsync_storage
|
a25a3fe4e640421a359f5c272bb16dcb507c6187
|
[
"MIT"
] | null | null | null |
rsync_storage/settings_rsync_sample.py
|
g10k/sw_rsync_storage
|
a25a3fe4e640421a359f5c272bb16dcb507c6187
|
[
"MIT"
] | null | null | null |
RSYNC_HOSTS = [
{'host': 'current', 'media_root': '/opt/media/', 'prefix': 'mc1_'},
{'host': '172.17.0.2', 'media_root': '/opt/media/', 'prefix': 'mc2_'}
]
| 40.75
| 73
| 0.546012
| 21
| 163
| 4
| 0.666667
| 0.214286
| 0.285714
| 0.404762
| 0.547619
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064286
| 0.141104
| 163
| 4
| 74
| 40.75
| 0.535714
| 0
| 0
| 0
| 0
| 0
| 0.530488
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
91332d4ebca977073ae22ea347a9ed6d9af0fc8f
| 107
|
py
|
Python
|
Python/helloWorld.py
|
JoaoRobertoFernandes/Code-to-study
|
ac0a69035f5aa124b4ef789bc884342dc93e6fc8
|
[
"MIT"
] | null | null | null |
Python/helloWorld.py
|
JoaoRobertoFernandes/Code-to-study
|
ac0a69035f5aa124b4ef789bc884342dc93e6fc8
|
[
"MIT"
] | null | null | null |
Python/helloWorld.py
|
JoaoRobertoFernandes/Code-to-study
|
ac0a69035f5aa124b4ef789bc884342dc93e6fc8
|
[
"MIT"
] | null | null | null |
#Hello World
print("Hello World")
#Triangle
print(" /|")
print(" / |")
print(" / |")
print("/___|")
| 9.727273
| 20
| 0.523364
| 10
| 107
| 5.3
| 0.4
| 0.566038
| 0.566038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196262
| 107
| 10
| 21
| 10.7
| 0.616279
| 0.17757
| 0
| 0.6
| 0
| 0
| 0.369048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
913beb49ef91d1337794570e5eefd39c9c5428cc
| 24,292
|
py
|
Python
|
cellpack/mgl_tools/DejaVu/scenarioInterface/Tests/test_animations.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | null | null | null |
cellpack/mgl_tools/DejaVu/scenarioInterface/Tests/test_animations.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | 21
|
2021-10-02T00:07:05.000Z
|
2022-03-30T00:02:10.000Z
|
cellpack/mgl_tools/DejaVu/scenarioInterface/Tests/test_animations.py
|
mesoscope/cellpack
|
ec6b736fc706c1fae16392befa814b5337a3a692
|
[
"MIT"
] | null | null | null |
from DejaVu import Viewer
from DejaVu.Spheres import Spheres
from DejaVu.IndexedPolylines import IndexedPolylines
from DejaVu.Materials import propertyNum
from time import sleep
import unittest
import sys
#declare the 'showwarning' variable that is used in the code returned by maa.getSourceCode()
showwarning = False
class CustomAnimations_Tests(unittest.TestCase):
def setUp(self):
"""Create DejaVu Viewer
"""
#if not hasattr(self, "vi"):
self.vi = Viewer()
def tearDown(self):
"""
clean-up
"""
try:
self.vi.Exit()
except:
pass
def test_flyin(self):
"""Tests:
- creation of FlyInObjectMAA with different options (number
of keyframes, direction)
- playing different frames of maa . """
vi = self.vi
sph = Spheres( 'sph', centers=[ (0,0,0), (5, 0,0), (0,5,0), (0, 0,5) ],
materials = [ (1,1,1), (1,0,0), (0,1,0), (0,0,1) ],
inheritMaterial=False)
vi.AddObject(sph)
from DejaVu.scenarioInterface.animations import FlyInObjectMAA
# fly in from left
maa1 = FlyInObjectMAA(sph, objectName=None, direction='left', kfpos=[0, 30])
actors = maa1.actors
self.assertEqual(len(actors), 3)
vi.OneRedraw()
sph.Set(translation=[0,0,0])
# check that the position (translation) of the object changes from left to center
# of the viewer at frames 0 - 15 - 30
maa1.setValuesAt(0)
t1 = sph.translation[0]
vi.OneRedraw()
self.assertEqual(t1 < 0, True)
maa1.setValuesAt(15)
t2 = sph.translation[0]
self.assertEqual( int(t1/2), int(t2))
vi.OneRedraw()
maa1.setValuesAt(30)
t3 = sph.translation[0]
self.assertEqual(t3, 0)
vi.OneRedraw()
# fly in from right
maa2 = FlyInObjectMAA(sph, objectName=None, direction='right', kfpos=[0, 60])
actors = maa2.actors
self.assertEqual(len(actors), 3)
sph.Set(translation=[0,0,0])
# check that the position (translation) of the object changes from right to center
# of the viewer at frames 0 - 30- 60
maa2.setValuesAt(0)
vi.OneRedraw()
t1 = sph.translation[0]
self.assertEqual(t1 > 0, True)
maa2.setValuesAt(30)
vi.OneRedraw()
t2 = sph.translation[0]
self.assertEqual(int(t1/2), int(t2))
maa2.setValuesAt(60)
vi.OneRedraw()
t3 = sph.translation[0]
self.assertEqual(t3, 0)
# fly in from top
maa3 = FlyInObjectMAA(sph, objectName=None, direction='top', kfpos=[0, 30])
actors = maa3.actors
self.assertEqual(len(actors), 3)
sph.Set(translation=[0,0,0])
# check that the position (translation) of the object changes from top to center
# of the viewer at frames 0 - 15 - 30
maa3.setValuesAt(0)
vi.OneRedraw()
t1 = sph.translation[1]
self.assertEqual(t1 > 0, True)
maa3.setValuesAt(15)
vi.OneRedraw()
t2 = sph.translation[1]
self.assertEqual(int(t1/2), int(t2))
maa3.setValuesAt(30)
vi.OneRedraw()
t3 = sph.translation[1]
self.assertEqual(t3, 0)
# fly in from bottom
maa4 = FlyInObjectMAA(sph, objectName=None, direction='bottom', kfpos=[0, 60])
actors = maa4.actors
self.assertEqual(len(actors),3)
sph.Set(translation=[0,0,0])
sph.Set(visible = 0)
# check that the position (translation) of the object changes from bottom to center
# of the viewer at frames 0 - 30 - 60
maa4.setValuesAt(0)
vi.OneRedraw()
# check that the "visible" maa's actor sets the sph.visible attribute to 1
self.assertEqual(sph.visible, 1)
t1 = sph.translation[1]
self.assertEqual( t1 < 0, True)
maa4.setValuesAt(30)
vi.OneRedraw()
t2 = sph.translation[1]
self.assertEqual( int(t1/2), int(t2))
maa4.setValuesAt(60)
vi.OneRedraw()
t3 = sph.translation[1]
self.assertEqual(t3, 0)
#run maa
maa1.run()
maa2.run()
maa3.run()
self.assertEqual(sph.visible, 1)
maa4.run()
#check we can reproduce the maa from it's sourcecode:
maa5 = None
maasrc = maa4.getSourceCode("maa5")
viewer = vi
exec(maasrc)
assert maa5 != None
self.assertEqual(len(maa5.actors),3)
sph.Set(translation=[0,0,0])
# check that the position (translation) of the object changes from bottom to center
# of the viewer at frames 0 - 30 - 60
maa5.setValuesAt(0)
vi.OneRedraw()
# check that the "visible" maa's actor sets the sph.visible attribute to 1
self.assertEqual(sph.visible, 1)
t1 = sph.translation[1]
self.assertEqual( t1 < 0, True)
maa5.setValuesAt(30)
vi.OneRedraw()
t2 = sph.translation[1]
self.assertEqual( int(t1/2), int(t2))
maa5.setValuesAt(60)
vi.OneRedraw()
t3 = sph.translation[1]
self.assertEqual(t3, 0)
def test_flyout(self):
"""Test creation of FlyOutObjectMAA with different options (number of keyframes, direction); playing different frames of maa ."""
vi = self.vi
sph = Spheres( 'sph', centers=[ (0,0,0), (5, 0,0), (0,5,0), (0, 0,5) ],
materials = [ (1,1,1), (1,0,0), (0,1,0), (0,0,1) ],
inheritMaterial=False)
vi.AddObject(sph)
from DejaVu.scenarioInterface.animations import FlyOutObjectMAA
# direction: left
sph.Set(translation=[0,0,0])
maa1 = FlyOutObjectMAA(sph, objectName=None, direction='left', kfpos=[0, 30])
actors = maa1.actors
self.assertEqual (len(actors), 3)
vi.OneRedraw()
sph.Set(translation=[5,-5,5])
# check that the position (translation) of the object changes from center to left side
# of the viewer at frames 0 - 15 - 30
maa1.setValuesAt(0)
t1 = sph.translation
vi.OneRedraw()
self.assertEqual ([t1[0], t1[1], t1[2]] , [0, 0, 0])
maa1.setValuesAt(15)
t2 = sph.translation[0]
self.assertEqual(t2 < 0, True)
vi.OneRedraw()
maa1.setValuesAt(30)
t3 = sph.translation[0]
self.assertEqual(int(t3/2), int(t2))
vi.OneRedraw()
# direction: right
sph.Set(translation=[0,0,0])
maa2 = FlyOutObjectMAA(sph, objectName=None, direction='right', kfpos=[0, 60])
actors = maa2.actors
self.assertEqual(len(actors), 3)
vi.OneRedraw()
sph.Set(translation=[5,5,5])
# check that the position (translation) of the object changes from center to right side
# of the viewer at frames 0 - 30 - 60
maa2.setValuesAt(0)
t1 = sph.translation
vi.OneRedraw()
self.assertEqual([t1[0], t1[1], t1[2]] , [0, 0, 0])
maa2.setValuesAt(30)
t2 = sph.translation[0]
self.assertEqual(t2 > 0, True)
vi.OneRedraw()
maa2.setValuesAt(60)
t3 = sph.translation[0]
self.assertEqual(int(t3/2), int(t2))
vi.OneRedraw()
# direction: top
sph.Set(translation=[0,0,0])
maa3 = FlyOutObjectMAA(sph, objectName=None, direction='top', kfpos=[0, 30])
actors = maa3.actors
self.assertEqual (len(actors), 3)
vi.OneRedraw()
sph.Set(translation=[-5,5,5])
# check that the position (translation) of the object changes from center to top side
# of the viewer at frames 0 - 15 - 30
maa3.setValuesAt(0)
t1 = sph.translation
vi.OneRedraw()
self.assertEqual([t1[0], t1[1], t1[2]] , [0, 0, 0])
maa3.setValuesAt(15)
t2 = sph.translation[1]
self.assertEqual(t2 > 0, True)
vi.OneRedraw()
maa3.setValuesAt(30)
t3 = sph.translation[1]
self.assertEqual(int(t3/2), int(t2))
vi.OneRedraw()
# direction: bottom
sph.Set(translation=[0,0,0])
maa4 = FlyOutObjectMAA(sph, objectName=None, direction='bottom', kfpos=[0, 60])
actors = maa4.actors
self.assertEqual (len(actors), 3)
sph.Set(visible = 0)
vi.OneRedraw()
sph.Set(translation=[5,5,5])
# check that the position (translation) of the object changes from center to top side
# of the viewer at frames 0 - 30 - 60
maa4.setValuesAt(0)
t1 = sph.translation
vi.OneRedraw()
self.assertEqual([t1[0], t1[1], t1[2]] , [0, 0, 0])
# check that the "visible" maa's actor sets the sph.visible attribute to 1
self.assertEqual(sph.visible, 1)
maa4.setValuesAt(30)
t2 = sph.translation[1]
self.assertEqual(t2 < 0, True)
vi.OneRedraw()
maa4.setValuesAt(60)
t3 = sph.translation[1]
self.assertEqual(int(t3/2), int(t2))
vi.OneRedraw()
#run maas
maa1.run()
maa2.run()
maa3.run()
self.assertEqual(sph.visible, 1)
maa4.run()
#check we can reproduce the maa from it's sourcecode:
maa5 = None
maasrc = maa4.getSourceCode("maa5")
viewer = vi
exec(maasrc)
assert maa5 != None
self.assertEqual (len(maa5.actors), 3)
sph.Set(translation=[5,5,5])
vi.OneRedraw()
# check that the position (translation) of the object changes from center to top side
# of the viewer at frames 0 - 30 - 60
## maa5.setValuesAt(0)
## t1 = sph.translation
## vi.OneRedraw()
## self.assertEqual([t1[0], t1[1], t1[2]] , [0, 0, 0])
## # check that the "visible" maa's actor sets the sph.visible attribute to 1
## self.assertEqual(sph.visible, 1)
## maa5.setValuesAt(30)
## t2 = sph.translation[1]
## self.assertEqual(t2 < 0, True)
## vi.OneRedraw()
## maa5.setValuesAt(60)
## t3 = sph.translation[1]
## self.assertEqual(int(t3/2), int(t2))
maa5.run()
def check_fadevals(self, maa, obj, vi):
# check that the opacity of the object changes from 0 to 1
# at frames 0 - 15 - 30
maa.setValuesAt(0)
val1 = obj.materials[1028].prop[propertyNum['opacity']]
self.assertEqual(len(val1), 1)
self.assertEqual (val1[0] , 0)
self.assertEqual(obj.visible, 1)
vi.OneRedraw()
maa.setValuesAt(15)
val2 = obj.materials[1028].prop[propertyNum['opacity']]
self.assertEqual(len(val1), 1)
self.assertEqual (val2[0] , 0.5)
vi.OneRedraw()
maa.setValuesAt(30)
val3 = obj.materials[1028].prop[propertyNum['opacity']]
self.assertEqual(len(val1), 1)
self.assertEqual(val3[0], 1)
vi.OneRedraw()
def test_fadein(self):
"""Test creation of FadeInObjectMAA and playing different frames of maa ."""
vi = viewer = self.vi
sph = Spheres( 'sph', centers=[ (0,0,0), (5, 0,0), (0,5,0), (0, 0,5) ],
materials = [ (1,1,1), (1,0,0), (0,1,0), (0,0,1) ],
inheritMaterial=False)
viewer.AddObject(sph)
from DejaVu.scenarioInterface.animations import FadeInObjectMAA
maa1 = FadeInObjectMAA(sph, objectName=None, kfpos=[0, 30])
#check we can reproduce the maa from it's sourcecode:
maa2 = None
maasrc = maa1.getSourceCode("maa2")
#viewer = vi
exec(maasrc)
assert maa2 != None
sph.Set(visible = 0)
for maa in [maa1, maa2]:
actors = maa.actors
self.assertEqual (len(actors), 3)
viewer.OneRedraw()
# check that the opacity of the object changes from 0 to 1
# at frames 0 - 15 - 30
maa.setValuesAt(0)
val1 = sph.materials[1028].prop[propertyNum['opacity']]
self.assertEqual(len(val1), 1)
self.assertEqual (val1[0] , 0)
self.assertEqual(sph.visible, 1)
vi.OneRedraw()
maa.setValuesAt(15)
val2 = sph.materials[1028].prop[propertyNum['opacity']]
self.assertEqual(len(val1), 1)
self.assertEqual (val2[0] , 0.5)
vi.OneRedraw()
maa.setValuesAt(30)
val3 = sph.materials[1028].prop[propertyNum['opacity']]
self.assertEqual(len(val1), 1)
self.assertEqual(val3[0], 1)
vi.OneRedraw()
# run maa
maa.run()
def test_fadeout(self):
"""Test creation of FadeInObjectMAA and playing different frames of maa ."""
vi = self.vi
sph = Spheres( 'sph', centers=[ (0,0,0), (5, 0,0), (0,5,0), (0, 0,5) ],
materials = [ (1,1,1), (1,0,0), (0,1,0), (0,0,1) ],
inheritMaterial=False)
vi.AddObject(sph)
sph.Set(opacity = 0.8)
from DejaVu.scenarioInterface.animations import FadeOutObjectMAA
#from DejaVu.Materials import propertyNum
# create an instance of FadeOutObjectMAA object
maa1 = FadeOutObjectMAA(sph, objectName=None, kfpos=[0, 60])
#check we can reproduce the maa from it's sourcecode:
maa2 = None
maasrc = maa1.getSourceCode("maa2")
viewer = vi
print maasrc
exec(maasrc)
assert maa2 != None
# check the maas
for maa in [maa1, maa2]:
actors = maa.actors
self.assertEqual (len(actors), 3)
vi.OneRedraw()
# check that the opacity of the object changes from 0 to 1
# at frames 0 - 30 - 60
maa.setValuesAt(0)
val1 = sph.materials[1028].prop[propertyNum['opacity']]
self.assertEqual(len(val1), 1)
self.assertEqual ("%.2f"%val1[0] , "0.80")
vi.OneRedraw()
maa.setValuesAt(30)
val2 = sph.materials[1028].prop[propertyNum['opacity']]
self.assertEqual(len(val1), 1)
self.assertEqual ("%.2f"%val2[0] , "0.40")
vi.OneRedraw()
maa.setValuesAt(60)
val3 = sph.materials[1028].prop[propertyNum['opacity']]
self.assertEqual(len(val1), 1)
self.assertEqual(val3[0], 0)
vi.OneRedraw()
maa.run()
maa.afterAnimation_cb()
# check that the last maa's afterAnimation method sets the opacity
# to it's original value
val = sph.materials[1028].prop[propertyNum['opacity']][0]
self.assertEqual("%.2f"%val, "0.80")
def test_partialFade(self):
"""Test creation of PartialFadeMAA, and playing different frames of maa ."""
vi = self.vi
sph = Spheres( 'sph', centers=[ (0,0,0), (5, 0,0), (0,5,0), (0, 0,5) ],
materials = [ (1,1,1), (1,0,0), (0,1,0), (0,0,1) ],
inheritMaterial=False)
vi.AddObject(sph)
from DejaVu.scenarioInterface.animations import PartialFadeMAA
#from DejaVu.Materials import propertyNum
import numpy
initVal = {sph: [0.80, 1.0, 0.80, 1.0]}
finalVal = {sph: numpy.array([0.0,1.0, 0.0, 1.0], 'f')}
maa1 = PartialFadeMAA(sph, initVal, finalVal, kfpos=[0, 100])
#check we can reproduce the maa from it's sourcecode:
maa2 = None
maasrc = maa1.getSourceCode("maa2")
viewer = vi
exec(maasrc)
assert maa2 != None
sph.Set(visible = 0)
for maa in [maa1, maa2]:
actors = maa.actors
self.assertEqual (len(actors), 3)
# test that the opacity of the object is changing from initVal to finalVal when maa is set to frames 0 - 50 -100
maa.setValuesAt(0)
self.assertEqual(sph.visible , 1)
val = sph.materials[1028].prop[1][:,3]
#print "val:" , val
testval = numpy.array(initVal[sph], "f")
self.assertEqual(numpy.alltrue(numpy.equal(val, testval)), True)
vi.OneRedraw()
maa.setValuesAt(50)
val = sph.materials[1028].prop[1][:,3]
#print "val:" , val
testval = numpy.array([0.40, 1., 0.40, 1.], "f")
self.assertEqual(numpy.alltrue(numpy.equal(val, testval)), True)
vi.OneRedraw()
maa.setValuesAt(100)
val = sph.materials[1028].prop[1][:,3]
testval = numpy.array(finalVal[sph], "f")
self.assertEqual(numpy.alltrue(numpy.equal(val, testval)), True)
#print "val:" , val
vi.OneRedraw()
maa.run()
maa.afterAnimation_cb()
#check that the afterAnimation method sets the opacity attribute
# of the object to its original value
val = sph.materials[1028].prop[1][:,3]
#print "val:" , val
testval = numpy.array([1., 1., 1., 1.], "f")
self.assertEqual(numpy.alltrue(numpy.equal(val, testval)), True)
def test_visible(self):
"""Test creation of VisibleObjectMAA """
vi = self.vi
sph = Spheres( 'sph', centers=[ (0,0,0), (5, 0,0), (0,5,0), (0, 0,5) ],
materials = [ (1,1,1), (1,0,0), (0,1,0), (0,0,1) ],
inheritMaterial=False)
vi.AddObject(sph)
from DejaVu.scenarioInterface.animations import VisibleObjectMAA
# create maa that sets "visible" attribute of the object to False
maa1 = VisibleObjectMAA(sph, visible=0)
self.assertEqual (len(maa1.actors), 2)
maa1.setValuesAt(0)
self.assertEqual(sph.visible , 0)
maa1.run()
maa1.afterAnimation_cb()
# afterAnimation_cb() should set the attribute to it's original value
self.assertEqual(sph.visible , 1)
# create maa that sets "visible" attribute of the object to True
maa2 = VisibleObjectMAA(sph, visible=1)
#check we can reproduce the maa from it's sourcecode:
maa3 = None
maasrc = maa2.getSourceCode("maa3")
viewer = vi
exec(maasrc)
assert maa3 != None
for maa in [maa2, maa3]:
self.assertEqual (len(maa.actors), 2)
sph.Set(visible = 0)
maa.setValuesAt(0)
self.assertEqual(sph.visible , 1)
maa.run()
def test_colors(self):
"""Test creation of ColorObjectMAA and setting the maa to different frames """
vi = self.vi
sph = Spheres( 'sph', centers=[ (0,0,0), (5, 0,0), (0,5,0), (0, 0,5) ],
materials = [ (1,1,1), (1,1,1), (1,1,1), (1,1,1) ],
inheritMaterial=False)
lines = IndexedPolylines('lines' ,materials=((0,1,0),),
vertices=((0,0,0), (5, 0,0), (0,5,0), (0, 0,5)), faces=((0,1),(1,2),(2,3),(3,0)),
inheritMaterial = False)
vi.AddObject(sph)
vi.AddObject(lines)
from DejaVu.scenarioInterface.animations import ColorObjectMAA
import numpy
initColors = {sph: [ (1,1,1), (1,1,1), (1,1,1), (1,1,1)], lines: [(0,1,0),]}
finalColors = {sph: [ (1,0,1), (0,0,1), (1,0,0), (0,1,0) ], lines: [(1,0,0)]}
root = vi.rootObject
lines.Set(visible = 0 )
## maa = ColorObjectMAA(sph, initColors[sph], finalColors[sph], nbFrames = 200)
## maa1 = ColorObjectMAA(lines, initColors[lines], finalColors[lines], nbFrames = 200)
## maa.addMultipleActorsActionsAt(maa1)
maa1 = ColorObjectMAA([sph, lines], initColors, finalColors, nbFrames = 200)
#check we can reproduce the maa from it's sourcecode:
maa2 = None
maasrc = maa1.getSourceCode("maa2")
viewer = vi
exec(maasrc)
assert maa2 != None
for maa in [maa1, maa2]:
self.assertEqual(len(maa.actors), 5)
self.assertEqual(len(maa.origValues), 4)
maa.setValuesAt(0)
val1 = sph.materials[1028].prop[1][:, :3]
#print "1:", val1
val2 = lines.materials[1028].prop[1][:, :3]
#print "2", val2
vi.OneRedraw()
self.assertEqual(numpy.alltrue(numpy.equal(val1, numpy.array(initColors[sph], "f"))), True)
self.assertEqual(numpy.alltrue(numpy.equal(val2, numpy.array(initColors[lines], "f"))), True)
self.assertEqual(lines.visible , 1)
maa.setValuesAt(100)
val1 = sph.materials[1028].prop[1][:, :3]
testval1 = numpy.array([[ 1., 0.5, 1. ], [0.5,0.5, 1.], [1., 0.5, 0.5], [0.5, 1., 0.5]],'f')
#print "1:", val1
val2 = lines.materials[1028].prop[1][:, :3]
#print "2:", val2
testval2 = numpy.array([[ 0.5, 0.5, 0. ]],'f')
vi.OneRedraw()
self.assertEqual(numpy.alltrue(numpy.equal(val1, testval1)), True)
self.assertEqual(numpy.alltrue(numpy.equal(val2, testval2)), True)
maa.setValuesAt(200)
val1 = sph.materials[1028].prop[1][:, :3]
#print "1:", val1
val2 = lines.materials[1028].prop[1][:, :3]
#print "2:", val2
vi.OneRedraw()
self.assertEqual(numpy.alltrue(numpy.equal(val1, numpy.array(finalColors[sph], "f"))), True)
self.assertEqual(numpy.alltrue(numpy.equal(val2, numpy.array(finalColors[lines], "f"))), True)
sph.Set(materials = [ (0,0,1), (0.5,0,1), (0,0,1), (05,1,1)], inheritMaterial = 0)
lines.Set(materials = [ (1,1,1),], inheritMaterial = 0)
# check that the original values are set to the geometries after the maa run.
maa.run()
maa.afterAnimation_cb()
val1 = sph.materials[1028].prop[1][:, :3]
val2 = lines.materials[1028].prop[1][:, :3]
self.assertEqual(numpy.alltrue(numpy.equal(val1, numpy.array(initColors[sph], "f"))), True)
self.assertEqual(numpy.alltrue(numpy.equal(val2, numpy.array(initColors[lines], "f"))), True)
self.assertEqual(lines.visible , 0)
vi.OneRedraw()
def test_rotationMAA(self):
from DejaVu.scenarioInterface.animations import RotationMAA
import numpy
vi = self.vi
sph = Spheres( 'sph', centers=[ (0,0,0), (5, 0,0), (0,5,0), (0, 0,5) ],
materials = [ (1,1,1), (1,1,1), (1,1,1), (1,1,1) ],
inheritMaterial=False)
vi.AddObject(sph)
root = vi.rootObject
origRot = root.rotation.copy()
maa1 = RotationMAA(root, angle = 120, nbFrames= 90 )
maa2 = None
maasrc = maa1.getSourceCode("maa2")
viewer = vi
exec(maasrc)
assert maa2 != None
for maa in [maa1, maa2]:
for i in range(30):
maa.setValuesAt(i)
vi.OneRedraw()
rots = [origRot, root.rotation.copy()]
assert numpy.alltrue(numpy.equal(rots[0], rots[1])) == False
for i in range(30, 60):
maa.setValuesAt(i)
vi.OneRedraw()
for rr in rots:
assert numpy.alltrue(numpy.equal(rr, root.rotation)) == False
rots.append(root.rotation.copy())
for i in range(60, 90):
maa.setValuesAt(i)
vi.OneRedraw()
for rr in rots:
assert numpy.alltrue(numpy.equal(rr, root.rotation)) == False
# check that afterAnimation() sets the object to it's orig. value
maa.run()
#assert numpy.alltrue(numpy.equal(origRot, root.rotation)) == True
#print "rotation after run", root.rotation
| 38.436709
| 137
| 0.548246
| 2,990
| 24,292
| 4.449164
| 0.067559
| 0.018492
| 0.012403
| 0.010223
| 0.792528
| 0.720364
| 0.704277
| 0.679696
| 0.660453
| 0.650455
| 0
| 0.066941
| 0.319241
| 24,292
| 631
| 138
| 38.497623
| 0.737498
| 0.167463
| 0
| 0.758772
| 0
| 0
| 0.010513
| 0
| 0
| 0
| 0
| 0
| 0.217105
| 0
| null | null | 0.002193
| 0.039474
| null | null | 0.002193
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
913c65253d82311f4cd43b2493840bfc2d050deb
| 77
|
py
|
Python
|
volcano/srv104/__init__.py
|
Bubkagob/iec104server
|
fb45e6864135575d75e3a8a178d79db652179f2b
|
[
"MIT"
] | 6
|
2019-06-28T14:48:51.000Z
|
2022-03-09T02:14:34.000Z
|
volcano/srv104/__init__.py
|
suchaoxiao/iec104server
|
fb45e6864135575d75e3a8a178d79db652179f2b
|
[
"MIT"
] | 1
|
2019-06-12T13:53:08.000Z
|
2019-06-17T13:32:44.000Z
|
volcano/srv104/__init__.py
|
suchaoxiao/iec104server
|
fb45e6864135575d75e3a8a178d79db652179f2b
|
[
"MIT"
] | 2
|
2019-07-09T03:46:10.000Z
|
2020-06-04T01:19:29.000Z
|
import sys
import os
sys.path.append(os.getcwd() + '/deps/peavy-client/src')
| 19.25
| 55
| 0.727273
| 13
| 77
| 4.307692
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 77
| 3
| 56
| 25.666667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
914d4f5793ed6bcfd1e784ab7aa411107c109a01
| 353
|
py
|
Python
|
tests-predict/test_app.py
|
doksketch/call-you-maybe
|
cb20f9fc2c996a7396bccec0ee2a4b33a9f1fe1a
|
[
"MIT"
] | null | null | null |
tests-predict/test_app.py
|
doksketch/call-you-maybe
|
cb20f9fc2c996a7396bccec0ee2a4b33a9f1fe1a
|
[
"MIT"
] | null | null | null |
tests-predict/test_app.py
|
doksketch/call-you-maybe
|
cb20f9fc2c996a7396bccec0ee2a4b33a9f1fe1a
|
[
"MIT"
] | null | null | null |
"""
from unittest import patch
from unittest.mock import mock_open
with patch('__main__.open', mock_open(read_data='')) as m:
"""
from app.worker import config
def test_config():
assert 'name' in config['worker']
assert 'broker' in config['worker']
assert 'backend' in config['worker']
assert config['worker']['backend'] == 'rpc://'
| 22.0625
| 58
| 0.679887
| 48
| 353
| 4.833333
| 0.479167
| 0.206897
| 0.181034
| 0.258621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161473
| 353
| 15
| 59
| 23.533333
| 0.783784
| 0.342776
| 0
| 0
| 0
| 0
| 0.241071
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0.166667
| true
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e6b0cfa36d7978d9ddf002511a6b296a64ff2d2e
| 602
|
py
|
Python
|
doxygen/publish.py
|
LaudateCorpus1/caffe2.github.io
|
8cbc1076d9b7a4858ac330b1e31fe3074f06f673
|
[
"CC-BY-4.0"
] | 26
|
2018-05-01T05:13:00.000Z
|
2022-02-05T18:17:35.000Z
|
doxygen/publish.py
|
LaudateCorpus1/caffe2.github.io
|
8cbc1076d9b7a4858ac330b1e31fe3074f06f673
|
[
"CC-BY-4.0"
] | 16
|
2018-03-28T22:54:52.000Z
|
2020-02-25T10:22:45.000Z
|
doxygen/publish.py
|
LaudateCorpus1/caffe2.github.io
|
8cbc1076d9b7a4858ac330b1e31fe3074f06f673
|
[
"CC-BY-4.0"
] | 39
|
2018-03-27T17:55:04.000Z
|
2022-02-05T18:17:37.000Z
|
## @package publish
# Module doxygen.publish
import os, shutil
if os.path.exists("/Users/aaronmarkham/caffe2/doxygen-c"):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree("/Users/aaronmarkham/caffe2/doxygen-c")
if os.path.exists("/Users/aaronmarkham/caffe2/doxygen-python"):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree("/Users/aaronmarkham/caffe2/doxygen-python")
os.system("cp -rf doxygen-c /Users/aaronmarkham/caffe2/")
os.system("cp -rf doxygen-python /Users/aaronmarkham/caffe2/")
| 43
| 85
| 0.739203
| 88
| 602
| 5.056818
| 0.386364
| 0.229213
| 0.310112
| 0.269663
| 0.773034
| 0.65618
| 0.65618
| 0.65618
| 0.458427
| 0.458427
| 0
| 0.011257
| 0.114618
| 602
| 13
| 86
| 46.307692
| 0.82364
| 0.064784
| 0
| 0.222222
| 0
| 0
| 0.699463
| 0.372093
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.111111
| 0
| 0.111111
| 0.222222
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e6d3a03ed237079e4173f548a6118e97c5a9b92a
| 938
|
py
|
Python
|
kiko/python/kiko/utils/value.py
|
danielefederico/kiko
|
b1ce367cc86c788b57d297be3091d822b9d49c4f
|
[
"MIT"
] | 81
|
2018-09-12T22:20:02.000Z
|
2022-03-07T03:40:51.000Z
|
kiko/python/kiko/utils/value.py
|
danielefederico/kiko
|
b1ce367cc86c788b57d297be3091d822b9d49c4f
|
[
"MIT"
] | 8
|
2018-11-11T04:52:48.000Z
|
2021-03-07T22:24:34.000Z
|
kiko/python/kiko/utils/value.py
|
danielefederico/kiko
|
b1ce367cc86c788b57d297be3091d822b9d49c4f
|
[
"MIT"
] | 21
|
2018-09-13T16:04:49.000Z
|
2021-08-09T07:12:34.000Z
|
# ==============================================================================
#
# KIKO is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version. This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# ==============================================================================
import math
def floats_equal(float1, float2, places=3):
    """Return True when the two values differ by less than 10**-places.

    Both arguments are coerced with float(), so numeric strings are
    accepted as well. *places* is the number of decimal places that must
    agree (default 3, i.e. an absolute tolerance of 0.001).
    """
    tolerance = 10.0 ** -places
    difference = float(float1) - float(float2)
    return math.fabs(difference) < tolerance
| 49.368421
| 80
| 0.628998
| 125
| 938
| 4.712
| 0.632
| 0.025467
| 0.061121
| 0.096774
| 0.169779
| 0.169779
| 0.11545
| 0
| 0
| 0
| 0
| 0.009975
| 0.144989
| 938
| 18
| 81
| 52.111111
| 0.724439
| 0.829424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
fc01ce7561b486a816f7611dd646335febf66976
| 41
|
py
|
Python
|
backend/scripts/index/__init__.py
|
gnulnx/mynode
|
468684e55c4bc7efb30bd3d1f1e090cd0f6dd076
|
[
"MIT"
] | 1
|
2021-12-31T16:27:55.000Z
|
2021-12-31T16:27:55.000Z
|
backend/scripts/index/__init__.py
|
gnulnx/mynode
|
468684e55c4bc7efb30bd3d1f1e090cd0f6dd076
|
[
"MIT"
] | null | null | null |
backend/scripts/index/__init__.py
|
gnulnx/mynode
|
468684e55c4bc7efb30bd3d1f1e090cd0f6dd076
|
[
"MIT"
] | null | null | null |
from .cmds import all
__all__ = ["all"]
| 10.25
| 21
| 0.658537
| 6
| 41
| 3.833333
| 0.666667
| 0.521739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195122
| 41
| 3
| 22
| 13.666667
| 0.69697
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fc5c65fd74c7e3e1aa484feac17c19066fdb43a6
| 769
|
py
|
Python
|
tests/jmath/test_permutation.py
|
jabbalaci/jabbapylib3
|
ddc8fe88b89c4379254183b9a7c1405574a3a262
|
[
"MIT"
] | 6
|
2017-03-31T16:58:52.000Z
|
2019-05-11T20:12:07.000Z
|
tests/jmath/test_permutation.py
|
jabbalaci/jabbapylib3
|
ddc8fe88b89c4379254183b9a7c1405574a3a262
|
[
"MIT"
] | null | null | null |
tests/jmath/test_permutation.py
|
jabbalaci/jabbapylib3
|
ddc8fe88b89c4379254183b9a7c1405574a3a262
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from jplib.jmath import permutation as perm
def test_lexicographically_next_permutation():
    """Exercise perm.lexicographically_next_permutation.

    Checks (1) that stepping a sequence until exhaustion visits the
    expected number of orderings, and (2) that individual steps mutate
    the list into the expected next permutation in place.
    """
    def count_orderings(seq):
        # The starting arrangement counts as the first ordering; each
        # successful step to a lexicographically greater one adds one.
        total = 1
        while perm.lexicographically_next_permutation(seq):
            total += 1
        return total

    assert count_orderings(['a', 'b', 'c']) == 6
    # Duplicates reduce the count: 4!/2! = 12 distinct orderings.
    assert count_orderings([3, 6, 6, 7]) == 12

    values = [1, 2, 9, 6, 5]
    for expected in ([1, 5, 2, 6, 9], [1, 5, 2, 9, 6], [1, 5, 6, 2, 9]):
        perm.lexicographically_next_permutation(values)
        assert values == expected

    letters = ['C', 'A', 'D', 'B']
    perm.lexicographically_next_permutation(letters)
    assert letters == ['C', 'B', 'A', 'D']
| 24.03125
| 54
| 0.596879
| 112
| 769
| 3.964286
| 0.276786
| 0.331081
| 0.504505
| 0.486486
| 0.702703
| 0.702703
| 0.702703
| 0.599099
| 0.599099
| 0.490991
| 0
| 0.058621
| 0.245774
| 769
| 31
| 55
| 24.806452
| 0.706897
| 0.016905
| 0
| 0.5
| 0
| 0
| 0.014647
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.041667
| false
| 0
| 0.041667
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc7418e30280d1f80de1d90394794c0d1627dd33
| 20,183
|
py
|
Python
|
r2cmplr/_gencode_wblock.py
|
manub686/atomix
|
80ca2b675f49d2aef1e078a36a282d7173e02805
|
[
"Apache-2.0"
] | 3
|
2015-04-21T21:04:48.000Z
|
2015-06-03T08:55:36.000Z
|
r2cmplr/_gencode_wblock.py
|
manubansal/atomix
|
80ca2b675f49d2aef1e078a36a282d7173e02805
|
[
"Apache-2.0"
] | 1
|
2015-06-11T22:35:48.000Z
|
2015-06-11T22:35:48.000Z
|
r2cmplr/_gencode_wblock.py
|
manub686/atomix
|
80ca2b675f49d2aef1e078a36a282d7173e02805
|
[
"Apache-2.0"
] | null | null | null |
'''
Atomix project, _gencode_wblock.py, (TODO: summary)
Copyright (c) 2015 Stanford University
Released under the Apache License v2.0. See the LICENSE file for details.
Author(s): Manu Bansal
'''
from _db import *
from _codegen_write_out import *
from _util import *
class WblockCodeGenerator:
    """Emits C header/source text for "wblocks" (wrapped processing blocks).

    For each wblock the generator produces a struct typedef holding the
    block's FIFO handles, a <name>_setupWiring() and <name>_setupDebug()
    function, and (optionally) a <name>_do() driver that acquires FIFO
    buffers, calls the underlying block's <block>_i() worker, and releases
    the buffers.  Single-letter "access signature" codes select which FIFO
    acquire/release call each input/output port uses.
    """

    #---------------------
    def gencode_wblock(self, wblockrow, GEN_BLOCK_DEBUG=False, GEN_DO_FUNC = True, TS=False, ii_only=False, atomid=0):
        #---------------------
        """Generate the C header and source text for one wblock.

        *wblockrow* is a DB row tuple: (wblockname, blockname, input-type
        CSV, output-type CSV, config-type CSV, hasConf, FIFO-access-
        signature CSV).  Returns a ``(header_code, source_code)`` pair of
        strings.

        Flags:
          GEN_BLOCK_DEBUG -- emit OSL_printAtomId/OSL_dumpBuffer debug calls
          GEN_DO_FUNC     -- emit the <wblock>_do() driver function
          TS              -- emit the timestamped <wblock>_TS_do() variant
          ii_only         -- with TS, omit the outermost timestamp pair
          atomid          -- NOTE(review): only referenced by the
                             commented-out _TS%d naming lines below; the
                             local name is rebound to the string
                             'bli->atomid' inside the port loops.
        """
        (wblockname, blockname, inptstr, outtstr, conftstr, hasConf, fifo_access_signature_string) = wblockrow
        allcode_h = ''
        allcode_c = ''
        # Empty CSV fields must yield empty lists, not [''].
        inp_list = inptstr.split(',') if inptstr != '' else []
        out_list = outtstr.split(',') if outtstr != '' else []
        cf_list = conftstr.split(',') if conftstr != '' else []
        fifo_access_signature = fifo_access_signature_string.split(',')
        # The access-signature string lists inputs first, then outputs.
        inp_signature = fifo_access_signature[0:len(inp_list)] if inptstr != '' else []
        out_signature = fifo_access_signature[len(inp_list):(len(inp_list) + len(out_list))] if outtstr != '' else []
        self.logger.debug(wblockname)
        self.logger.debug('%s, %s' % (inp_list, inp_signature))
        self.logger.debug('%s, %s' % (out_list, out_signature))
        # Open the generated struct typedef and function signatures.
        code_blocktype = 'typedef struct {\n'
        code_h_setupWiring = 'void %s_setupWiring (\n' % wblockname
        code_h_setupWiring += ' %s *bli' % wblockname
        code_h_setupDebug = 'void %s_setupDebug (\n' % wblockname
        code_h_setupDebug += ' %s *bli' % wblockname
        code_h_setupDebug += ',\n Uint32 atomid'
        code_h_setupDebug += ',\n Uint32 debug'
        code_h_setupDebug += ',\n Uint32 identify'
        if TS:
            ##code_h_do = 'void %s_TS%d_do (%s *bli);\n' % (wblockname, atomid, wblockname)
            code_h_do = 'void %s_TS_do (%s *bli);\n' % (wblockname, wblockname)
        else:
            if GEN_DO_FUNC:
                code_h_do = 'void %s_do (%s *bli);\n' % (wblockname, wblockname)
            else:
                code_h_do = ''
        code_c_setupWiring = code_h_setupWiring
        code_c_setupDebug = code_h_setupDebug
        if TS:
            ##code_c_do = 'void %s_TS%d_do (%s *bli) {\n' % (wblockname, atomid, wblockname)
            code_c_do = 'void %s_TS_do (%s *bli) {\n' % (wblockname, wblockname)
        else:
            if GEN_DO_FUNC:
                code_c_do = 'void %s_do (%s *bli) {\n' % (wblockname, wblockname)
            else:
                code_c_do = ''
        # Pieces of the _do() body, concatenated at the end:
        #   decl  -> buffer-handle declarations
        #   cmd1  -> buffer acquisition calls      (cmd1d: debug dumps after)
        #   cmd2  -> argument list of the _i() worker call
        #   cmd3  -> buffer release calls          (cmd2d: debug dumps after)
        code_c_do_decl = '';
        code_c_do_cmd1 = '';
        code_c_do_cmd1d = '';
        code_c_do_cmd2 = '';
        code_c_do_cmd2d = '';
        code_c_do_cmd3 = '';
        code_c_setupWiring_body = ''
        code_c_setupDebug_body = ''
        atomid_string = 'bli->atomid'
        to_identify_string = 'bli->identify'
        if GEN_BLOCK_DEBUG:
            code_c_do += ' OSL_printAtomId(%s, %s);\n' % (atomid_string, to_identify_string)
        # ---- input ports: struct fields, wiring params, acquire/release ----
        for i in range(len(inp_list)):
            code_blocktype += ' FIFO_Handle ff_inp%d;\n' % i
            code_blocktype += ' Uint32 ff_inp%d_nBuffersInFifo;\n' % i
            code_blocktype += ' FIFO_BufferHandle ff_inp%d_bufferStates;\n' % i
            code_h_setupWiring += ',\n FIFO_Handle ff_inp%d' % i
            code_h_setupWiring += ',\n Uint32 ff_inp%d_nBuffersInFifo' % i
            code_h_setupWiring += ',\n FIFO_BufferHandle ff_inp%d_bufferStates' % i
            code_c_setupWiring += ',\n FIFO_Handle ff_inp%d' % i
            code_c_setupWiring += ',\n Uint32 ff_inp%d_nBuffersInFifo' % i
            code_c_setupWiring += ',\n FIFO_BufferHandle ff_inp%d_bufferStates' % i
            code_c_setupWiring_body += ' bli->ff_inp%d = ff_inp%d;\n' % (i, i)
            code_c_setupWiring_body += ' bli->ff_inp%d_nBuffersInFifo = ff_inp%d_nBuffersInFifo;\n' % (i, i)
            code_c_setupWiring_body += ' bli->ff_inp%d_bufferStates = ff_inp%d_bufferStates;\n' % (i, i)
            code_c_do_decl += ' FIFO_BufferHandle bhi%d;\n' % i
            # Map the port's signature letter to the matching FIFO acquire
            # call (local vs _remote, sync vs Async, Read/ReRead/ReIc/Busy).
            if (inp_signature[i] == 'r'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextReadBuffer(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == 'u'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextReReadBuffer(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == 'o'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextReReIcBuffer(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == 't' or inp_signature[i] == 's'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextReadBuffer_remote(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == 'e'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextAsyncReadBuffer_remote(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == '1'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextAsyncReIcBuffer_remote(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == 'i'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextAsyncReadBusyBuffer_remote(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == '5'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextAsyncReIcBusyBuffer_remote(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == '9'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextAsyncReIcBusyBuffer_remote(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == 'v'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextReReadBuffer_remote(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == 'p'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextReReIcBuffer_remote(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == 'd'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextAsyncReadBuffer(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == '0'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextAsyncReIcBuffer(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == 'h'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextAsyncReadBusyBuffer(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == '4'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextAsyncReIcBusyBuffer(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            elif (inp_signature[i] == '8'):
                code_c_do_cmd1 += ' bhi%d = FIFO_getNextAsyncReIcBusyBuffer(bli->ff_inp%d, bli->ff_inp%d_nBuffersInFifo, bli->ff_inp%d_bufferStates);\n' % (i, i, i, i)
            else:
                print_line()
                self.logger.error("I do not know how to generate code for inp_signature %s" % inp_signature[i])
                print_line()
                sys.exit(1)
            # Debug-dump arguments; 'void' ports dump the whole FIFO buffer.
            atomid = 'bli->atomid'
            to_debug = 'bli->debug'
            bufid = 'bhi%d' % i
            bufmem = bufid + '->mem'
            if inp_list[i] == 'void':
                nbytes = 'bli->ff_inp%d->bufferSizeInBytes' % i
            else:
                nbytes = 'sizeof(%s)' % inp_list[i]
            code_c_do_cmd1d += ' OSL_dumpBuffer(%s, %s, "%s", %s, %s);\n' % (atomid, to_debug, bufid, bufmem, nbytes)
            # Worker-call argument; comma after every argument except the last
            # (config arg handled separately below).
            code_c_do_cmd2 += ' (%s *)(bhi%d->mem)' % (inp_list[i], i)
            if (i < len(inp_list) + len(out_list) - 1):
                code_c_do_cmd2 += ','
            code_c_do_cmd2 += '\n'
            # Matching release call for the same signature letter; async
            # variants have no release here.
            if (inp_signature[i] == 'r'):
                code_c_do_cmd3 += ' FIFO_readDone(bli->ff_inp%d, bhi%d);\n' % (i, i)
            elif (inp_signature[i] == 'u'):
                code_c_do_cmd3 += ' FIFO_reReadDone(bli->ff_inp%d, bhi%d);\n' % (i, i)
            elif (inp_signature[i] == 'o'):
                code_c_do_cmd3 += ' FIFO_reReadDone(bli->ff_inp%d, bhi%d);\n' % (i, i)
            elif (inp_signature[i] == 't' or inp_signature[i] == 's'):
                code_c_do_cmd3 += ' FIFO_readDone_remote(bli->ff_inp%d, bhi%d);\n' % (i, i)
            elif (inp_signature[i] == 'e'):
                pass
            elif (inp_signature[i] == '1'):
                pass
            elif (inp_signature[i] == 'i'):
                code_c_do_cmd3 += ' FIFO_readDone_remote(bli->ff_inp%d, bhi%d);\n' % (i, i)
            elif (inp_signature[i] == '5'):
                code_c_do_cmd3 += ' FIFO_readDone_remote(bli->ff_inp%d, bhi%d);\n' % (i, i)
            elif (inp_signature[i] == '9'):
                code_c_do_cmd3 += ' FIFO_reReadDone_remote(bli->ff_inp%d, bhi%d);\n' % (i, i)
            elif (inp_signature[i] == 'v'):
                code_c_do_cmd3 += ' FIFO_reReadDone_remote(bli->ff_inp%d, bhi%d);\n' % (i, i)
            elif (inp_signature[i] == 'p'):
                code_c_do_cmd3 += ' FIFO_reReadDone_remote(bli->ff_inp%d, bhi%d);\n' % (i, i)
            elif (inp_signature[i] == 'd'):
                pass #no buffer return code in this case
            elif (inp_signature[i] == '0'):
                pass #no buffer return code in this case
            elif (inp_signature[i] == 'h'):
                code_c_do_cmd3 += ' FIFO_readDone(bli->ff_inp%d, bhi%d);\n' % (i, i)
            elif (inp_signature[i] == '4'):
                code_c_do_cmd3 += ' FIFO_readDone(bli->ff_inp%d, bhi%d);\n' % (i, i)
            elif (inp_signature[i] == '8'):
                code_c_do_cmd3 += ' FIFO_reReadDone(bli->ff_inp%d, bhi%d);\n' % (i, i)
            else:
                print_line()
                self.logger.error("I do not know how to generate code for inp_signature %s" % inp_signature[i])
                print_line()
                sys.exit(1)
        # ---- output ports: same structure, write-side FIFO calls ----
        for i in range(len(out_list)):
            code_blocktype += ' FIFO_Handle ff_out%d;\n' % i
            code_blocktype += ' Uint32 ff_out%d_nBuffersInFifo;\n' % i
            code_blocktype += ' FIFO_BufferHandle ff_out%d_bufferStates;\n' % i
            code_h_setupWiring += ',\n FIFO_Handle ff_out%d' % i
            code_h_setupWiring += ',\n Uint32 ff_out%d_nBuffersInFifo' % i
            code_h_setupWiring += ',\n FIFO_BufferHandle ff_out%d_bufferStates' % i
            code_c_setupWiring += ',\n FIFO_Handle ff_out%d' % i
            code_c_setupWiring += ',\n Uint32 ff_out%d_nBuffersInFifo' % i
            code_c_setupWiring += ',\n FIFO_BufferHandle ff_out%d_bufferStates' % i
            code_c_setupWiring_body += ' bli->ff_out%d = ff_out%d;\n' % (i, i)
            code_c_setupWiring_body += ' bli->ff_out%d_nBuffersInFifo = ff_out%d_nBuffersInFifo;\n' % (i, i)
            code_c_setupWiring_body += ' bli->ff_out%d_bufferStates = ff_out%d_bufferStates;\n' % (i, i)
            code_c_do_decl += ' FIFO_BufferHandle bho%d;\n' % i
            if (out_signature[i] == 'y' or out_signature[i] == 'x'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextWriteBuffer_remote(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == 'n'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextReWriteBuffer_remote(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == 'z'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextReWrIcBuffer_remote(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == 'g'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextAsyncWriteBuffer_remote(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == '3'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextAsyncWriIcBuffer_remote(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == 'l'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextAsyncWriteBusyBuffer_remote(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == '7'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextAsyncWriIcBusyBuffer_remote(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == 'f'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextAsyncWriteBuffer(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == '2'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextAsyncWriIcBuffer(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == 'k'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextAsyncWriteBusyBuffer(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == '6'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextAsyncWriIcBusyBuffer(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == 'w'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextWriteBuffer(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == 'm'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextReWriteBuffer(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            elif (out_signature[i] == 'q'):
                code_c_do_cmd1 += ' bho%d = FIFO_getNextReWrIcBuffer(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            #else:
            #  code_c_do_cmd1 += ' bho%d = FIFO_getNextWriteBuffer(bli->ff_out%d, bli->ff_out%d_nBuffersInFifo, bli->ff_out%d_bufferStates);\n' % (i, i, i, i)
            else:
                print_line()
                self.logger.error("I do not know how to generate code for out_signature %s" % out_signature[i])
                print_line()
                sys.exit(1)
            code_c_do_cmd2 += ' (%s *)(bho%d->mem)' % (out_list[i], i)
            if (i < len(out_list) - 1):
                code_c_do_cmd2 += ','
            code_c_do_cmd2 += '\n'
            if (out_signature[i] == 'y' or out_signature[i] == 'x'):
                code_c_do_cmd3 += ' FIFO_writeDone_remote(bli->ff_out%d, bho%d);\n' % (i, i)
            elif (out_signature[i] == 'n'):
                code_c_do_cmd3 += ' FIFO_reWriteDone_remote(bli->ff_out%d, bho%d);\n' % (i, i)
            elif (out_signature[i] == 'z'):
                code_c_do_cmd3 += ' FIFO_reWriteDone_remote(bli->ff_out%d, bho%d);\n' % (i, i)
            elif (out_signature[i] == 'g'):
                pass
            elif (out_signature[i] == '3'):
                pass
            elif (out_signature[i] == 'l'):
                code_c_do_cmd3 += ' FIFO_writeDone_remote(bli->ff_out%d, bho%d);\n' % (i, i)
            elif (out_signature[i] == '7'):
                code_c_do_cmd3 += ' FIFO_writeDone_remote(bli->ff_out%d, bho%d);\n' % (i, i)
            elif (out_signature[i] == 'f'):
                pass #no buffer return code in this case
            elif (out_signature[i] == '2'):
                pass #no buffer return code in this case
            elif (out_signature[i] == 'k'):
                code_c_do_cmd3 += ' FIFO_writeDone(bli->ff_out%d, bho%d);\n' % (i, i)
            elif (out_signature[i] == '6'):
                code_c_do_cmd3 += ' FIFO_writeDone(bli->ff_out%d, bho%d);\n' % (i, i)
            elif (out_signature[i] == 'w'):
                code_c_do_cmd3 += ' FIFO_writeDone(bli->ff_out%d, bho%d);\n' % (i, i)
            elif (out_signature[i] == 'm'):
                code_c_do_cmd3 += ' FIFO_reWriteDone(bli->ff_out%d, bho%d);\n' % (i, i)
            elif (out_signature[i] == 'q'):
                code_c_do_cmd3 += ' FIFO_reWriteDone(bli->ff_out%d, bho%d);\n' % (i, i)
            #else:
            #  code_c_do_cmd3 += ' FIFO_writeDone(bli->ff_out%d, bho%d);\n' % (i, i)
            else:
                print_line()
                self.logger.error("I do not know how to generate code for out_signature %s" % out_signature[i])
                print_line()
                sys.exit(1)
            atomid = 'bli->atomid'
            to_debug = 'bli->debug'
            bufid = 'bho%d' % i
            bufmem = bufid + '->mem'
            if out_list[i] == 'void':
                nbytes = 'bli->ff_out%d->bufferSizeInBytes' % i
            else:
                nbytes = 'sizeof(%s)' % out_list[i]
            code_c_do_cmd2d += ' OSL_dumpBuffer(%s, %s, "%s", %s, %s);\n' % (atomid, to_debug, bufid, bufmem, nbytes)
        code_c_setupDebug_body += ' \n bli->atomid = atomid;\n'
        code_c_setupDebug_body += ' \n bli->debug = debug;\n'
        code_c_setupDebug_body += ' \n bli->identify = identify;\n'
        if (len(cf_list) > 1):
            self.logger.error("At most one configuration type (CF) io is allowed")
            exit(1)
        # ---- optional config argument, appended last to the worker call ----
        for i in range(len(cf_list)):
            code_blocktype += ' %s blconf;\n' % (cf_list[i])
            # Only prepend a separating comma when there were earlier args.
            if i == 0 and len(inp_list) + len(out_list) == 0:
                pass
            else:
                code_c_do_cmd2 += ',\n'
            code_c_do_cmd2 += ' &(bli->blconf)'
            code_c_do_cmd2 += '\n'
        if GEN_BLOCK_DEBUG:
            code_blocktype += ' Uint32 atomid;\n'
            code_blocktype += ' Uint32 debug;\n'
            code_blocktype += ' Uint32 identify;\n'
        code_blocktype += '} %s;\n' % wblockname
        # ---- close off signatures and assemble function bodies ----
        code_h_setupWiring += '\n );\n'
        code_c_setupWiring += '\n ){\n'
        code_c_setupWiring += code_c_setupWiring_body
        code_c_setupWiring += '}\n'
        code_h_setupDebug += '\n );\n'
        code_c_setupDebug += '\n ){\n'
        code_c_setupDebug += code_c_setupDebug_body
        code_c_setupDebug += '}\n'
        # _do() body: decls, [TS], acquire, [TS], [dumps], worker call,
        # [dumps], [TS], release, [TS].
        # NOTE(review): 'atomid' below is the string 'bli->atomid' rebound in
        # the port loops; with zero input AND output ports it would still be
        # the integer parameter -- presumably wblocks always have a port.
        code_c_do += '%s\n' % code_c_do_decl
        if TS and not ii_only:
            code_c_do += ' SYS_TimeStamp_aliased(%s);\n\n' % (atomid)
        code_c_do += '%s\n' % code_c_do_cmd1
        if TS:
            code_c_do += ' SYS_TimeStamp_aliased(%s);\n\n' % (atomid)
        if GEN_BLOCK_DEBUG:
            code_c_do += code_c_do_cmd1d + '\n'
        code_c_do += ' %s_i(\n' % blockname
        code_c_do += '%s' % code_c_do_cmd2
        code_c_do += ' );\n\n'
        if GEN_BLOCK_DEBUG:
            code_c_do += code_c_do_cmd2d + '\n'
        if TS:
            code_c_do += ' SYS_TimeStamp_aliased(%s);\n\n' % (atomid)
        code_c_do += '%s\n' % code_c_do_cmd3
        if TS and not ii_only:
            code_c_do += ' SYS_TimeStamp_aliased(%s);\n\n' % (atomid)
        code_c_do += '}\n'
        if not GEN_BLOCK_DEBUG:
            code_h_setupDebug = '';
            code_c_setupDebug = '';
        self.logger.debug('allcode_h')
        # TS variant only adds the _do prototype; the base variant also
        # emits the typedef and setup prototypes.
        if TS:
            code = '%s\n' % (code_h_do)
        else:
            #code = '%s\n%s\n%s\n' % (code_blocktype, code_h_setupWiring, code_h_do)
            code = '%s\n%s\n%s\n%s\n' % (code_blocktype, code_h_setupWiring, code_h_setupDebug, code_h_do)
        self.logger.debug(code)
        allcode_h = code
        self.logger.debug('allcode_c')
        if TS:
            code = '%s\n' % (code_c_do)
        else:
            if GEN_DO_FUNC:
                #code = '%s\n%s\n' % (code_c_setupWiring, code_c_do)
                code = '%s\n%s\n%s\n' % (code_c_setupWiring, code_c_setupDebug, code_c_do)
            else:
                #code = '%s\n' % (code_c_setupWiring)
                code = '%s\n%s\n' % (code_c_setupWiring, code_c_setupDebug)
        self.logger.debug(code)
        allcode_c = code
        return (allcode_h, allcode_c)

    ###########################
    def gencode_wblocks(self, dbo, APPDIRNAME, GEN_BLOCK_DEBUG, GEN_DO_FUNC = True, atoms_already_generated=[]):
        """Generate and write out __<name>__.h/.c for every distinct wblock.

        NOTE(review): atoms_already_generated is a mutable default argument
        and is not referenced in this body -- confirm before removing.
        """
        wblockdir = APPDIRNAME
        db_wblocks = dbo.db_select_distinct_wblocks()
        for row in db_wblocks:
            wblockname = row['wblockname']
            blockname = row['blockname']
            wblock_code_h, wblock_code_c = self.gencode_wblock(row, GEN_BLOCK_DEBUG, GEN_DO_FUNC)
            wblock_h_file = wblockdir + '/__' + wblockname + '__.h'
            wblock_c_file = wblockdir + '/__' + wblockname + '__.c'
            codegen_write_out_wblock(wblockname, blockname, wblock_h_file, wblock_c_file, wblock_code_h, wblock_code_c, GEN_BLOCK_DEBUG)

    ###########################
    def gencode_wblockTS(self, dbo, APPDIRNAME, wblockname, GEN_BLOCK_DEBUG, atomid, ii_only=False):
        """Generate the timestamped (_TS) variant for one named wblock.

        Looks the wblock up in the DB, generates code with TS=True, writes
        __<name>_TS__.h/.c, and returns the generated name '<name>_TS'.
        """
        wblockdir = APPDIRNAME
        db_wblocks = dbo.db_select_wblock_row(wblockname)
        row = db_wblocks[0]
        wblockname = row['wblockname']
        blockname = row['blockname']
        wblock_code_h, wblock_code_c = self.gencode_wblock(row, GEN_BLOCK_DEBUG, True, True, ii_only, atomid)
        owblockname = wblockname
        ##wblockname = '%s_TS%d' % (wblockname, atomid)
        wblockname = '%s_TS' % (wblockname)
        wblock_h_file = wblockdir + '/__' + wblockname + '__.h'
        wblock_c_file = wblockdir + '/__' + wblockname + '__.c'
        codegen_write_out_wblock(wblockname, blockname, wblock_h_file, wblock_c_file, wblock_code_h, wblock_code_c, GEN_BLOCK_DEBUG, True, owblockname)
        generated_wblockname = wblockname
        return generated_wblockname

    def __init__(self, logger):
        # Injected logger used by all gencode_* methods.
        self.logger = logger
| 49.468137
| 165
| 0.629738
| 3,135
| 20,183
| 3.721212
| 0.06252
| 0.059146
| 0.060003
| 0.049374
| 0.832248
| 0.794274
| 0.740099
| 0.709669
| 0.61975
| 0.59832
| 0
| 0.008562
| 0.201407
| 20,183
| 407
| 166
| 49.589681
| 0.715225
| 0.039241
| 0
| 0.491176
| 0
| 0.088235
| 0.361659
| 0.237097
| 0
| 0
| 0
| 0.002457
| 0
| 0
| null | null | 0.026471
| 0.008824
| null | null | 0.026471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc7e00ae1d0f223032a52a9293eb5da00f274feb
| 3,126
|
py
|
Python
|
exception_for_client.py
|
etozhezhenechka/VadikDB
|
6f5a80f2a525871375a64e40f212f23fb26a3eb4
|
[
"Unlicense"
] | null | null | null |
exception_for_client.py
|
etozhezhenechka/VadikDB
|
6f5a80f2a525871375a64e40f212f23fb26a3eb4
|
[
"Unlicense"
] | null | null | null |
exception_for_client.py
|
etozhezhenechka/VadikDB
|
6f5a80f2a525871375a64e40f212f23fb26a3eb4
|
[
"Unlicense"
] | null | null | null |
class DBExceptionForClient(Exception):
    """Client-facing error reporting for VadikDB.

    Each reporting method stores its two-digit error code in
    ``self.error_code`` and *returns* (does not raise) a formatted
    ``"Error code: NN -- <detail>"`` message string.
    """

    def __init__(self):
        # No error reported yet.
        self.error_code = ""

    def _report(self, code, detail):
        """Record *code* on the instance and return the standard message line."""
        self.error_code = code
        return "Error code: " + self.error_code + " -- " + detail

    def DBFileNotExists(self):
        """Error 00: the database file is missing."""
        return self._report("00", "DB file not exists!")

    def WrongFileFormat(self):
        """Error 01: the file is not a valid DB file."""
        return self._report("01", "Wrong file format!")

    def WrongSignature(self):
        """Error 02: the DB file signature check failed."""
        return self._report("02", "Wrong signature!")

    def TableAlreadyExists(self, table_name):
        """Error 03: a table with this name already exists."""
        return self._report("03", "Table " + table_name + " already exists!")

    def TableNotExists(self, table_name):
        """Error 04: no table with this name."""
        return self._report("04", "Table " + table_name + " not exists!")

    def FieldNotExists(self, field_name):
        """Error 05: no field with this name."""
        return self._report("05", "Field " + field_name + " not exists!")

    def InvalidDataType(self):
        """Error 06: unsupported data type."""
        return self._report("06", "Invalid Data Type")

    def IncorrectSyntax(self, pos, token):
        """Error 07: query syntax error at position *pos*, token *token*."""
        return self._report("07", "Code Incorrect Syntax" + "\n"
                            + "The position of error: " + str(pos) + "\nToken: " + str(token))

    def DuplicateFields(self, field_name):
        """Error 08: the same field appears more than once."""
        return self._report("08", "Duplicate field: " + str(field_name))

    def ValueNotExists(self, value):
        """Error 09: the requested value was not found."""
        return self._report("09", "Value not exists: " + str(value))

    def WrongFieldType(self, field):
        """Error 10: value does not match the field's declared type.

        *field* is a (name, type) pair indexable as field[0]/field[1].
        """
        return self._report("10", "Wrong Field Type: " + str(field[0]) + ":" + str(field[1]))

    def DifferentCount(self):
        """Error 11: field list and type list have different lengths."""
        return self._report("11", "Different count fields and types!")

    def TypeNotExists(self, type_name):
        """Error 12: unknown type name."""
        return self._report("12", "Type not exists: " + str(type_name))

    def DifferentNumberOfColumns(self):
        """Error 13: UNION-ed SELECTs disagree on column count."""
        return self._report("13", "The used SELECT statements have a different number of columns")

    def NoTableSpecified(self, field):
        """Error 14: a field reference lacks a table qualifier."""
        # Kept as plain concatenation (no str()) to match the historical
        # behavior of raising TypeError on non-string input.
        return self._report("14", "The field: " + field + " does not have a table")

    def TransactionNotDefined(self, user_index):
        """Error 15: the user has no open transaction."""
        return self._report("15", "User(user_index: " + str(user_index) + ") not defined transaction")

    def IndexAlreadyExists(self, table_name):
        """Error 16: an index with this name already exists."""
        return self._report("16", "Index " + table_name + " already exists!")

    def IndexNotExists(self, table_name):
        """Error 17: no index with this name."""
        return self._report("17", "Index " + table_name + " not exists!")
| 39.56962
| 121
| 0.605566
| 371
| 3,126
| 4.946092
| 0.242588
| 0.269755
| 0.262125
| 0.186376
| 0.443052
| 0.401635
| 0.179292
| 0.091553
| 0.091553
| 0
| 0
| 0.016436
| 0.260397
| 3,126
| 78
| 122
| 40.076923
| 0.777249
| 0
| 0
| 0
| 0
| 0
| 0.25184
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.327586
| false
| 0
| 0
| 0
| 0.655172
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
fc89fa15a6a6b14e0f1ebae7e6d608d7682ad01c
| 136
|
py
|
Python
|
reclab/recommenders/autorec/__init__.py
|
lematt1991/RecLab
|
7ba212ac2ae346fb6dfeec232eef652d7f26e193
|
[
"MIT"
] | 51
|
2020-09-17T08:51:42.000Z
|
2022-03-26T20:44:48.000Z
|
reclab/recommenders/autorec/__init__.py
|
kiminh/RecLab
|
7fd29d1c780e91910008a322b04e1b1149a203c8
|
[
"MIT"
] | 25
|
2020-09-04T00:12:44.000Z
|
2021-10-05T02:21:58.000Z
|
reclab/recommenders/autorec/__init__.py
|
kiminh/RecLab
|
7fd29d1c780e91910008a322b04e1b1149a203c8
|
[
"MIT"
] | 6
|
2020-11-30T03:34:25.000Z
|
2022-02-08T18:27:48.000Z
|
"""
The package for the Autorec recommender.
See https://doi.org/10.1145/2740908.2742726 for details.
"""
from .autorec import Autorec
| 19.428571
| 56
| 0.75
| 20
| 136
| 5.1
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168067
| 0.125
| 136
| 6
| 57
| 22.666667
| 0.689076
| 0.720588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fc8a880d2354e041e13d77383efbd1d1a6cc9912
| 160
|
py
|
Python
|
common/types.py
|
mlrepa/mlpanel
|
94cf5ff9e654ead414a5b671bad760ed9a7c6382
|
[
"MIT"
] | 11
|
2020-01-25T07:19:43.000Z
|
2021-07-31T14:11:01.000Z
|
common/types.py
|
mlrepa/mlpanel
|
94cf5ff9e654ead414a5b671bad760ed9a7c6382
|
[
"MIT"
] | 9
|
2020-01-24T19:44:48.000Z
|
2020-05-10T09:57:00.000Z
|
common/types.py
|
mlrepa/mlpanel
|
94cf5ff9e654ead414a5b671bad760ed9a7c6382
|
[
"MIT"
] | null | null | null |
from enum import Enum
class StrEnum(str, Enum):
    """Enum where members are also (and must be) strings.

    Mixing in ``str`` makes every member usable wherever a plain string
    is accepted (comparison, dict keys, serialization).
    """
    def __str__(self):
        # Enum's default __str__ yields "StrEnum.NAME"; return the plain
        # string value instead.
        return self.value
| 17.777778
| 59
| 0.65625
| 23
| 160
| 4.391304
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.24375
| 160
| 8
| 60
| 20
| 0.834711
| 0.30625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
5ddeac9742f5b8d3ec206d89d95db173fdd50fba
| 759
|
py
|
Python
|
OpenGLCffi/GL/EXT/EXT/pixel_transform.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
OpenGLCffi/GL/EXT/EXT/pixel_transform.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
OpenGLCffi/GL/EXT/EXT/pixel_transform.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
from OpenGLCffi.GL import params
# EXT_pixel_transform entry points. Each function body is intentionally
# empty: the @params decorator binds the declared parameter names to the
# underlying native GL call, so the Python def only supplies the signature.
@params(api='gl', prms=['target', 'pname', 'param'])
def glPixelTransformParameteriEXT(target, pname, param):
    # Set a single integer-valued pixel-transform parameter.
    pass
@params(api='gl', prms=['target', 'pname', 'param'])
def glPixelTransformParameterfEXT(target, pname, param):
    # Set a single float-valued pixel-transform parameter.
    pass
@params(api='gl', prms=['target', 'pname', 'params'])
def glPixelTransformParameterivEXT(target, pname, params):
    # Set pixel-transform parameter(s) from an integer vector.
    pass
@params(api='gl', prms=['target', 'pname', 'params'])
def glPixelTransformParameterfvEXT(target, pname, params):
    # Set pixel-transform parameter(s) from a float vector.
    pass
@params(api='gl', prms=['target', 'pname', 'params'])
def glGetPixelTransformParameterivEXT(target, pname, params):
    # Query pixel-transform parameter(s) into an integer vector.
    pass
@params(api='gl', prms=['target', 'pname', 'params'])
def glGetPixelTransformParameterfvEXT(target, pname, params):
    # Query pixel-transform parameter(s) into a float vector.
    pass
| 23.71875
| 61
| 0.716733
| 83
| 759
| 6.554217
| 0.216867
| 0.242647
| 0.25
| 0.165441
| 0.571691
| 0.571691
| 0.571691
| 0.571691
| 0.494485
| 0.477941
| 0
| 0
| 0.100132
| 759
| 31
| 62
| 24.483871
| 0.796486
| 0
| 0
| 0.631579
| 0
| 0
| 0.147952
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.315789
| false
| 0.315789
| 0.052632
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
5d2152400331e22715b7e54e3006ff1b5456dbf6
| 335
|
py
|
Python
|
Pacotes/pacote_v5.py
|
VictorMello1993/CursoPythonUdemy
|
d3e2e542a7c3d3f9635f2b88d0e75ab4fa84236d
|
[
"MIT"
] | null | null | null |
Pacotes/pacote_v5.py
|
VictorMello1993/CursoPythonUdemy
|
d3e2e542a7c3d3f9635f2b88d0e75ab4fa84236d
|
[
"MIT"
] | 4
|
2021-04-08T21:54:09.000Z
|
2022-02-10T14:35:13.000Z
|
Pacotes/pacote_v5.py
|
VictorMello1993/CursoPythonUdemy
|
d3e2e542a7c3d3f9635f2b88d0e75ab4fa84236d
|
[
"MIT"
] | null | null | null |
# Importing a package that acts as a "facade" over other packages
# (original comment translated from Portuguese).
from calc import soma, subtracao
print('Soma', soma(3, 2))
print('Subtração', subtracao(3, 2))
# NOTE(review): the triple-quoted string below is a bare expression used as
# a block comment; translated: "In this example it was possible to access a
# package containing functions from module 'modulo1' of package 'pacote1'
# and from package 'pacote2', all grouped behind a 'facade'."
'''Nesse exemplo foi possível acessar um pacote que contém funções do módulo
"modulo1" do pacote "pacote1" e do pacote "pacote2", todos eles agrupados
numa "fachada"'''
| 33.5
| 76
| 0.752239
| 50
| 335
| 5.04
| 0.72
| 0.063492
| 0.087302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 0.143284
| 335
| 9
| 77
| 37.222222
| 0.853659
| 0.2
| 0
| 0
| 0
| 0
| 0.131313
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
5d44df94aba262d7091dadfccd383083da0a4799
| 35
|
py
|
Python
|
dipy/utils/tests/__init__.py
|
JohnGriffiths/dipy
|
5fb38e9b77547cdaf5eb140730444535733ae01d
|
[
"BSD-3-Clause"
] | 8
|
2019-05-29T09:38:30.000Z
|
2021-01-20T03:36:59.000Z
|
dipy/utils/tests/__init__.py
|
JohnGriffiths/dipy
|
5fb38e9b77547cdaf5eb140730444535733ae01d
|
[
"BSD-3-Clause"
] | 12
|
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
dipy/utils/tests/__init__.py
|
JohnGriffiths/dipy
|
5fb38e9b77547cdaf5eb140730444535733ae01d
|
[
"BSD-3-Clause"
] | 3
|
2016-08-05T22:43:16.000Z
|
2017-06-23T18:35:13.000Z
|
# Tests for utilities - as package
| 17.5
| 34
| 0.742857
| 5
| 35
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 35
| 1
| 35
| 35
| 0.928571
| 0.914286
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d5ce229ec78c15cd580a9bb0ca02a2c952d8cad
| 69
|
py
|
Python
|
example10.py
|
126alexander/LAB_15
|
70ae009d9b8722c5235f1aea0c6649d64fbfd610
|
[
"MIT"
] | null | null | null |
example10.py
|
126alexander/LAB_15
|
70ae009d9b8722c5235f1aea0c6649d64fbfd610
|
[
"MIT"
] | null | null | null |
example10.py
|
126alexander/LAB_15
|
70ae009d9b8722c5235f1aea0c6649d64fbfd610
|
[
"MIT"
] | null | null | null |
import os
# deleting the file named file3.txt
os.remove("file3.txt")
| 17.25
| 35
| 0.753623
| 12
| 69
| 4.333333
| 0.75
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.130435
| 69
| 4
| 36
| 17.25
| 0.833333
| 0.478261
| 0
| 0
| 0
| 0
| 0.257143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9904c53bf229753b8e032318dbc6230d64b6da15
| 72
|
py
|
Python
|
mlbench_core/models/pytorch/layers/__init__.py
|
mlbench/mlbench-core
|
4fd3c7e6f1a5be69e52383ab2eb64cad257218c2
|
[
"Apache-2.0"
] | 14
|
2018-11-12T17:23:32.000Z
|
2022-03-11T22:45:49.000Z
|
mlbench_core/models/pytorch/layers/__init__.py
|
mlbench/mlbench-core
|
4fd3c7e6f1a5be69e52383ab2eb64cad257218c2
|
[
"Apache-2.0"
] | 330
|
2018-10-09T12:15:56.000Z
|
2022-03-01T18:07:40.000Z
|
mlbench_core/models/pytorch/layers/__init__.py
|
mlbench/mlbench-core
|
4fd3c7e6f1a5be69e52383ab2eb64cad257218c2
|
[
"Apache-2.0"
] | 9
|
2019-02-28T19:11:58.000Z
|
2020-08-17T17:52:37.000Z
|
from .dropout_layers import LockedDropout, WeightDrop, embedded_dropout
| 36
| 71
| 0.875
| 8
| 72
| 7.625
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 72
| 1
| 72
| 72
| 0.924242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4ad86ff106a5b20030f4c1493db9a52d1b48aad9
| 223
|
py
|
Python
|
oop/class_testing.py
|
karinakozarova/Learning-Python
|
217dfc8ca6931a238445daf0b84e188c02916c52
|
[
"MIT"
] | 1
|
2019-04-07T23:14:29.000Z
|
2019-04-07T23:14:29.000Z
|
oop/class_testing.py
|
karinakozarova/Learning-Python
|
217dfc8ca6931a238445daf0b84e188c02916c52
|
[
"MIT"
] | null | null | null |
oop/class_testing.py
|
karinakozarova/Learning-Python
|
217dfc8ca6931a238445daf0b84e188c02916c52
|
[
"MIT"
] | null | null | null |
class Car:
def __init__(self,color,model):
self.color = color
self.model = model
def print(self):
return self.color + " " + self.model
car1 = Car("green","Toyota")
print(car1.print())
| 18.583333
| 44
| 0.578475
| 28
| 223
| 4.464286
| 0.428571
| 0.216
| 0.224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012422
| 0.278027
| 223
| 12
| 45
| 18.583333
| 0.763975
| 0
| 0
| 0
| 0
| 0
| 0.053571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.125
| 0.5
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
4af333cf47298f73041d9ce4b747a83c9c047c08
| 98
|
py
|
Python
|
src/heos/__init__.py
|
jrderuiter/heos
|
c657913cd5d135f63ce78565875e02eb2f568962
|
[
"MIT"
] | 2
|
2021-01-08T16:01:27.000Z
|
2021-02-12T12:53:33.000Z
|
src/heos/__init__.py
|
jrderuiter/heos
|
c657913cd5d135f63ce78565875e02eb2f568962
|
[
"MIT"
] | null | null | null |
src/heos/__init__.py
|
jrderuiter/heos
|
c657913cd5d135f63ce78565875e02eb2f568962
|
[
"MIT"
] | null | null | null |
from .client import Client
from .player import Player, PlayerGroup
from .registry import Registry
| 24.5
| 39
| 0.826531
| 13
| 98
| 6.230769
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132653
| 98
| 3
| 40
| 32.666667
| 0.952941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ab158f14bce1164abf196be57d68c5974988fec7
| 3,047
|
py
|
Python
|
server/openapi_server/models/__init__.py
|
mintproject/MINT-ModelCatalogIngestionAPI
|
026d3495483a3e48ea3c1364d0dda09beeea69e4
|
[
"Apache-2.0"
] | 2
|
2019-05-30T21:33:43.000Z
|
2019-09-27T21:04:38.000Z
|
server/openapi_server/models/__init__.py
|
mintproject/model-catalog-api
|
2ad7016691891497bba37afe8ceb0fea8fe769e5
|
[
"Apache-2.0"
] | 82
|
2019-10-08T16:35:34.000Z
|
2022-03-15T18:25:27.000Z
|
server/openapi_server/models/__init__.py
|
mintproject/model-catalog-api
|
2ad7016691891497bba37afe8ceb0fea8fe769e5
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import models into model package
from openapi_server.models.catalog_identifier import CatalogIdentifier
from openapi_server.models.causal_diagram import CausalDiagram
from openapi_server.models.configuration_setup import ConfigurationSetup
from openapi_server.models.constraint import Constraint
from openapi_server.models.coupled_model import CoupledModel
from openapi_server.models.data_transformation import DataTransformation
from openapi_server.models.data_transformation_setup import DataTransformationSetup
from openapi_server.models.dataset_specification import DatasetSpecification
from openapi_server.models.empirical_model import EmpiricalModel
from openapi_server.models.emulator import Emulator
from openapi_server.models.equation import Equation
from openapi_server.models.funding_information import FundingInformation
from openapi_server.models.geo_coordinates import GeoCoordinates
from openapi_server.models.geo_shape import GeoShape
from openapi_server.models.grid import Grid
from openapi_server.models.hybrid_model import HybridModel
from openapi_server.models.image import Image
from openapi_server.models.intervention import Intervention
from openapi_server.models.model import Model
from openapi_server.models.model_category import ModelCategory
from openapi_server.models.model_configuration import ModelConfiguration
from openapi_server.models.model_configuration_setup import ModelConfigurationSetup
from openapi_server.models.numerical_index import NumericalIndex
from openapi_server.models.organization import Organization
from openapi_server.models.parameter import Parameter
from openapi_server.models.person import Person
from openapi_server.models.point_based_grid import PointBasedGrid
from openapi_server.models.process import Process
from openapi_server.models.region import Region
from openapi_server.models.sample_collection import SampleCollection
from openapi_server.models.sample_execution import SampleExecution
from openapi_server.models.sample_resource import SampleResource
from openapi_server.models.software import Software
from openapi_server.models.software_configuration import SoftwareConfiguration
from openapi_server.models.software_image import SoftwareImage
from openapi_server.models.software_version import SoftwareVersion
from openapi_server.models.source_code import SourceCode
from openapi_server.models.spatial_resolution import SpatialResolution
from openapi_server.models.spatially_distributed_grid import SpatiallyDistributedGrid
from openapi_server.models.standard_variable import StandardVariable
from openapi_server.models.theory_guided_model import TheoryGuidedModel
from openapi_server.models.time_interval import TimeInterval
from openapi_server.models.unit import Unit
from openapi_server.models.user import User
from openapi_server.models.variable import Variable
from openapi_server.models.variable_presentation import VariablePresentation
from openapi_server.models.visualization import Visualization
| 57.490566
| 85
| 0.899902
| 379
| 3,047
| 7.005277
| 0.266491
| 0.194727
| 0.300942
| 0.407156
| 0.205273
| 0.06177
| 0
| 0
| 0
| 0
| 0
| 0.000704
| 0.067279
| 3,047
| 52
| 86
| 58.596154
| 0.933498
| 0.019363
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ab350b87bb10980d6dc5033bd97c6e224e09e86b
| 30
|
py
|
Python
|
account_payment_fix/models/__init__.py
|
odoo-mastercore/odoo-argentina
|
58cdfe8610bae42f69ddb9d652a28eb3245f6a04
|
[
"MIT"
] | 1
|
2021-01-25T15:57:58.000Z
|
2021-01-25T15:57:58.000Z
|
account_payment_fix/models/__init__.py
|
odoo-mastercore/odoo-argentina
|
58cdfe8610bae42f69ddb9d652a28eb3245f6a04
|
[
"MIT"
] | null | null | null |
account_payment_fix/models/__init__.py
|
odoo-mastercore/odoo-argentina
|
58cdfe8610bae42f69ddb9d652a28eb3245f6a04
|
[
"MIT"
] | 2
|
2020-10-17T16:36:02.000Z
|
2021-01-24T10:20:05.000Z
|
from . import account_payment
| 15
| 29
| 0.833333
| 4
| 30
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ab4bb081527c39f98f2ab904b5ca44bca085d968
| 214
|
py
|
Python
|
userlixo/database/__init__.py
|
AndrielFR/UserLixo
|
5755b39e2bc62f72e9f76ee442b7c07f2ae4adeb
|
[
"MIT"
] | 65
|
2018-11-12T02:56:01.000Z
|
2022-03-09T00:57:05.000Z
|
userlixo/database/__init__.py
|
AndrielFR/UserLixo
|
5755b39e2bc62f72e9f76ee442b7c07f2ae4adeb
|
[
"MIT"
] | 93
|
2019-11-22T23:54:26.000Z
|
2022-03-31T00:48:14.000Z
|
userlixo/database/__init__.py
|
HitaloSama/UserLixo
|
85ef00cfc828ad6a6a28bd3c80eea07e0c4fc45a
|
[
"MIT"
] | 56
|
2018-12-16T17:13:38.000Z
|
2022-03-30T18:40:07.000Z
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2021 Amano Team
from typing import List
from .database import Config, Message, connect_database
__all__: List[str] = ["Config", "connect_database", "Message"]
| 23.777778
| 62
| 0.752336
| 28
| 214
| 5.535714
| 0.714286
| 0.193548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043011
| 0.130841
| 214
| 8
| 63
| 26.75
| 0.790323
| 0.294393
| 0
| 0
| 0
| 0
| 0.195946
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
db6d0e2a5854beec09b03f3e4ba9ea910bcd50fe
| 53,657
|
py
|
Python
|
test/kb_util_dylan_server_test.py
|
kbaseapps/kb_util_dylan
|
a6fd260b514455447260b64cf4294957283fbd1e
|
[
"MIT"
] | null | null | null |
test/kb_util_dylan_server_test.py
|
kbaseapps/kb_util_dylan
|
a6fd260b514455447260b64cf4294957283fbd1e
|
[
"MIT"
] | 2
|
2017-11-13T20:48:46.000Z
|
2019-08-14T20:17:05.000Z
|
test/kb_util_dylan_server_test.py
|
kbaseapps/kb_util_dylan
|
a6fd260b514455447260b64cf4294957283fbd1e
|
[
"MIT"
] | 2
|
2017-04-10T23:02:46.000Z
|
2017-08-16T23:12:16.000Z
|
import unittest
import os
import json
import time
import requests
import shutil
from os import environ
from ConfigParser import ConfigParser
from requests_toolbelt import MultipartEncoder
from pprint import pprint
from Workspace.WorkspaceClient import Workspace as workspaceService
from biokbase.AbstractHandle.Client import AbstractHandle as HandleService
from kb_util_dylan.kb_util_dylanImpl import kb_util_dylan
from ReadsUtils.ReadsUtilsClient import ReadsUtils
from AssemblyUtil.AssemblyUtilClient import AssemblyUtil
class kb_util_dylanTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = environ.get('KB_AUTH_TOKEN', None)
cls.token = token
cls.ctx = {'token': token, 'provenance': [{'service': 'kb_util_dylan',
'method': 'please_never_use_it_in_production', 'method_params': []}],
'authenticated': 1}
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('kb_util_dylan'):
print(nameval[0] + '=' + nameval[1])
cls.cfg[nameval[0]] = nameval[1]
cls.wsURL = cls.cfg['workspace-url']
cls.shockURL = cls.cfg['shock-url']
cls.handleURL = cls.cfg['handle-service-url']
cls.serviceWizardURL = cls.cfg['service-wizard-url']
cls.callbackURL = os.environ['SDK_CALLBACK_URL']
cls.wsClient = workspaceService(cls.wsURL, token=token)
cls.serviceImpl = kb_util_dylan(cls.cfg)
cls.scratch = os.path.abspath(cls.cfg['scratch'])
if not os.path.exists(cls.scratch):
os.makedirs(cls.scratch)
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
if hasattr(cls, 'shock_ids'):
for shock_id in cls.shock_ids:
print('Deleting SHOCK node: '+str(shock_id))
cls.delete_shock_node(shock_id)
@classmethod
def delete_shock_node(cls, node_id):
header = {'Authorization': 'Oauth {0}'.format(cls.token)}
requests.delete(cls.shockURL + '/node/' + node_id, headers=header,
allow_redirects=True)
print('Deleted shock node ' + node_id)
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_kb_util_dylan_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName})
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
# call this method to get the WS object info of a Single End Library (will
# upload the example data if this is the first time the method is called during tests)
def getSingleEndLibInfo(self, read_lib_basename, lib_i=0):
if hasattr(self.__class__, 'singleEndLibInfo_list'):
try:
info = self.__class__.singleEndLibInfo_list[lib_i]
name = self.__class__.singleEndLibName_list[lib_i]
if info != None:
if name != read_lib_basename:
self.__class__.singleEndLibInfo_list[lib_i] = None
self.__class__.singleEndLibName_list[lib_i] = None
else:
return info
except:
pass
# 1) upload files to shock
shared_dir = "/kb/module/work/tmp"
forward_data_file = 'data/'+read_lib_basename+'.fwd.fq'
forward_file = os.path.join(shared_dir, os.path.basename(forward_data_file))
shutil.copy(forward_data_file, forward_file)
ru = ReadsUtils(os.environ['SDK_CALLBACK_URL'])
single_end_ref = ru.upload_reads({'fwd_file': forward_file,
'sequencing_tech': 'artificial reads',
'wsname': self.getWsName(),
'name': 'test-'+str(lib_i)+'.se.reads'})['obj_ref']
new_obj_info = self.getWsClient().get_object_info_new({'objects': [{'ref': single_end_ref}]})[0]
# store it
if not hasattr(self.__class__, 'singleEndLibInfo_list'):
self.__class__.singleEndLibInfo_list = []
self.__class__.singleEndLibName_list = []
for i in range(lib_i+1):
try:
assigned = self.__class__.singleEndLibInfo_list[i]
except:
self.__class__.singleEndLibInfo_list.append(None)
self.__class__.singleEndLibName_list.append(None)
self.__class__.singleEndLibInfo_list[lib_i] = new_obj_info
self.__class__.singleEndLibName_list[lib_i] = read_lib_basename
return new_obj_info
# call this method to get the WS object info of a Paired End Library (will
# upload the example data if this is the first time the method is called during tests)
def getPairedEndLibInfo(self, read_lib_basename, lib_i=0):
if hasattr(self.__class__, 'pairedEndLibInfo_list'):
try:
info = self.__class__.pairedEndLibInfo_list[lib_i]
name = self.__class__.pairedEndLibName_list[lib_i]
if info != None:
if name != read_lib_basename:
self.__class__.pairedEndLibInfo_list[lib_i] = None
self.__class__.pairedEndLibName_list[lib_i] = None
else:
return info
except:
pass
# 1) upload files to shock
shared_dir = "/kb/module/work/tmp"
forward_data_file = 'data/'+read_lib_basename+'.fwd.fq'
forward_file = os.path.join(shared_dir, os.path.basename(forward_data_file))
shutil.copy(forward_data_file, forward_file)
reverse_data_file = 'data/'+read_lib_basename+'.rev.fq'
reverse_file = os.path.join(shared_dir, os.path.basename(reverse_data_file))
shutil.copy(reverse_data_file, reverse_file)
ru = ReadsUtils(os.environ['SDK_CALLBACK_URL'])
paired_end_ref = ru.upload_reads({'fwd_file': forward_file, 'rev_file': reverse_file,
'sequencing_tech': 'artificial reads',
'interleaved': 0, 'wsname': self.getWsName(),
'name': 'test-'+str(lib_i)+'.pe.reads'})['obj_ref']
new_obj_info = self.getWsClient().get_object_info_new({'objects': [{'ref': paired_end_ref}]})[0]
# store it
if not hasattr(self.__class__, 'pairedEndLibInfo_list'):
self.__class__.pairedEndLibInfo_list = []
self.__class__.pairedEndLibName_list = []
for i in range(lib_i+1):
try:
assigned = self.__class__.pairedEndLibInfo_list[i]
except:
self.__class__.pairedEndLibInfo_list.append(None)
self.__class__.pairedEndLibName_list.append(None)
self.__class__.pairedEndLibInfo_list[lib_i] = new_obj_info
self.__class__.pairedEndLibName_list[lib_i] = read_lib_basename
return new_obj_info
# call this method to get the WS object info of a Single End Library Set (will
# upload the example data if this is the first time the method is called during tests)
def getSingleEndLib_SetInfo(self, read_libs_basename_list, refresh=False):
if hasattr(self.__class__, 'singleEndLib_SetInfo'):
try:
info = self.__class__.singleEndLib_SetInfo
if info != None:
if refresh:
self.__class__.singleEndLib_SetInfo = None
else:
return info
except:
pass
# build items and save each SingleEndLib
items = []
for lib_i,read_lib_basename in enumerate (read_libs_basename_list):
label = read_lib_basename
lib_info = self.getSingleEndLibInfo (read_lib_basename, lib_i)
lib_ref = str(lib_info[6])+'/'+str(lib_info[0])+'/'+str(lib_info[4])
print ("LIB_REF["+str(lib_i)+"]: "+lib_ref+" "+read_lib_basename) # DEBUG
items.append({'ref': lib_ref,
'label': label
#'data_attachment': ,
#'info':
})
# save readsset
desc = 'test ReadsSet'
readsSet_obj = { 'description': desc,
'items': items
}
name = 'TEST_READSET'
new_obj_set_info = self.wsClient.save_objects({
'workspace':self.getWsName(),
'objects':[
{
'type':'KBaseSets.ReadsSet',
'data':readsSet_obj,
'name':name,
'meta':{},
'provenance':[
{
'service':'kb_util_dylan',
'method':'test_kb_util_dylan'
}
]
}]
})[0]
# store it
self.__class__.singleEndLib_SetInfo = new_obj_set_info
return new_obj_set_info
# call this method to get the WS object info of a Paired End Library Set (will
# upload the example data if this is the first time the method is called during tests)
def getPairedEndLib_SetInfo(self, read_libs_basename_list, refresh=False):
if hasattr(self.__class__, 'pairedEndLib_SetInfo'):
try:
info = self.__class__.pairedEndLib_SetInfo
if info != None:
if refresh:
self.__class__.pairedEndLib_SetInfo = None
else:
return info
except:
pass
# build items and save each PairedEndLib
items = []
for lib_i,read_lib_basename in enumerate (read_libs_basename_list):
label = read_lib_basename
lib_info = self.getPairedEndLibInfo (read_lib_basename, lib_i)
lib_ref = str(lib_info[6])+'/'+str(lib_info[0])+'/'+str(lib_info[4])
lib_type = str(lib_info[2])
print ("LIB_REF["+str(lib_i)+"]: "+lib_ref+" "+read_lib_basename) # DEBUG
print ("LIB_TYPE["+str(lib_i)+"]: "+lib_type+" "+read_lib_basename) # DEBUG
items.append({'ref': lib_ref,
'label': label
#'data_attachment': ,
#'info':
})
# save readsset
desc = 'test ReadsSet'
readsSet_obj = { 'description': desc,
'items': items
}
name = 'TEST_READSET'
new_obj_set_info = self.wsClient.save_objects({
'workspace':self.getWsName(),
'objects':[
{
'type':'KBaseSets.ReadsSet',
'data':readsSet_obj,
'name':name,
'meta':{},
'provenance':[
{
'service':'kb_util_dylan',
'method':'test_kb_util_dylan'
}
]
}]
})[0]
# store it
self.__class__.pairedEndLib_SetInfo = new_obj_set_info
return new_obj_set_info
##############
# UNIT TESTS #
##############
#### test_KButil_FASTQ_to_FASTA():
##
def test_KButil_FASTQ_to_FASTA (self):
method = 'KButil_FASTQ_to_FASTA'
print ("\n\nRUNNING: test_KButil_FASTQ_to_FASTA()")
print ("=====================================\n\n")
# figure out where the test data lives
se_lib_info = self.getSingleEndLibInfo('test_quick')
pprint(se_lib_info)
# run method
base_output_name = method+'_output'
params = {
'workspace_name': self.getWsName(),
'input_ref': str(se_lib_info[6])+'/'+str(se_lib_info[0]),
'output_name': base_output_name
}
result = self.getImpl().KButil_FASTQ_to_FASTA(self.getContext(),params)
print('RESULT:')
pprint(result)
# check the output
output_name = base_output_name
output_type = 'KBaseFile.SingleEndLibrary'
info_list = self.getWsClient().get_object_info_new({'objects':[{'ref':se_lib_info[7] + '/' + output_name}]})
self.assertEqual(len(info_list),1)
readsLib_info = info_list[0]
self.assertEqual(readsLib_info[1],output_name)
self.assertEqual(readsLib_info[2].split('-')[0],output_type)
pass
#### test_KButil_Merge_FeatureSet_Collection():
##
def test_KButil_Merge_FeatureSet_Collection (self):
method = 'KButil_Merge_FeatureSet_Collection'
print ("\n\nRUNNING: test_KButil_Merge_FeatureSet_Collection()")
print ("==================================================\n\n")
# input_data
reference_prok_genomes_WS = 'ReferenceDataManager' # PROD and CI
genome_ref_1 = reference_prok_genomes_WS+'/GCF_001566335.1/1' # E. coli K-12 MG1655
genome_ref_2 = reference_prok_genomes_WS+'/GCF_000021385.1/1' # D. vulgaris str. 'Miyazaki F'
genome_ref_3 = reference_prok_genomes_WS+'/GCF_900129775.1/1' # Halobaculum gomorrense (16 contigs)
genome_id_feature_id_delim = '.f:'
feature_id_1 = 'AWN69_RS07145'
feature_id_2 = 'DVMF_RS00005'
feature_id_3 = 'BUE16_RS15805'
# featureSet 1
featureSet_obj_1 = { 'description': 'test featureSet 1',
'element_ordering': [
feature_id_1,
feature_id_2
],
'elements': {
feature_id_1: [genome_ref_1],
feature_id_2: [genome_ref_2]
}
}
provenance = [{}]
featureSet_info = self.getWsClient().save_objects({
'workspace': self.getWsName(),
'objects': [
{
'type': 'KBaseCollections.FeatureSet',
'data': featureSet_obj_1,
'name': 'test_featureSet_1',
'meta': {},
'provenance': provenance
}
]})[0]
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple
featureSet_ref_1 = str(featureSet_info[WSID_I])+'/'+str(featureSet_info[OBJID_I])+'/'+str(featureSet_info[VERSION_I])
# featureSet 2
featureSet_obj_2 = { 'description': 'test featureSet 2',
'element_ordering': [
feature_id_3
],
'elements': {
feature_id_3: [genome_ref_3]
}
}
provenance = [{}]
featureSet_info = self.getWsClient().save_objects({
'workspace': self.getWsName(),
'objects': [
{
'type': 'KBaseCollections.FeatureSet',
'data': featureSet_obj_2,
'name': 'test_featureSet_2',
'meta': {},
'provenance': provenance
}
]})[0]
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple
featureSet_ref_2 = str(featureSet_info[WSID_I])+'/'+str(featureSet_info[OBJID_I])+'/'+str(featureSet_info[VERSION_I])
# run method
base_output_name = method+'_output'
params = {
'workspace_name': self.getWsName(),
'input_refs': [featureSet_ref_1, featureSet_ref_2],
'output_name': base_output_name,
'desc': 'test'
}
result = self.getImpl().KButil_Merge_FeatureSet_Collection(self.getContext(),params)
print('RESULT:')
pprint(result)
# check the output
output_name = base_output_name
output_type = 'KBaseCollections.FeatureSet'
output_ref = self.getWsName()+'/'+output_name
info_list = self.getWsClient().get_object_info_new({'objects':[{'ref':output_ref}]})
self.assertEqual(len(info_list),1)
output_info = info_list[0]
self.assertEqual(output_info[1],output_name)
self.assertEqual(output_info[2].split('-')[0],output_type)
output_obj = self.getWsClient().get_objects2({'objects': [{'ref': output_ref}]})['data'][0]['data']
self.assertEqual(len(output_obj['element_ordering']),3)
pass
#### test_KButil_Merge_GenomeSets():
##
def test_KButil_Merge_GenomeSets (self):
method = 'KButil_Merge_GenomeSets'
print ("\n\nRUNNING: test_KButil_Merge_GenomeSets()")
print ("=======================================\n\n")
# input_data
reference_prok_genomes_WS = 'ReferenceDataManager' # PROD and CI
genome_ref_1 = reference_prok_genomes_WS+'/GCF_001566335.1/1' # E. coli K-12 MG1655
genome_ref_2 = reference_prok_genomes_WS+'/GCF_000021385.1/1' # D. vulgaris str. 'Miyazaki F'
genome_ref_3 = reference_prok_genomes_WS+'/GCF_900129775.1/1' # Halobaculum gomorrense (16 contigs)
#genome_id_feature_id_delim = '.f:'
#feature_id_1 = 'AWN69_RS07145'
#feature_id_2 = 'DVMF_RS00005'
#feature_id_3 = 'BUE16_RS15805'
# GenomeSet 1
genomeSet_obj_1 = { 'description': 'test genomeSet 1',
'elements': { 'genome_1': { 'ref': genome_ref_1 },
'genome_2': { 'ref': genome_ref_2 }
}
}
provenance = [{}]
genomeSet_info = self.getWsClient().save_objects({
'workspace': self.getWsName(),
'objects': [
{
'type': 'KBaseSearch.GenomeSet',
'data': genomeSet_obj_1,
'name': 'test_genomeSet_1',
'meta': {},
'provenance': provenance
}
]})[0]
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple
genomeSet_ref_1 = str(genomeSet_info[WSID_I])+'/'+str(genomeSet_info[OBJID_I])+'/'+str(genomeSet_info[VERSION_I])
# GenomeSet 2
genomeSet_obj_2 = { 'description': 'test genomeSet 2',
'elements': { 'genome_3': { 'ref': genome_ref_3 }
}
}
provenance = [{}]
genomeSet_info = self.getWsClient().save_objects({
'workspace': self.getWsName(),
'objects': [
{
'type': 'KBaseSearch.GenomeSet',
'data': genomeSet_obj_2,
'name': 'test_genomeSet_2',
'meta': {},
'provenance': provenance
}
]})[0]
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple
genomeSet_ref_2 = str(genomeSet_info[WSID_I])+'/'+str(genomeSet_info[OBJID_I])+'/'+str(genomeSet_info[VERSION_I])
# run method
base_output_name = method+'_output'
params = {
'workspace_name': self.getWsName(),
'input_refs': [genomeSet_ref_1, genomeSet_ref_2],
'output_name': base_output_name,
'desc': 'test'
}
result = self.getImpl().KButil_Merge_GenomeSets(self.getContext(),params)
print('RESULT:')
pprint(result)
# check the output
output_name = base_output_name
output_type = 'KBaseSearch.GenomeSet'
output_ref = self.getWsName()+'/'+output_name
info_list = self.getWsClient().get_object_info_new({'objects':[{'ref':output_ref}]})
self.assertEqual(len(info_list),1)
output_info = info_list[0]
self.assertEqual(output_info[1],output_name)
self.assertEqual(output_info[2].split('-')[0],output_type)
output_obj = self.getWsClient().get_objects2({'objects': [{'ref': output_ref}]})['data'][0]['data']
self.assertEqual(len(output_obj['elements'].keys()),3)
pass
#### test_KButil_Build_GenomeSet():
##
def test_KButil_Build_GenomeSet (self):
method = 'KButil_Build_GenomeSet'
print ("\n\nRUNNING: test_KButil_Build_GenomeSet()")
print ("======================================\n\n")
# input_data
reference_prok_genomes_WS = 'ReferenceDataManager' # PROD and CI
genome_ref_1 = reference_prok_genomes_WS+'/GCF_001566335.1/1' # E. coli K-12 MG1655
genome_ref_2 = reference_prok_genomes_WS+'/GCF_000021385.1/1' # D. vulgaris str. 'Miyazaki F'
genome_ref_3 = reference_prok_genomes_WS+'/GCF_900129775.1/1' # Halobaculum gomorrense (16 contigs)
#genome_id_feature_id_delim = '.f:'
#feature_id_1 = 'AWN69_RS07145'
#feature_id_2 = 'DVMF_RS00005'
#feature_id_3 = 'BUE16_RS15805'
# run method
base_output_name = method+'_output'
params = {
'workspace_name': self.getWsName(),
'input_refs': [genome_ref_1, genome_ref_2, genome_ref_3],
'output_name': base_output_name,
'desc': 'test'
}
result = self.getImpl().KButil_Build_GenomeSet(self.getContext(),params)
print('RESULT:')
pprint(result)
# check the output
output_name = base_output_name
output_type = 'KBaseSearch.GenomeSet'
output_ref = self.getWsName()+'/'+output_name
info_list = self.getWsClient().get_object_info_new({'objects':[{'ref':output_ref}]})
self.assertEqual(len(info_list),1)
output_info = info_list[0]
self.assertEqual(output_info[1],output_name)
self.assertEqual(output_info[2].split('-')[0],output_type)
output_obj = self.getWsClient().get_objects2({'objects': [{'ref': output_ref}]})['data'][0]['data']
self.assertEqual(len(output_obj['elements'].keys()),3)
pass
#### test_KButil_Build_GenomeSet_from_FeatureSet():
##
def test_KButil_Build_GenomeSet_from_FeatureSet (self):
method = 'KButil_Build_GenomeSet_from_FeatureSet'
print ("\n\nRUNNING: test_KButil_Build_GenomeSet_from_FeatureSet()")
print ("======================================================\n\n")
# input_data
reference_prok_genomes_WS = 'ReferenceDataManager' # PROD and CI
genome_ref_1 = reference_prok_genomes_WS+'/GCF_001566335.1/1' # E. coli K-12 MG1655
genome_ref_2 = reference_prok_genomes_WS+'/GCF_000021385.1/1' # D. vulgaris str. 'Miyazaki F'
genome_ref_3 = reference_prok_genomes_WS+'/GCF_900129775.1/1' # Halobaculum gomorrense (16 contigs)
genome_id_feature_id_delim = '.f:'
feature_id_1 = 'AWN69_RS07145'
feature_id_2 = 'DVMF_RS00005'
feature_id_3 = 'BUE16_RS15805'
# featureSet
featureSet_obj = { 'description': 'test featureSet',
'element_ordering': [
feature_id_1,
feature_id_2,
feature_id_3
],
'elements': {
feature_id_1: [genome_ref_1],
feature_id_2: [genome_ref_2],
feature_id_3: [genome_ref_3]
}
}
provenance = [{}]
featureSet_info = self.getWsClient().save_objects({
'workspace': self.getWsName(),
'objects': [
{
'type': 'KBaseCollections.FeatureSet',
'data': featureSet_obj,
'name': 'test_featureSet',
'meta': {},
'provenance': provenance
}
]})[0]
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple
featureSet_ref = str(featureSet_info[WSID_I])+'/'+str(featureSet_info[OBJID_I])+'/'+str(featureSet_info[VERSION_I])
# run method
base_output_name = method+'_output'
params = {
'workspace_name': self.getWsName(),
'input_ref': featureSet_ref,
'output_name': base_output_name,
'desc': 'test'
}
result = self.getImpl().KButil_Build_GenomeSet_from_FeatureSet(self.getContext(),params)
print('RESULT:')
pprint(result)
# check the output
output_name = base_output_name
output_type = 'KBaseSearch.GenomeSet'
output_ref = self.getWsName()+'/'+output_name
info_list = self.getWsClient().get_object_info_new({'objects':[{'ref':output_ref}]})
self.assertEqual(len(info_list),1)
output_info = info_list[0]
self.assertEqual(output_info[1],output_name)
self.assertEqual(output_info[2].split('-')[0],output_type)
output_obj = self.getWsClient().get_objects2({'objects': [{'ref': output_ref}]})['data'][0]['data']
self.assertEqual(len(output_obj['elements'].keys()),3)
pass
#### test_KButil_Add_Genomes_to_GenomeSet():
##
def test_KButil_Add_Genomes_to_GenomeSet (self):
method = 'KButil_Add_Genomes_to_GenomeSet'
print ("\n\nRUNNING: test_KButil_Add_Genomes_to_GenomeSet()")
print ("===============================================\n\n")
# input_data
reference_prok_genomes_WS = 'ReferenceDataManager' # PROD and CI
genome_ref_1 = reference_prok_genomes_WS+'/GCF_001566335.1/1' # E. coli K-12 MG1655
genome_ref_2 = reference_prok_genomes_WS+'/GCF_000021385.1/1' # D. vulgaris str. 'Miyazaki F'
genome_ref_3 = reference_prok_genomes_WS+'/GCF_900129775.1/1' # Halobaculum gomorrense (16 contigs)
#genome_id_feature_id_delim = '.f:'
#feature_id_1 = 'AWN69_RS07145'
#feature_id_2 = 'DVMF_RS00005'
#feature_id_3 = 'BUE16_RS15805'
# GenomeSet 1
genomeSet_obj_1 = { 'description': 'test genomeSet 1',
'elements': { 'genome_1': { 'ref': genome_ref_1 }
}
}
provenance = [{}]
genomeSet_info = self.getWsClient().save_objects({
'workspace': self.getWsName(),
'objects': [
{
'type': 'KBaseSearch.GenomeSet',
'data': genomeSet_obj_1,
'name': 'test_genomeSet_1',
'meta': {},
'provenance': provenance
}
]})[0]
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple
genomeSet_ref_1 = str(genomeSet_info[WSID_I])+'/'+str(genomeSet_info[OBJID_I])+'/'+str(genomeSet_info[VERSION_I])
# run method
base_output_name = method+'_output'
params = {
'workspace_name': self.getWsName(),
'input_genome_refs': [genome_ref_2, genome_ref_3],
'input_genomeset_ref': genomeSet_ref_1,
'output_name': base_output_name,
'desc': 'test'
}
result = self.getImpl().KButil_Add_Genomes_to_GenomeSet(self.getContext(),params)
print('RESULT:')
pprint(result)
# check the output
output_name = base_output_name
output_type = 'KBaseSearch.GenomeSet'
output_ref = self.getWsName()+'/'+output_name
info_list = self.getWsClient().get_object_info_new({'objects':[{'ref':output_ref}]})
self.assertEqual(len(info_list),1)
output_info = info_list[0]
self.assertEqual(output_info[1],output_name)
self.assertEqual(output_info[2].split('-')[0],output_type)
output_obj = self.getWsClient().get_objects2({'objects': [{'ref': output_ref}]})['data'][0]['data']
self.assertEqual(len(output_obj['elements'].keys()),3)
pass
#### test_KButil_Concat_MSAs():
##
def test_KButil_Concat_MSAs(self):
    """Concatenate three copies of one MSA and verify the merged object.

    Saves 'DsrA.MSA.json' three times as KBaseTrees.MSA objects, runs
    KButil_Concat_MSAs over their refs, and asserts the output keeps the
    original row count while tripling the alignment length.
    """
    method = 'KButil_Concat_MSAs'
    print("\n\nRUNNING: test_KButil_Concat_MSAs()")
    print("==================================\n\n")

    # load MSA test data
    MSA_json_file = os.path.join('data', 'DsrA.MSA.json')
    # BUGFIX: the original passed buffering=0 to a text-mode open(), which
    # raises ValueError on Python 3 ("can't have unbuffered text I/O");
    # default buffering is correct here.
    with open(MSA_json_file, 'r') as MSA_json_fh:
        MSA_obj = json.load(MSA_json_fh)

    # save three identical MSA objects to the test workspace
    provenance = [{}]
    MSA_info_list = self.getWsClient().save_objects({
        'workspace': self.getWsName(),
        'objects': [{'type': 'KBaseTrees.MSA',
                     'data': MSA_obj,
                     'name': 'test_MSA_' + str(msa_i),
                     'meta': {},
                     'provenance': provenance
                     } for msa_i in (1, 2, 3)]
    })
    # object_info tuple indices
    [OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11)
    MSA_refs = [str(info[WSID_I]) + '/' + str(info[OBJID_I]) + '/' + str(info[VERSION_I])
                for info in MSA_info_list]

    # run method
    base_output_name = method + '_output'
    params = {
        'workspace_name': self.getWsName(),
        'input_refs': MSA_refs,
        'output_name': base_output_name,
        'desc': 'test'
    }
    result = self.getImpl().KButil_Concat_MSAs(self.getContext(), params)
    print('RESULT:')
    pprint(result)

    # check the output object's name, type, and concatenated dimensions
    output_name = base_output_name
    output_type = 'KBaseTrees.MSA'
    output_ref = self.getWsName() + '/' + output_name
    info_list = self.getWsClient().get_object_info_new({'objects': [{'ref': output_ref}]})
    self.assertEqual(len(info_list), 1)
    output_info = info_list[0]
    self.assertEqual(output_info[1], output_name)
    self.assertEqual(output_info[2].split('-')[0], output_type)
    output_obj = self.getWsClient().get_objects2({'objects': [{'ref': output_ref}]})['data'][0]['data']
    self.assertEqual(len(output_obj['row_order']), len(MSA_obj['row_order']))
    self.assertEqual(output_obj['alignment_length'], 3 * MSA_obj['alignment_length'])
    pass
#### test_KButil_Build_ReadsSet()
##
def test_KButil_Build_ReadsSet(self):
    """Build a KBaseSets.ReadsSet from three paired-end libraries."""
    method = 'KButil_Build_ReadsSet'
    print("\n\nRUNNING: test_KButil_Build_ReadsSet()")
    print("=====================================\n\n")

    # stage the three paired-end test libraries
    pe_lib_info_1 = self.getPairedEndLibInfo('test_quick', lib_i=0)
    pprint(pe_lib_info_1)
    pe_lib_info_2 = self.getPairedEndLibInfo('small', lib_i=1)
    pprint(pe_lib_info_2)
    pe_lib_info_3 = self.getPairedEndLibInfo('small_2', lib_i=2)
    pprint(pe_lib_info_3)

    # build wsid/objid refs and run the method
    input_refs = []
    for lib_info in (pe_lib_info_1, pe_lib_info_2, pe_lib_info_3):
        input_refs.append(str(lib_info[6]) + '/' + str(lib_info[0]))
    base_output_name = method + '_output'
    params = {'workspace_name': self.getWsName(),
              'input_refs': input_refs,
              'output_name': base_output_name,
              'desc': 'test build readsSet'}
    result = self.getImpl().KButil_Build_ReadsSet(self.getContext(), params)
    print('RESULT:')
    pprint(result)

    # verify the saved set: one info record, expected name/type, 3 items
    output_name = base_output_name
    output_type = 'KBaseSets.ReadsSet'
    output_ref = self.getWsName() + '/' + output_name
    info_list = self.getWsClient().get_object_info_new({'objects': [{'ref': output_ref}]})
    self.assertEqual(len(info_list), 1)
    readsLib_info = info_list[0]
    self.assertEqual(readsLib_info[1], output_name)
    self.assertEqual(readsLib_info[2].split('-')[0], output_type)
    output_obj = self.getWsClient().get_objects2({'objects': [{'ref': output_ref}]})['data'][0]['data']
    self.assertEqual(len(output_obj['items']), 3)
#### test_KButil_Split_Reads():
##
def test_KButil_Split_Reads(self):
    """Split one paired-end library into split_num output libraries."""
    method = 'KButil_Split_Reads'
    print("\n\nRUNNING: test_KButil_Split_Reads()")
    print("==================================\n\n")

    # stage the input library
    pe_lib_info = self.getPairedEndLibInfo('small_2')
    pprint(pe_lib_info)
    # object_info tuple layout: 0 objid, 1 name, 2 type, 3 save_date,
    # 4 version, 5 saved_by, 6 wsid, 7 workspace, 8 chsum, 9 size, 10 meta

    # run method
    split_num = 4
    base_output_name = method + '_output'
    params = {'workspace_name': self.getWsName(),
              'input_ref': str(pe_lib_info[6]) + '/' + str(pe_lib_info[0]),
              'split_num': split_num,
              'output_name': base_output_name,
              'desc': 'test split'}
    result = self.getImpl().KButil_Split_Reads(self.getContext(), params)
    print('RESULT:')
    pprint(result)

    # spot-check the first and last split libraries for name and type
    output_type = 'KBaseFile.PairedEndLibrary'
    for split_i in (0, split_num - 1):
        output_name = base_output_name + '_paired-' + str(split_i)
        info_list = self.getWsClient().get_object_info_new(
            {'objects': [{'ref': pe_lib_info[7] + '/' + output_name}]})
        self.assertEqual(len(info_list), 1)
        readsLib_info = info_list[0]
        self.assertEqual(readsLib_info[1], output_name)
        self.assertEqual(readsLib_info[2].split('-')[0], output_type)
#### test_KButil_Random_Subsample_Reads():
##
def test_KButil_Random_Subsample_Reads(self):
    """Randomly subsample a paired-end library into split_num libraries."""
    method = 'KButil_Random_Subsample_Reads'
    print("\n\nRUNNING: test_KButil_Random_Subsample_Reads()")
    print("=============================================\n\n")

    # stage the input library
    pe_lib_info = self.getPairedEndLibInfo('small_2')
    pprint(pe_lib_info)

    # run method with a fixed seed so the subsample is reproducible
    split_num = 4
    reads_num = 2500
    base_output_name = method + '_output'
    params = {'workspace_name': self.getWsName(),
              'input_ref': str(pe_lib_info[6]) + '/' + str(pe_lib_info[0]),
              'subsample_fraction': {'split_num': split_num,
                                     'reads_num': reads_num},
              'output_name': base_output_name,
              'desc': 'test random subsample',
              'seed': 1}
    result = self.getImpl().KButil_Random_Subsample_Reads(self.getContext(), params)
    print('RESULT:')
    pprint(result)

    # spot-check the first and last subsample libraries
    output_type = 'KBaseFile.PairedEndLibrary'
    for split_i in (0, split_num - 1):
        output_name = base_output_name + '_paired-' + str(split_i)
        info_list = self.getWsClient().get_object_info_new(
            {'objects': [{'ref': pe_lib_info[7] + '/' + output_name}]})
        self.assertEqual(len(info_list), 1)
        readsLib_info = info_list[0]
        self.assertEqual(readsLib_info[1], output_name)
        self.assertEqual(readsLib_info[2].split('-')[0], output_type)
#### test_KButil_Merge_ReadsSet_to_OneLibrary()
##
def test_KButil_Merge_ReadsSet_to_OneLibrary(self):
    """Merge a two-member ReadsSet into a single paired-end library."""
    method = 'KButil_Merge_ReadsSet_to_OneLibrary'
    print("\n\nRUNNING: test_KButil_Merge_ReadsSet_to_OneLibrary()")
    print("===================================================\n\n")

    # stage a ReadsSet containing two test libraries
    pe_lib_set_info = self.getPairedEndLib_SetInfo(['test_quick', 'small_2'])
    pprint(pe_lib_set_info)

    # run method
    base_output_name = method + '_output'
    params = {'workspace_name': self.getWsName(),
              'input_ref': str(pe_lib_set_info[6]) + '/' + str(pe_lib_set_info[0]),
              'output_name': base_output_name,
              'desc': 'test merge'}
    result = self.getImpl().KButil_Merge_ReadsSet_to_OneLibrary(self.getContext(), params)
    print('RESULT:')
    pprint(result)

    # the merged library must exist with the expected name and type
    output_name = base_output_name
    output_type = 'KBaseFile.PairedEndLibrary'
    info_list = self.getWsClient().get_object_info_new(
        {'objects': [{'ref': self.getWsName() + '/' + output_name}]})
    self.assertEqual(len(info_list), 1)
    readsLib_info = info_list[0]
    self.assertEqual(readsLib_info[1], output_name)
    self.assertEqual(readsLib_info[2].split('-')[0], output_type)
#### test_KButil_Merge_MultipleReadsLibs_to_OneLibrary()
##
def test_KButil_Merge_MultipleReadsLibs_to_OneLibrary(self):
    """Merge three individual paired-end libraries into one library."""
    method = 'KButil_Merge_MultipleReadsLibs_to_OneLibrary'
    print("\n\nRUNNING: test_KButil_Merge_MultipleReadsLibs_to_OneLibrary()")
    print("============================================================\n\n")

    # stage the three test libraries and collect wsid/objid refs
    input_refs = []
    for lib_i, lib_basename in enumerate(['test_quick', 'small', 'small_2']):
        lib_info = self.getPairedEndLibInfo(lib_basename, lib_i=lib_i)
        pprint(lib_info)
        input_refs.append(str(lib_info[6]) + '/' + str(lib_info[0]))

    # run method
    base_output_name = method + '_output'
    params = {'workspace_name': self.getWsName(),
              'input_refs': input_refs,
              'output_name': base_output_name,
              'desc': 'test merge'}
    result = self.getImpl().KButil_Merge_MultipleReadsLibs_to_OneLibrary(self.getContext(), params)
    print('RESULT:')
    pprint(result)

    # the merged library must exist with the expected name and type
    output_name = base_output_name
    output_type = 'KBaseFile.PairedEndLibrary'
    info_list = self.getWsClient().get_object_info_new(
        {'objects': [{'ref': self.getWsName() + '/' + output_name}]})
    self.assertEqual(len(info_list), 1)
    readsLib_info = info_list[0]
    self.assertEqual(readsLib_info[1], output_name)
    self.assertEqual(readsLib_info[2].split('-')[0], output_type)
#### test_KButil_Merge_MultipleReadsSets_to_OneReadsSet()
##
def test_KButil_Merge_MultipleReadsSets_to_OneReadsSet(self):
    """Merge two ReadsSets (2 libs + 1 lib) into one 3-member ReadsSet.

    Stages three paired-end libraries, packs the first two into one
    KBaseSets.ReadsSet and the third into another, merges the two sets,
    and asserts the output set contains all three libraries.
    """
    method = 'KButil_Merge_MultipleReadsSets_to_OneReadsSet'
    # BUGFIX: banner previously read "ReadsSetss" (typo)
    print("\n\nRUNNING: test_KButil_Merge_MultipleReadsSets_to_OneReadsSet()")
    print("=============================================================\n\n")

    # stage the paired-end test libraries and build full ws/obj/ver refs
    lib_basenames = ['test_quick', 'small', 'small_2']
    pe_lib_info = []
    lib_refs = []
    for lib_i, lib_basename in enumerate(lib_basenames):
        this_info = self.getPairedEndLibInfo(lib_basename, lib_i=lib_i)
        pe_lib_info.append(this_info)
        pprint(this_info)
        lib_refs.append(str(this_info[6]) + '/' + str(this_info[0]) + '/' + str(this_info[4]))

    def _save_readsSet(name, desc, items):
        """Save a KBaseSets.ReadsSet and return its full ws/obj/ver ref."""
        info = self.wsClient.save_objects({
            'workspace': self.getWsName(),
            'objects': [{'type': 'KBaseSets.ReadsSet',
                         'data': {'description': desc, 'items': items},
                         'name': name,
                         'meta': {},
                         'provenance': [{'service': 'kb_util_dylan',
                                         'method': 'test_kb_util_dylan'}]
                         }]
        })[0]
        return str(info[6]) + '/' + str(info[0]) + '/' + str(info[4])

    # readsSet 1 holds the first two libraries; readsSet 2 holds the third
    readsSet_ref_1 = _save_readsSet(
        'TEST_READSET_1', 'test ReadsSet 1',
        [{'ref': lib_refs[0], 'label': lib_basenames[0]},
         {'ref': lib_refs[1], 'label': lib_basenames[1]}])
    readsSet_ref_2 = _save_readsSet(
        'TEST_READSET_2', 'test ReadsSet 2',
        [{'ref': lib_refs[2], 'label': lib_basenames[2]}])

    # run method
    input_refs = [readsSet_ref_1, readsSet_ref_2]
    base_output_name = method + '_output'
    params = {'workspace_name': self.getWsName(),
              'input_refs': input_refs,
              'output_name': base_output_name,
              'desc': 'test merge'}
    result = self.getImpl().KButil_Merge_MultipleReadsSets_to_OneReadsSet(self.getContext(), params)
    print('RESULT:')
    pprint(result)

    # check the output: one object with the right name/type, all 3 members
    output_name = base_output_name
    output_type = 'KBaseSets.ReadsSet'
    info_list = self.getWsClient().get_object_info_new(
        {'objects': [{'ref': self.getWsName() + '/' + output_name}]})
    self.assertEqual(len(info_list), 1)
    output_info = info_list[0]
    self.assertEqual(output_info[1], output_name)
    self.assertEqual(output_info[2].split('-')[0], output_type)
    output_ref = self.getWsName() + '/' + output_name
    output_obj = self.getWsClient().get_objects2({'objects': [{'ref': output_ref}]})['data'][0]['data']
    self.assertEqual(len(output_obj['items']), 3)
    pass
#### test_KButil_Extract_Unpaired_Reads_and_Synchronize_Pairs():
##
def test_KButil_Extract_Unpaired_Reads_and_Synchronize_Pairs(self):
    """Synchronize pairs across a ReadsSet; output keeps every member."""
    method = 'KButil_Extract_Unpaired_Reads_and_Synchronize_Pairs'
    print("\n\nRUNNING: test_KButil_Extract_Unpaired_Reads_and_Synchronize_Pairs()")
    print("===================================================================\n\n")

    # stage a ReadsSet from three test libraries
    lib_basenames = ['test_quick', 'small_2', 'small']
    pe_lib_set_info = self.getPairedEndLib_SetInfo(lib_basenames)
    pprint(pe_lib_set_info)

    # run method
    base_output_name = method + '_output'
    params = {'workspace_name': self.getWsName(),
              'input_ref': str(pe_lib_set_info[6]) + '/' + str(pe_lib_set_info[0]),
              'output_name': base_output_name,
              'desc': 'test hygiene'}
    result = self.getImpl().KButil_Extract_Unpaired_Reads_and_Synchronize_Pairs(self.getContext(), params)
    print('RESULT:')
    pprint(result)

    # the synched output set carries a name extension
    output_name = base_output_name + '_paired_synched'
    output_type = 'KBaseSets.ReadsSet'
    output_ref = self.getWsName() + '/' + output_name
    info_list = self.getWsClient().get_object_info_new({'objects': [{'ref': output_ref}]})
    self.assertEqual(len(info_list), 1)
    output_info = info_list[0]
    self.assertEqual(output_info[1], output_name)
    self.assertEqual(output_info[2].split('-')[0], output_type)
    output_obj = self.getWsClient().get_objects2({'objects': [{'ref': output_ref}]})['data'][0]['data']
    print('OUTPUT_OBJ:')
    pprint(output_obj)
    self.assertEqual(len(output_obj['items']), len(lib_basenames))
#### test_KButil_Translate_ReadsLibs_QualScores()
##
def test_KButil_Translate_ReadsLibs_QualScores(self):
    """Translate q64 libraries to phred33 and check the outputs exist."""
    method = 'KButil_Translate_ReadsLibs_QualScores'
    print("\n\nRUNNING: test_KButil_Translate_ReadsLibs_QualScores()")
    print("=====================================================\n\n")

    # stage the q64-encoded test libraries
    lib_basenames = ['test_quick', 'small']
    lib_obj_names = []
    input_refs = []
    for lib_i, lib_basename in enumerate(lib_basenames):
        pe_lib_info = self.getPairedEndLibInfo(lib_basename + '-q64_5recs', lib_i=lib_i)
        pprint(pe_lib_info)
        lib_obj_names.append(str(pe_lib_info[1]))
        input_refs.append(str(pe_lib_info[6]) + '/' + str(pe_lib_info[0]) + '/' + str(pe_lib_info[4]))

    # run method
    result = self.getImpl().KButil_Translate_ReadsLibs_QualScores(
        self.getContext(),
        {'workspace_name': self.getWsName(),
         'input_refs': input_refs})
    print('RESULT:')
    pprint(result)

    # each input library should gain a '.phred33' sibling object
    output_type = 'KBaseFile.PairedEndLibrary'
    for lib_obj_name in lib_obj_names:
        output_name = lib_obj_name + '.phred33'
        output_ref = self.getWsName() + '/' + output_name
        info_list = self.getWsClient().get_object_info_new({'objects': [{'ref': output_ref}]})
        self.assertEqual(len(info_list), 1)
        output_info = info_list[0]
        self.assertEqual(output_info[1], output_name)
        self.assertEqual(output_info[2].split('-')[0], output_type)
#### test_KButil_AddInsertLen_to_ReadsLibs()
##
def test_KButil_AddInsertLen_to_ReadsLibs(self):
    """Add insert length stats to reads libraries and verify stored values."""
    method = 'KButil_AddInsertLen_to_ReadsLibs'
    print("\n\nRUNNING: test_KButil_AddInsertLen_to_ReadsLibs()")
    print("================================================\n\n")

    # stage the test libraries and collect names plus full refs
    lib_basenames = ['test_quick', 'small']
    lib_obj_names = []
    input_refs = []
    for lib_i, lib_basename in enumerate(lib_basenames):
        pe_lib_info = self.getPairedEndLibInfo(lib_basename, lib_i=lib_i)
        pprint(pe_lib_info)
        lib_obj_names.append(str(pe_lib_info[1]))
        input_refs.append(str(pe_lib_info[6]) + '/' + str(pe_lib_info[0]) + '/' + str(pe_lib_info[4]))

    # run method (insert stats passed as strings, as in the original call)
    params = {'workspace_name': self.getWsName(),
              'input_refs': input_refs,
              'insert_len': '450.0',
              'insert_stddev': '15.0'}
    result = self.getImpl().KButil_AddInsertLen_to_ReadsLibs(self.getContext(), params)
    print('RESULT:')
    pprint(result)

    # updated libraries keep their names and carry the new insert stats
    output_type = 'KBaseFile.PairedEndLibrary'
    for output_name in lib_obj_names:
        output_ref = self.getWsName() + '/' + output_name
        info_list = self.getWsClient().get_object_info_new({'objects': [{'ref': output_ref}]})
        self.assertEqual(len(info_list), 1)
        output_info = info_list[0]
        self.assertEqual(output_info[1], output_name)
        self.assertEqual(output_info[2].split('-')[0], output_type)
        output_obj = self.getWsClient().get_objects2({'objects': [{'ref': output_ref}]})['data'][0]['data']
        print('OUTPUT_OBJ:')
        pprint(output_obj)
        self.assertEqual(output_obj['insert_size_mean'], 450.0)
        self.assertEqual(output_obj['insert_size_std_dev'], 15.0)
#### test_KButil_Build_AssemblySet()
##
def test_KButil_Build_AssemblySet(self):
    """Build a KBaseSets.AssemblySet from two uploaded assemblies.

    Uploads two FASTA files as Assembly objects, runs
    KButil_Build_AssemblySet over their refs, and checks the saved set
    has the right name, type, and member count.
    """
    method = 'KButil_Build_AssemblySet'
    print("\n\nRUNNING: test_KButil_Build_AssemblySet()")
    print("========================================\n\n")

    # upload test data
    try:
        auClient = AssemblyUtil(self.callbackURL, token=self.token)
    except Exception as e:
        raise ValueError('Unable to instantiate auClient with callbackURL: ' + self.callbackURL + ' ERROR: ' + str(e))
    ass_file_1 = 'assembly_1.fa'
    ass_file_2 = 'assembly_2.fa'
    ass_path_1 = os.path.join(self.scratch, ass_file_1)
    ass_path_2 = os.path.join(self.scratch, ass_file_2)
    shutil.copy(os.path.join("data", ass_file_1), ass_path_1)
    shutil.copy(os.path.join("data", ass_file_2), ass_path_2)
    ass_ref_1 = auClient.save_assembly_from_fasta({
        'file': {'path': ass_path_1},
        'workspace_name': self.getWsName(),
        'assembly_name': 'assembly_1'
    })
    # BUGFIX: the second assembly was saved as 'assembly_1' (copy-paste),
    # overwriting the first object instead of creating a second one.
    ass_ref_2 = auClient.save_assembly_from_fasta({
        'file': {'path': ass_path_2},
        'workspace_name': self.getWsName(),
        'assembly_name': 'assembly_2'
    })

    # run method
    input_refs = [ass_ref_1, ass_ref_2]
    base_output_name = method + '_output'
    params = {
        'workspace_name': self.getWsName(),
        'input_refs': input_refs,
        'output_name': base_output_name,
        'desc': 'test build assemblySet'
    }
    result = self.getImpl().KButil_Build_AssemblySet(self.getContext(), params)
    print('RESULT:')
    pprint(result)

    # check the output set: one object, expected name/type, both members
    output_name = base_output_name
    output_type = 'KBaseSets.AssemblySet'
    output_ref = self.getWsName() + '/' + output_name
    info_list = self.getWsClient().get_object_info_new({'objects': [{'ref': output_ref}]})
    self.assertEqual(len(info_list), 1)
    assemblySet_info = info_list[0]
    self.assertEqual(assemblySet_info[1], output_name)
    self.assertEqual(assemblySet_info[2].split('-')[0], output_type)
    output_obj = self.getWsClient().get_objects2({'objects': [{'ref': output_ref}]})['data'][0]['data']
    self.assertEqual(len(output_obj['items']), len(input_refs))
    pass
| 41.594574
| 148
| 0.563412
| 5,872
| 53,657
| 4.780484
| 0.061138
| 0.042749
| 0.016031
| 0.022799
| 0.831677
| 0.785864
| 0.724235
| 0.70653
| 0.678565
| 0.662606
| 0
| 0.020172
| 0.304303
| 53,657
| 1,289
| 149
| 41.626843
| 0.731817
| 0.071212
| 0
| 0.601221
| 0
| 0
| 0.155645
| 0.052877
| 0
| 0
| 0
| 0
| 0.071211
| 1
| 0.028484
| false
| 0.021363
| 0.015259
| 0.003052
| 0.057986
| 0.095626
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
db809a5300d00264ec1e46e4712dbf68541ac94c
| 299
|
py
|
Python
|
cloud-computing-concepts-part1/scripts/power-usage.py
|
kgrodzicki/cloud-computing-specialization
|
7c332312e9550d76baad4db4256dacd9ee84ac00
|
[
"MIT"
] | 11
|
2016-02-20T04:48:11.000Z
|
2020-05-31T14:06:15.000Z
|
cloud-computing-concepts-part1/scripts/power-usage.py
|
kgrodzicki/cloud-computing-specialization
|
7c332312e9550d76baad4db4256dacd9ee84ac00
|
[
"MIT"
] | null | null | null |
cloud-computing-concepts-part1/scripts/power-usage.py
|
kgrodzicki/cloud-computing-specialization
|
7c332312e9550d76baad4db4256dacd9ee84ac00
|
[
"MIT"
] | 12
|
2017-11-02T02:18:18.000Z
|
2021-05-02T07:20:07.000Z
|
__author__ = 'grokrz'
# PUE = Power Usage Efficiency = Total power consumed divided by power consumed for running IT equipment.
total_power_consumed = 1600
consumed_by_running_IT_equipment = 800
# BUGFIX/PORT: the original used the Python 2 print statement (a
# SyntaxError on Python 3) and relied on Python 2 integer division.
# Use print() and an explicit float division so the script runs and
# gives the same ratio on both Python 2 and 3.
pue = total_power_consumed / float(consumed_by_running_IT_equipment)
print("Power Usage Efficiency PUE = " + str(pue))
| 33.222222
| 105
| 0.809365
| 41
| 299
| 5.512195
| 0.439024
| 0.230089
| 0.238938
| 0.168142
| 0.247788
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027132
| 0.137124
| 299
| 8
| 106
| 37.375
| 0.848837
| 0.344482
| 0
| 0
| 0
| 0
| 0.180412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.25
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
91583ceab0eed6aed1e3205fabadd722dedfe11c
| 115
|
py
|
Python
|
app/modules/weather/get_weather_method.py
|
ASH1998/THE-BOT
|
89bed22a517b201839dfdd4cb8e75baab4ce9c0f
|
[
"MIT"
] | 10
|
2017-10-27T02:37:10.000Z
|
2021-04-08T03:02:56.000Z
|
app/modules/weather/get_weather_method.py
|
ASH1998/THE-BOT
|
89bed22a517b201839dfdd4cb8e75baab4ce9c0f
|
[
"MIT"
] | 2
|
2017-11-22T04:28:38.000Z
|
2017-11-26T19:46:50.000Z
|
app/modules/weather/get_weather_method.py
|
ASH1998/THE-BOT
|
89bed22a517b201839dfdd4cb8e75baab4ce9c0f
|
[
"MIT"
] | 9
|
2017-10-27T02:38:04.000Z
|
2021-08-18T16:50:34.000Z
|
from .ywthr import Yweather_Index


def get_the_place(place):
    """Fetch the weather for *place* via a Yweather_Index instance."""
    index = Yweather_Index()
    return index.get_weather(place)
| 12.777778
| 33
| 0.765217
| 18
| 115
| 4.611111
| 0.666667
| 0.313253
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147826
| 115
| 9
| 34
| 12.777778
| 0.846939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
91cdfe034b4e9a76cae5690ea3b4fbb14feea46d
| 148
|
py
|
Python
|
tests/api/test_config.py
|
okken/cards
|
4c7e7bd00c440d56d840b36c91b2231977a6f346
|
[
"MIT"
] | 61
|
2018-03-08T12:51:20.000Z
|
2021-08-08T08:18:19.000Z
|
tests/api/test_config.py
|
okken/cards
|
4c7e7bd00c440d56d840b36c91b2231977a6f346
|
[
"MIT"
] | 36
|
2018-03-07T17:40:15.000Z
|
2022-03-18T21:43:51.000Z
|
tests/api/test_config.py
|
okken/cards
|
4c7e7bd00c440d56d840b36c91b2231977a6f346
|
[
"MIT"
] | 28
|
2018-03-17T08:41:58.000Z
|
2022-03-03T21:54:12.000Z
|
"""
Test Cases
* `config` returns the correct database path
"""
def test_config(cards_db, tmp_db_path):
assert cards_db.path() == tmp_db_path
| 16.444444
| 44
| 0.716216
| 23
| 148
| 4.304348
| 0.565217
| 0.181818
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 148
| 8
| 45
| 18.5
| 0.798387
| 0.371622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
91d8e1abcaa00d3337a2e0dca5d5c2023198f91c
| 58
|
py
|
Python
|
pytools/modules/goodgoodgenerator/__init__.py
|
CharlesPikachu/pytools
|
768078cac5f3d131630b1b40a14d112251f3bd88
|
[
"Apache-2.0"
] | 53
|
2021-12-23T03:24:34.000Z
|
2022-03-30T07:40:50.000Z
|
pytools/modules/goodgoodgenerator/__init__.py
|
CharlesPikachu/pytools
|
768078cac5f3d131630b1b40a14d112251f3bd88
|
[
"Apache-2.0"
] | 3
|
2021-12-23T14:17:58.000Z
|
2022-01-06T13:24:47.000Z
|
pytools/modules/goodgoodgenerator/__init__.py
|
CharlesPikachu/pytools
|
768078cac5f3d131630b1b40a14d112251f3bd88
|
[
"Apache-2.0"
] | 30
|
2021-12-28T07:28:48.000Z
|
2022-03-22T17:21:08.000Z
|
'''Initialize the package (translated from Chinese: "初始化" = "initialize").'''
from .goodgoodgenerator import GoodGoodGenerator
| 29
| 48
| 0.810345
| 5
| 58
| 9.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 58
| 2
| 48
| 29
| 0.87037
| 0.051724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
37d1fdb86b92366a52312c20702d53398e409ab8
| 39
|
py
|
Python
|
OpenPose/openpose/__init__.py
|
qzane/AI-basketball-analysis-on-google-colab
|
edc5e9e698d59dfc2696ecdafb7a0fed78e3df09
|
[
"MIT-CMU",
"MIT"
] | 752
|
2020-05-06T04:34:48.000Z
|
2022-03-31T12:34:10.000Z
|
OpenPose/openpose/__init__.py
|
qzane/AI-basketball-analysis-on-google-colab
|
edc5e9e698d59dfc2696ecdafb7a0fed78e3df09
|
[
"MIT-CMU",
"MIT"
] | 22
|
2020-05-26T20:59:48.000Z
|
2022-03-17T02:43:15.000Z
|
OpenPose/openpose/__init__.py
|
qzane/AI-basketball-analysis-on-google-colab
|
edc5e9e698d59dfc2696ecdafb7a0fed78e3df09
|
[
"MIT-CMU",
"MIT"
] | 142
|
2020-05-07T21:35:29.000Z
|
2022-03-22T17:50:32.000Z
|
from . import pyopenpose as pyopenpose
| 19.5
| 38
| 0.820513
| 5
| 39
| 6.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 39
| 1
| 39
| 39
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
37edba08653b9b38416a5aab3c5c4bbfd5363ec2
| 47
|
py
|
Python
|
enthought/pyface/wizard/api.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/pyface/wizard/api.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/pyface/wizard/api.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from pyface.wizard.api import *
| 15.666667
| 31
| 0.765957
| 7
| 47
| 5.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 2
| 32
| 23.5
| 0.9
| 0.255319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.