hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
e819578a7fb20ced45cf1baf562632d705725913
96
py
Python
venv/lib/python3.8/site-packages/numpy/polynomial/legendre.py
GiulianaPola/select_repeats
17a0d053d4f874e42cf654dd142168c2ec8fbd11
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/numpy/polynomial/legendre.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/numpy/polynomial/legendre.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/f2/b7/f8/4e68c73075e0aac12c5662cc4db6e9f6a8d2c5f5bf53fadd090bee02ff
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.34375
0
96
1
96
96
0.552083
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
5
1c569dcfc92eb93b4efc0ef0d787cffa675a0283
318
py
Python
Python/SQLite/anoka.py
programmer-666/Codes
fdffe38a789ba3636dff7ceaa9f1b4113ae17c2b
[ "MIT" ]
null
null
null
Python/SQLite/anoka.py
programmer-666/Codes
fdffe38a789ba3636dff7ceaa9f1b4113ae17c2b
[ "MIT" ]
null
null
null
Python/SQLite/anoka.py
programmer-666/Codes
fdffe38a789ba3636dff7ceaa9f1b4113ae17c2b
[ "MIT" ]
1
2021-09-16T14:24:29.000Z
2021-09-16T14:24:29.000Z
import os logo = """ ░█████╗░███╗░░██╗░█████╗░██╗░░██╗░█████╗░ ██╔══██╗████╗░██║██╔══██╗██║░██╔╝██╔══██╗ ███████║██╔██╗██║██║░░██║█████═╝░███████║ ██╔══██║██║╚████║██║░░██║██╔═██╗░██╔══██║ ██║░░██║██║░╚███║╚█████╔╝██║░╚██╗██║░░██║ ╚═╝░░╚═╝╚═╝░░╚══╝░╚════╝░╚═╝░░╚═╝╚═╝░░╚═╝ """ os.system("cls") print(logo+"\n", "_"*50)
31.8
45
0.113208
16
318
17.5625
0.875
0
0
0
0
0
0
0
0
0
0
0.006579
0.044025
318
10
46
31.8
0.108553
0
0
0
0
0
0.811912
0.77116
0
0
0
0
0
1
0
false
0
0.1
0
0.1
0.1
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
1c694d7bd64f13f698134a319fbab6f52501d1b6
4,183
py
Python
tests/simulator/test_simulator_cli.py
virtualcell/Biosimulators_utils
1b34e1e0a9ace706d245e9d515d0fae1e55a248d
[ "MIT" ]
null
null
null
tests/simulator/test_simulator_cli.py
virtualcell/Biosimulators_utils
1b34e1e0a9ace706d245e9d515d0fae1e55a248d
[ "MIT" ]
null
null
null
tests/simulator/test_simulator_cli.py
virtualcell/Biosimulators_utils
1b34e1e0a9ace706d245e9d515d0fae1e55a248d
[ "MIT" ]
null
null
null
from biosimulators_utils.simulator.cli import build_cli from biosimulators_utils.simulator.environ import ENVIRONMENT_VARIABLES import capturer import sys import unittest class CliTestCase(unittest.TestCase): def setUp(self): def exec_sedml_docs_in_combine_archive(archive_filename, outputs_dirname): if archive_filename: print(archive_filename) if outputs_dirname: raise Exception(outputs_dirname) self.App = build_cli('test-simulator', '4.5.6', 'Test Simulator', '1.2.3', 'https://test-simulator.org', exec_sedml_docs_in_combine_archive, environment_variables=ENVIRONMENT_VARIABLES.values()) def test_help(self): with self.assertRaises(SystemExit) as cm: with self.App(argv=['-h']) as app: app.run() self.assertEqual(cm.exception.code, 0) with self.assertRaises(SystemExit) as cm: with self.App(argv=['--help']) as app: app.run() self.assertEqual(cm.exception.code, 0) def test_version(self): with self.App(argv=['-v']) as app: with capturer.CaptureOutput(merged=False, relay=False) as captured: with self.assertRaises(SystemExit) as cm: app.run() self.assertEqual(cm.exception.code, 0) stdout = captured.stdout.get_text() self.assertIn('Test Simulator: 1.2.3', stdout) self.assertIn('CLI: 4.5.6', stdout) self.assertIn('Python: {}.{}.{}'.format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro), stdout) self.assertEqual(captured.stderr.get_text(), '') with self.App(argv=['--version']) as app: with capturer.CaptureOutput(merged=False, relay=False) as captured: with self.assertRaises(SystemExit) as cm: app.run() self.assertEqual(cm.exception.code, 0) stdout = captured.stdout.get_text() self.assertIn('Test Simulator: 1.2.3', stdout) self.assertIn('CLI: 4.5.6', stdout) self.assertIn('Python: {}.{}.{}'.format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro), stdout) self.assertEqual(captured.stderr.get_text(), '') def test_exec_archive(self): with self.App(argv=['-i', 'path to COMBINE/OMEX archive', '-o', '']) as app: with 
capturer.CaptureOutput(merged=False, relay=False) as captured: app.run() self.assertEqual(captured.stdout.get_text(), 'path to COMBINE/OMEX archive') self.assertEqual(captured.stderr.get_text(), '') with self.App(argv=['-i', '', '-o', 'path to directory to save outputs']) as app: with capturer.CaptureOutput(merged=False, relay=False) as captured: with self.assertRaises(SystemExit) as cm: app.run() self.assertEqual(cm.exception.code, 'path to directory to save outputs') self.assertEqual(captured.stdout.get_text(), '') self.assertEqual(captured.stderr.get_text(), '') def test_error(self): def exec_sedml_docs_in_combine_archive(archive_filename, outputs_dirname): pass with self.assertRaises(ValueError): self.App = build_cli(None, '4.5.6', 'Test Simulator', '1.2.3', 'https://test-simulator.org', exec_sedml_docs_in_combine_archive) with self.assertRaises(ValueError): self.App = build_cli('test-simulator', '4.5.6', None, '1.2.3', 'https://test-simulator.org', exec_sedml_docs_in_combine_archive) with self.assertRaises(ValueError): self.App = build_cli('test-simulator', '4.5.6', 'Test Simulator', '1.2.3', 'https://test-simulator.org', None)
48.08046
136
0.573512
471
4,183
4.961783
0.178344
0.047925
0.068464
0.038511
0.829696
0.792041
0.73727
0.73727
0.69662
0.69662
0
0.013774
0.305761
4,183
86
137
48.639535
0.790978
0
0
0.594595
0
0
0.112599
0
0
0
0
0
0.337838
1
0.094595
false
0.013514
0.067568
0
0.175676
0.013514
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
1c6bfab346eab909fa68ef80c560cfebb15398c1
109
py
Python
directory_logger/__init__.py
rdavid369/directory_logger
0b82674b7829cb452f620f5de2b5dcbfe07b0c90
[ "MIT" ]
null
null
null
directory_logger/__init__.py
rdavid369/directory_logger
0b82674b7829cb452f620f5de2b5dcbfe07b0c90
[ "MIT" ]
null
null
null
directory_logger/__init__.py
rdavid369/directory_logger
0b82674b7829cb452f620f5de2b5dcbfe07b0c90
[ "MIT" ]
null
null
null
''' Attributes to be assigned to the package ''' from .directory import Directory from .logger import Logger
27.25
48
0.770642
15
109
5.6
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.155963
109
3
49
36.333333
0.913043
0.366972
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
1c96aab0f66f6d4a61731bde338360baf9d3b60c
245
py
Python
src/utils/tail_call_optimize.py
ethe/elanus
528ef897f8bad80bcda072206e8e01516532ed75
[ "MIT" ]
4
2016-10-29T10:37:23.000Z
2020-09-22T13:13:10.000Z
src/utils/tail_call_optimize.py
ethe/elanus
528ef897f8bad80bcda072206e8e01516532ed75
[ "MIT" ]
null
null
null
src/utils/tail_call_optimize.py
ethe/elanus
528ef897f8bad80bcda072206e8e01516532ed75
[ "MIT" ]
1
2020-07-13T03:18:51.000Z
2020-07-13T03:18:51.000Z
# -*- coding: utf-8 -*- def trampoline(f, *args, **kwargs): def trampolined_f(*args, **kwargs): result = f(*args, **kwargs) while callable(result): result = result() return result return trampolined_f
27.222222
39
0.567347
27
245
5.074074
0.481481
0.109489
0.240876
0
0
0
0
0
0
0
0
0.005682
0.281633
245
8
40
30.625
0.772727
0.085714
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0
0
0.571429
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
c721aa81379dcf2248e83d508d662aa8610fabf0
34
py
Python
logger/__init__.py
busitaro/watch-sharepoint-list
84b9be32df1ebc10d815b27fc82e40e1d513da61
[ "Apache-2.0" ]
null
null
null
logger/__init__.py
busitaro/watch-sharepoint-list
84b9be32df1ebc10d815b27fc82e40e1d513da61
[ "Apache-2.0" ]
null
null
null
logger/__init__.py
busitaro/watch-sharepoint-list
84b9be32df1ebc10d815b27fc82e40e1d513da61
[ "Apache-2.0" ]
null
null
null
from .logger import create_logger
17
33
0.852941
5
34
5.6
0.8
0
0
0
0
0
0
0
0
0
0
0
0.117647
34
1
34
34
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
c744fbccf367369bf7ae2690e441c4198a9ba592
5,909
py
Python
tests/unit/array/mixins/test_eval_class.py
MarkusSagen/docarray
2b535b2f3e361ec48d50877615b535f6dc361b45
[ "Apache-2.0" ]
1
2022-03-16T14:05:32.000Z
2022-03-16T14:05:32.000Z
tests/unit/array/mixins/test_eval_class.py
MarkusSagen/docarray
2b535b2f3e361ec48d50877615b535f6dc361b45
[ "Apache-2.0" ]
null
null
null
tests/unit/array/mixins/test_eval_class.py
MarkusSagen/docarray
2b535b2f3e361ec48d50877615b535f6dc361b45
[ "Apache-2.0" ]
null
null
null
import copy import numpy as np import pytest from docarray import DocumentArray, Document @pytest.mark.parametrize( 'storage, config', [ ('memory', {}), ('weaviate', {}), ('sqlite', {}), ('annlite', {'n_dim': 256}), ('qdrant', {'n_dim': 256}), ], ) @pytest.mark.parametrize( 'metric_fn, kwargs', [ ('r_precision', {}), ('precision_at_k', {}), ('hit_at_k', {}), ('average_precision', {}), ('reciprocal_rank', {}), ('recall_at_k', {'max_rel': 9}), ('f1_score_at_k', {'max_rel': 9}), ('ndcg_at_k', {}), ], ) def test_eval_mixin_perfect_match(metric_fn, kwargs, storage, config, start_storage): da1 = DocumentArray.empty(10) da1.embeddings = np.random.random([10, 256]) da1_index = DocumentArray(da1, storage=storage, config=config) da1.match(da1_index, exclude_self=True) r = da1.evaluate(da1, metric=metric_fn, strict=False, **kwargs) assert isinstance(r, float) assert r == 1.0 for d in da1: assert d.evaluations[metric_fn].value == 1.0 @pytest.mark.parametrize( 'storage, config', [ ('memory', {}), ('weaviate', {}), ('sqlite', {}), ('annlite', {'n_dim': 256}), ('qdrant', {'n_dim': 256}), ], ) @pytest.mark.parametrize( 'metric_fn, kwargs', [ ('r_precision', {}), ('precision_at_k', {}), ('hit_at_k', {}), ('average_precision', {}), ('reciprocal_rank', {}), ('recall_at_k', {'max_rel': 9}), ('f1_score_at_k', {'max_rel': 9}), ('ndcg_at_k', {}), ], ) def test_eval_mixin_zero_match(storage, config, metric_fn, kwargs): da1 = DocumentArray.empty(10) da1.embeddings = np.random.random([10, 256]) da1_index = DocumentArray(da1, storage=storage, config=config) da1.match(da1_index, exclude_self=True) da2 = copy.deepcopy(da1) da2.embeddings = np.random.random([10, 256]) da2_index = DocumentArray(da2, storage=storage, config=config) da2.match(da2_index, exclude_self=True) r = da1.evaluate(da2, metric=metric_fn, **kwargs) assert isinstance(r, float) assert r == 1.0 for d in da1: d: Document assert d.evaluations[metric_fn].value == 1.0 @pytest.mark.parametrize( 'storage, config', [ ('memory', {}), 
('weaviate', {}), ('sqlite', {}), ('annlite', {'n_dim': 256}), ('qdrant', {'n_dim': 256}), ], ) def test_diff_len_should_raise(storage, config): da1 = DocumentArray.empty(10) da2 = DocumentArray.empty(5, storage=storage, config=config) with pytest.raises(ValueError): da1.evaluate(da2, metric='precision_at_k') @pytest.mark.parametrize( 'storage, config', [ ('memory', {}), ('weaviate', {}), ('sqlite', {}), ('annlite', {'n_dim': 256}), ('qdrant', {'n_dim': 256}), ], ) def test_diff_hash_fun_should_raise(storage, config): da1 = DocumentArray.empty(10) da2 = DocumentArray.empty(10, storage=storage, config=config) with pytest.raises(ValueError): da1.evaluate(da2, metric='precision_at_k') @pytest.mark.parametrize( 'storage, config', [ ('memory', {}), ('weaviate', {}), ('sqlite', {}), ('annlite', {'n_dim': 3}), ('qdrant', {'n_dim': 3}), ], ) def test_same_hash_same_len_fun_should_work(storage, config): da1 = DocumentArray.empty(10) da1.embeddings = np.random.random([10, 3]) da1_index = DocumentArray(da1, storage=storage, config=config) da1.match(da1_index) da2 = DocumentArray.empty(10) da2.embeddings = np.random.random([10, 3]) da2_index = DocumentArray(da1, storage=storage, config=config) da2.match(da2_index) with pytest.raises(ValueError): da1.evaluate(da2, metric='precision_at_k') for d1, d2 in zip(da1, da2): d1.id = d2.id da1.evaluate(da2, metric='precision_at_k') @pytest.mark.parametrize( 'storage, config', [ ('memory', {}), ('weaviate', {}), ('sqlite', {}), ('annlite', {'n_dim': 3}), ('qdrant', {'n_dim': 3}), ], ) def test_adding_noise(storage, config): da = DocumentArray.empty(10) da.embeddings = np.random.random([10, 3]) da_index = DocumentArray(da, storage=storage, config=config) da.match(da_index, exclude_self=True) da2 = copy.deepcopy(da) for d in da2: d.matches.extend(DocumentArray.empty(10)) d.matches = d.matches.shuffle() assert da2.evaluate(da, metric='precision_at_k', k=10) < 1.0 for d in da2: assert 0.0 < d.evaluations['precision_at_k'].value < 1.0 
@pytest.mark.parametrize( 'storage, config', [ ('memory', {}), ('weaviate', {}), ('sqlite', {}), ('annlite', {'n_dim': 128}), ('qdrant', {'n_dim': 128}), ], ) @pytest.mark.parametrize( 'metric_fn, kwargs', [ ('recall_at_k', {}), ('f1_score_at_k', {}), ], ) def test_diff_match_len_in_gd(storage, config, metric_fn, kwargs): da1 = DocumentArray.empty(10) da1.embeddings = np.random.random([10, 128]) da1_index = DocumentArray(da1, storage=storage, config=config) da1.match(da1, exclude_self=True) da2 = copy.deepcopy(da1) da2.embeddings = np.random.random([10, 128]) da2_index = DocumentArray(da2, storage=storage, config=config) da2.match(da2_index, exclude_self=True) # pop some matches from first document da2[0].matches.pop(8) r = da1.evaluate(da2, metric=metric_fn, **kwargs) assert isinstance(r, float) np.testing.assert_allclose(r, 1.0, rtol=1e-2) # for d in da1: d: Document # f1_score does not yield 1 for the first document as one of the match is missing assert d.evaluations[metric_fn].value > 0.9
27.741784
89
0.588594
722
5,909
4.637119
0.157895
0.09319
0.062724
0.077658
0.788829
0.78644
0.734767
0.714456
0.701912
0.701912
0
0.043932
0.237265
5,909
212
90
27.872642
0.698913
0.019631
0
0.663043
0
0
0.136293
0
0
0
0
0
0.059783
1
0.038043
false
0
0.021739
0
0.059783
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c753bde738cc7add93425243bcdcef2b80181901
5,839
py
Python
fuse_examples/classification/skin_lesion/download.py
LaudateCorpus1/fuse-med-ml
56821be0824b599dfbbd7a4b158436cb2c300812
[ "Apache-2.0" ]
57
2021-06-23T12:16:08.000Z
2022-03-29T15:51:25.000Z
fuse_examples/classification/skin_lesion/download.py
LaudateCorpus1/fuse-med-ml
56821be0824b599dfbbd7a4b158436cb2c300812
[ "Apache-2.0" ]
3
2021-11-04T07:56:13.000Z
2022-03-15T08:33:13.000Z
fuse_examples/classification/skin_lesion/download.py
LaudateCorpus1/fuse-med-ml
56821be0824b599dfbbd7a4b158436cb2c300812
[ "Apache-2.0" ]
19
2021-07-19T13:29:22.000Z
2022-03-23T07:48:13.000Z
""" (C) Copyright 2021 IBM Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Created on June 30, 2021 """ import os import requests import zipfile import io import wget import logging def download_and_extract_isic(root_data: str, year: str = '2016'): """ Download images and metadata from ISIC challenge :param root_data: path where data should be located :param year: ISIC challenge year (2016 or 2017) """ lgr = logging.getLogger('Fuse') if year == '2016': # 2016 - Train if not os.path.exists(os.path.join(root_data, 'data/ISIC2016_Training_Data')): lgr.info('Extract ISIC-2016 training data...') url = 'https://isic-challenge-data.s3.amazonaws.com/2016/ISBI2016_ISIC_Part3_Training_Data.zip' r = requests.get(url) z = zipfile.ZipFile(io.BytesIO(r.content)) z.extractall(os.path.join(root_data, 'data')) os.rename(os.path.join(root_data, 'data/ISBI2016_ISIC_Part3_Training_Data'), os.path.join(root_data, 'data/ISIC2016_Training_Data')) lgr.info('Extracting ISIC-2016 training data: done') if not os.path.exists(os.path.join(root_data, 'data/ISIC2016_Training_GroundTruth.csv')): url = 'https://isic-challenge-data.s3.amazonaws.com/2016/ISBI2016_ISIC_Part3_Training_GroundTruth.csv' wget.download(url, os.path.join(root_data, 'data/ISIC2016_Training_GroundTruth.csv')) # 2016 - Test if not os.path.exists(os.path.join(root_data, 'data/ISIC2016_Test_Data')): lgr.info('Extract ISIC-2016 test data...') url = 'https://isic-challenge-data.s3.amazonaws.com/2016/ISBI2016_ISIC_Part3_Test_Data.zip' r = requests.get(url) z = 
zipfile.ZipFile(io.BytesIO(r.content)) z.extractall(os.path.join(root_data, 'data')) os.rename(os.path.join(root_data, 'data/ISBI2016_ISIC_Part3_Test_Data'), os.path.join(root_data, 'data/ISIC2016_Test_Data')) lgr.info('Extracting ISIC-2016 test data: done') if not os.path.exists(os.path.join(root_data, 'data/ISIC2016_Test_GroundTruth.csv')): url = 'https://isic-challenge-data.s3.amazonaws.com/2016/ISBI2016_ISIC_Part3_Test_GroundTruth.csv' wget.download(url, os.path.join(root_data, 'data/ISIC2016_Test_GroundTruth.csv')) if year == '2017': # 2017 - Train if not os.path.exists(os.path.join(root_data, 'data/ISIC2017_Training_Data')): lgr.info('\nExtract ISIC-2017 training data... (this may take a few minutes)') url = 'https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Training_Data.zip' r = requests.get(url) z = zipfile.ZipFile(io.BytesIO(r.content)) z.extractall(os.path.join(root_data, 'data')) os.rename(os.path.join(root_data, 'data/ISIC-2017_Training_Data'), os.path.join(root_data, 'data/ISIC2017_Training_Data')) lgr.info('Extracting ISIC-2017 training data: done') if not os.path.exists(os.path.join(root_data, 'data/ISIC2017_Training_GroundTruth.csv')): url = 'https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Training_Part3_GroundTruth.csv' wget.download(url, os.path.join(root_data, 'data/ISIC2017_Training_GroundTruth.csv')) # 2017 - Validation if not os.path.exists(os.path.join(root_data, 'data/ISIC2017_Validation_Data')): lgr.info('\nExtract ISIC-2017 validation data... 
(this may take a few minutes)') url = 'https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Validation_Data.zip' r = requests.get(url) z = zipfile.ZipFile(io.BytesIO(r.content)) z.extractall(os.path.join(root_data, 'data')) os.rename(os.path.join(root_data, 'data/ISIC-2017_Validation_Data'), os.path.join(root_data, 'data/ISIC2017_Validation_Data')) lgr.info('Extracting ISIC-2017 validation data: done') if not os.path.exists(os.path.join(root_data, 'data/ISIC2017_Validation_GroundTruth.csv')): url = 'https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Validation_Part3_GroundTruth.csv' wget.download(url, os.path.join(root_data, 'data/ISIC2017_Validation_GroundTruth.csv')) # 2017 - Test if not os.path.exists(os.path.join(root_data, 'data/ISIC2017_Test_Data')): lgr.info('\nExtracting ISIC-2017 test data... (this may take a few minutes)') url = 'https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Test_v2_Data.zip' r = requests.get(url) z = zipfile.ZipFile(io.BytesIO(r.content)) z.extractall(os.path.join(root_data, 'data')) os.rename(os.path.join(root_data, 'data/ISIC-2017_Test_v2_Data'), os.path.join(root_data, 'data/ISIC2017_Test_Data')) lgr.info('Extracting ISIC-2017 test data: done') if not os.path.exists(os.path.join(root_data, 'data/ISIC2017_Test_GroundTruth.csv')): url = 'https://isic-challenge-data.s3.amazonaws.com/2017/ISIC-2017_Test_v2_Part3_GroundTruth.csv' wget.download(url, os.path.join(root_data, 'data/ISIC2017_Test_GroundTruth.csv'))
46.34127
114
0.667923
818
5,839
4.617359
0.161369
0.063542
0.079428
0.111199
0.77919
0.776542
0.736299
0.736299
0.726767
0.726767
0
0.063945
0.207227
5,839
125
115
46.712
0.751998
0.136325
0
0.214286
0
0.085714
0.426259
0.156475
0
0
0
0
0
1
0.014286
false
0
0.085714
0
0.1
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c7a193c4e180fa2603a0ea8bfc2c963fd38a3c85
96
py
Python
prose_example/blog/admin.py
parisk/django-prose
c5962cc1a18b1ea9e8c11947b913a25951301d52
[ "MIT" ]
1
2022-01-11T08:04:07.000Z
2022-01-11T08:04:07.000Z
python/blog-site/blog/admin.py
duckpage/learning
5cb9af280c7da8f48d68f390f6bb95ec6fd60211
[ "MIT" ]
null
null
null
python/blog-site/blog/admin.py
duckpage/learning
5cb9af280c7da8f48d68f390f6bb95ec6fd60211
[ "MIT" ]
5
2019-05-30T20:12:34.000Z
2020-09-20T08:17:26.000Z
from django.contrib import admin from blog.models import Article admin.site.register(Article)
16
32
0.822917
14
96
5.642857
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.114583
96
5
33
19.2
0.929412
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c7a682672fd1cb2cd2710f75e4ea216ed4e4093e
33,841
py
Python
cloud_storage_layer/google/google_cloud_storage.py
rohandhanraj/Auto-AI-Pipeline
d5f39715c802db45afae0d5978d228bf0bcd2f0a
[ "MIT" ]
null
null
null
cloud_storage_layer/google/google_cloud_storage.py
rohandhanraj/Auto-AI-Pipeline
d5f39715c802db45afae0d5978d228bf0bcd2f0a
[ "MIT" ]
null
null
null
cloud_storage_layer/google/google_cloud_storage.py
rohandhanraj/Auto-AI-Pipeline
d5f39715c802db45afae0d5978d228bf0bcd2f0a
[ "MIT" ]
null
null
null
""" AWS SDK for Python (Boto3) to create, configure, and manage AWS services, such as Amazon Elastic Compute Cloud (Amazon EC2) and Amazon Simple Storage Service (Amazon S3) """ import json from project_library_layer.credentials.credential_data import get_google_cloud_storage_credentials from project_library_layer.initializer.initializer import Initializer from exception_layer.generic_exception.generic_exception import GenericException as GoogleCloudException import sys, os import dill import io import pandas as pd from google.cloud import storage from google.oauth2 import service_account class GoogleCloudStorage: def __init__(self, bucket_name=None, region_name=None): """ :param bucket_name:specify bucket name :param region_name: specify region name """ try: initial = Initializer() if bucket_name is None: self.bucket_name = initial.get_google_bucket_name() else: self.bucket_name = bucket_name credentials = service_account.Credentials.from_service_account_info( get_google_cloud_storage_credentials() ) self.client = storage.Client(credentials=credentials) existing_bucket_name = [bucket.name for bucket in self.client.list_buckets()] if self.bucket_name not in existing_bucket_name: self.bucket = self.client.create_bucket(self.bucket_name) else: self.bucket = self.client.get_bucket(self.bucket_name) except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to create object of GoogleCloudStorage in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, "__init__")) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def add_param(self, acceptable_param, additional_param): """ :param acceptable_param: specify param list can be added :param additional_param: accepts a dictionary object :return: list of param added to current instance of class """ try: self.__dict__.update((k, v) for k, v in additional_param.items() if k in acceptable_param) return [k for k in 
additional_param.keys() if k in acceptable_param] except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to add parameter in object in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.add_param.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def filter_param(self, acceptable_param, additional_param): """ :param acceptable_param: specify param list can be added :param additional_param: accepts a dictionary object :return: dict of param after filter """ try: accepted_param = {} accepted_param.update((k, v) for k, v in additional_param.items() if k in acceptable_param) return accepted_param except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to filter parameter in object in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.filter_param.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def remove_param(self, param): """ :param param: list of param argument need to deleted from instance object :return True if deleted successfully else false: """ try: for key in param: self.__dict__.pop(key) return True except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to remove parameter in object in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.remove_param.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def list_directory(self, directory_full_path=None): """ :param directory_full_path:directory path :return: {'status': True/False, 'message': 'message detail' , 'directory_list': directory_list will be added if status is True} """ try: if directory_full_path == "" or directory_full_path == "/" or directory_full_path is None: directory_full_path = "" else: if 
directory_full_path[-1] != "/": directory_full_path += "/" is_directory_exist = False directory_list = [] for key in self.client.list_blobs(self.bucket_name, prefix=directory_full_path): is_directory_exist = True dir_name = str(key.name).replace(directory_full_path, "") slash_index = dir_name.find("/") if slash_index >= 0: name_after_slash = dir_name[slash_index + 1:] if len(name_after_slash) <= 0: directory_list.append(dir_name) else: if dir_name != "": directory_list.append(dir_name) if is_directory_exist: return {'status': True, 'message': 'Directory [{0}] exist'.format(directory_full_path) , 'directory_list': directory_list} else: return {'status': False, 'message': 'Directory [{0}] does not exist'.format(directory_full_path)} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to list directory in object in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.list_directory.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def list_files(self, directory_full_path): """ :param directory_full_path: directory :return: {'status': True/False, 'message': 'message detail' , 'files_list': files_list will be added if status is True} """ try: if directory_full_path == "" or directory_full_path == "/" or directory_full_path is None: directory_full_path = "" else: if directory_full_path[-1] != "/": directory_full_path += "/" is_directory_exist = False list_files = [] for key in self.client.list_blobs(self.bucket_name, prefix=directory_full_path): is_directory_exist = True file_name = str(key.name).replace(directory_full_path, "") if "/" not in file_name and file_name != "": list_files.append(file_name) if is_directory_exist: return {'status': True, 'message': 'Directory [{0}] present'.format(directory_full_path) , 'files_list': list_files} else: return {'status': False, 'message': 'Directory [{0}] is not 
present'.format(directory_full_path)} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to list files in object in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.list_files.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def list_buckets(self): """ :return: All bucket names available in your gcp cloud storage {'status':True,'message':'message','bucket_list':'bucket_list} """ try: existing_bucket = [bucket.name for bucket in self.client.list_buckets()] return {'status': True, 'message': 'Bucket retrived', 'bucket_list': existing_bucket} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to list bucket in object in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.list_buckets.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def create_bucket(self, bucket_name, over_write=False): """ :param bucket_name: Name of bucket :param over_write: If true then existing bucket content will be removed :return: True if created else False """ try: bucket_list = [bucket.name for bucket in self.client.list_buckets()] if bucket_name not in bucket_list: self.client.create_bucket(bucket_name) return {'status': True, 'message': "Bucket {0} created successfully".format(bucket_name) } else: return {'status': False, 'message': "Bukcet {0} alredy exists".format(bucket_name)} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to create bucket in object in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.create_bucket.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def create_directory(self, directory_full_path, over_write=False, **kwargs): """ :param 
directory_full_path: provide full directory path along with name :param over_write: default False if accept True then overwrite existing directory if exist :return {'status': True/False, 'message': 'message detail'} """ try: if directory_full_path == "" or directory_full_path == "/" or directory_full_path is None: return {'status': False, 'message': 'Provide directory name'} directory_full_path = self.update_directory_full_path_string(directory_full_path) response = self.list_directory(directory_full_path) if over_write and response['status']: self.remove_directory(directory_full_path) if not over_write: if response['status']: return {'status': False, 'message': 'Directory is already present. try with overwrite option.'} possible_directory = directory_full_path[:-1].split("/") directory_name = "" for dir_name in possible_directory: directory_name += dir_name + "/" response = self.list_directory(directory_name) if not response['status']: self.bucket.blob(directory_name).upload_from_string("") return {'status': True, 'message': 'Directory [{0}] created successfully '.format(directory_full_path)} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to create directory in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.create_directory.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def remove_directory(self, directory_full_path): """ :param directory_full_path:provide full directory path along with name kindly provide "" or "/" to remove all directory and file from bucket. 
:return: {'status': True/False, 'message': 'message detail'} """ try: directory_full_path = self.update_directory_full_path_string(directory_full_path) is_directory_found = False prefix_file_name = directory_full_path for blob in self.client.list_blobs(self.bucket_name, prefix=directory_full_path): is_directory_found = True blob.delete() if is_directory_found: return {'status': True, 'message': 'Directory [{0}] removed.'.format(directory_full_path)} else: return {'status': False, 'message': 'Directory [{0}] is not present.'.format(directory_full_path)} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to delete directory in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.remove_directory.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def is_file_present(self, directory_full_path, file_name): """ :param directory_full_path:directory_full_path :param file_name: Name of file :return: {'status': True/False, 'message': 'message detail'} """ try: directory_full_path = self.update_directory_full_path_string(directory_full_path) response = self.list_files(directory_full_path) if response['status']: if file_name in response['files_list']: return {'status': True, 'message': 'File [{0}] is present.'.format(directory_full_path + file_name)} return {'status': False, 'message': 'File [{0}] is not present.'.format(directory_full_path + file_name)} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to delete directory in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.is_file_present.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def is_directory_present(self, directory_full_path): """ :param directory_full_path: directory path :return: {'status': True/False, 'message': 'message detail"} """ 
try: directory_full_path = self.update_directory_full_path_string(directory_full_path) response = self.list_directory(directory_full_path) if response['status']: return {'status': True, 'message': 'Directory [{0}] is present'.format(directory_full_path)} return {'status': False, 'message': 'Directory [{0}] is not present'.format(directory_full_path)} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to delete directory in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.is_file_present.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def upload_file(self, directory_full_path, file_name, stream_data, local_file_path=False, over_write=False): """ :param directory_full_path: s3 bucket directory :param file_name: name you want to specify for file in s3 bucket :param local_file_path: your local system file path of file needs to be uploaded :param over_write:True if wanted to replace target file if present :return:{'status': True/False, 'message': 'message detail'} """ try: if directory_full_path == "" or directory_full_path == "/": directory_full_path = "" else: if directory_full_path[-1] != "/": directory_full_path += "/" response = self.is_directory_present(directory_full_path) if not response['status']: response = self.create_directory(directory_full_path) if not response['status']: return response response = self.is_file_present(directory_full_path, file_name) if response['status'] and not over_write: return {'status': False, 'message': 'File [{0}] already present in directory [{1}]. 
try with overwrite option'.format( file_name, directory_full_path)} blob = self.bucket.blob(directory_full_path + file_name) if local_file_path: blob.upload_from_filename(local_file_path) else: if isinstance(stream_data,str): stream_data=io.StringIO(stream_data) blob.upload_from_file(stream_data) return {'status': True, 'message': 'File [{0}] uploaded to directory [{1}]'.format(file_name, directory_full_path)} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to upload file in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.upload_file.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def download_file(self, directory_full_path, file_name, local_system_directory=False): """ :param directory_full_path:directory_full_path :param file_name: Name of file :param local_system_directory: file location within your system :return: {'status': True/False, 'message':'message detail'} """ try: directory_full_path = self.update_directory_full_path_string(directory_full_path) response = self.is_file_present(directory_full_path=directory_full_path, file_name=file_name) if local_system_directory: local_system_directory = self.update_directory_full_path_string(local_system_directory) if not response['status']: return response blob = self.bucket.blob(directory_full_path + file_name) if local_system_directory: blob.download_to_filename(local_system_directory+file_name) return {'status': True, 'message': 'file [{0}] is downloaded in your system at location [{1}] ' .format(file_name, local_system_directory)} else: data = io.BytesIO() blob.download_to_file(data) return {'status': True, 'message': 'file [{0}] is downloaded in your system at location [{1}] ' .format(file_name, local_system_directory), 'file_object': data} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to upload file in module [{0}] class [{1}] method 
[{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.download_file.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def remove_file(self, directory_full_path, file_name): """ :param directory_full_path: provide full directory path along with name :param file_name: file name with extension if possible :return: {'status': True/False, 'message':'message detail'} """ try: directory_full_path = self.update_directory_full_path_string(directory_full_path) response = self.is_file_present(directory_full_path, file_name) if response['status']: blob = self.bucket.blob(directory_full_path + file_name) blob.delete() return {'status': True, 'message': 'File [{}] deleted from directory [{}]'.format(file_name, directory_full_path)} return {'status': False, 'message': response['message']} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to remove file in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.remove_file.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def write_file_content(self, directory_full_path, file_name, content, over_write=False): """ :param directory_full_path: provide full directory path along with name :param file_name: file name with extension if possible :param content: content need to store in file :param over_write: default False if accept True then overwrite file in directory if exist :return: {'status': True/False, 'message':'message detail'} """ try: directory_full_path = self.update_directory_full_path_string(directory_full_path) response = self.is_directory_present(directory_full_path) if not response['status']: response = self.create_directory(directory_full_path) if not response['status']: return {'status': False, 'message': 'Failed to created directory [{0}] [{1}]'.format(directory_full_path, response['message'])} response = 
self.is_file_present(directory_full_path, file_name) if response['status'] and not over_write: return {'status': False, "message": "File [{0}] is already present in directory [{1}]. try with over write option".format( file_name, directory_full_path)} blob = self.bucket.blob(directory_full_path + file_name) blob.upload_from_file(io.BytesIO(dill.dumps(content))) return {'status': True, 'message': 'File [{0}] is created in directory [{1}]'.format(file_name, directory_full_path)} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to create file with content in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.write_file_content.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def update_directory_full_path_string(self, directory_full_path): """ :param directory_full_path: directory_full_path :return: update the accepted directory """ try: if directory_full_path == "" or directory_full_path == "/": directory_full_path = "" else: if directory_full_path[-1] != "/": directory_full_path = directory_full_path + "/" return directory_full_path except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to create file with content in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.update_directory_full_path_string.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def read_csv_file(self, directory_full_path, file_name): """ :param directory_full_path: directory_full_path :param file_name: file_name :return: {'status': True/False, 'message': 'message detail', 'data_frame': if status True data frame will be returned} """ try: directory_full_path = self.update_directory_full_path_string(directory_full_path) response = self.is_file_present(directory_full_path, file_name) if not response['status']: return 
response blob = self.bucket.blob(directory_full_path + file_name) content=io.BytesIO() blob.download_to_file(content) content.seek(0) df = pd.read_csv(content) return {'status': True, 'message': 'File [{0}] has been read into data frame'.format(directory_full_path + file_name), 'data_frame': df} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to create file with content in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.update_directory_full_path_string.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def read_json_file(self,directory_full_path, file_name): try: directory_full_path = self.update_directory_full_path_string(directory_full_path) response = self.is_file_present(directory_full_path, file_name) if not response['status']: return response content = io.BytesIO() blob = self.bucket.blob(directory_full_path + file_name) blob.download_to_file(content) content.seek(0) return {'status': True, 'message': 'File [{0}] has been read'.format(directory_full_path + file_name), 'file_content': json.loads(content.getvalue())} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to create file with content in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.read_json_file.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def read_file_content(self, directory_full_path, file_name): """ :param directory_full_path: directory_full_path :param file_name: file_name :return: {'status': True/False, 'message': 'message_detail', 'file_content':'If status True then Return object which was used to generate the file with write file content'} """ try: directory_full_path = self.update_directory_full_path_string(directory_full_path) response = self.is_file_present(directory_full_path, file_name) if not 
response['status']: return response content = io.BytesIO() blob=self.bucket.blob(directory_full_path+file_name) blob.download_to_file(content) content.seek(0) return {'status': True, 'message': 'File [{0}] has been read'.format(directory_full_path + file_name), 'file_content': dill.loads(content.getvalue())} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to create file with content in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.read_file_content.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def move_file(self, source_directory_full_path, target_directory_full_path, file_name, over_write=False, bucket_name=None): """ :param source_directory_full_path: provide source directory path along with name :param target_directory_full_path: provide target directory path along with name :param file_name: file need to move :param over_write: default False if accept True then overwrite file in target directory if exist :return: {'status': True/False, 'message': 'message detail'} """ try: response = self.copy_file(source_directory_full_path, target_directory_full_path, file_name, over_write, bucket_name) if not response['status']: return {'status': False, 'message': 'Failed to move file due to [{}]'.format(response['message'])} else: if bucket_name is None: bucket_name = self.bucket_name self.remove_file(source_directory_full_path, file_name) return {'status': True, 'message': 'File moved successfully from bucket: [{0}] directory [{1}] to bucket:[{2}] ' 'directory[{3}]'.format(self.bucket_name, source_directory_full_path + file_name, bucket_name, target_directory_full_path + file_name)} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to create file with content in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, 
self.move_file.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e def copy_file(self, source_directory_full_path, target_directory_full_path, file_name, over_write=False, bucket_name=None): """ :param source_directory_full_path: provide source directory path along with name :param target_directory_full_path: provide target directory path along with name :param file_name: file need to copy :param over_write: default False if accept True then overwrite file in target directory if exist :return: {'status': True/False, 'message': 'message detail'} """ try: target_directory_full_path = self.update_directory_full_path_string(target_directory_full_path) source_directory_full_path = self.update_directory_full_path_string(source_directory_full_path) response = self.is_file_present(source_directory_full_path, file_name) if not response['status']: return {'status': False, 'message': 'Source file [{0}] is not present'.format(source_directory_full_path + file_name)} if bucket_name is None: bucket_name = self.bucket_name gcp_obj = self else: bucket_name = bucket_name gcp_obj = GoogleCloudStorage(bucket_name=bucket_name) response = gcp_obj.is_file_present(target_directory_full_path, file_name) if response['status'] and not over_write: return {'status': False, 'message': 'Bucket:[{0}] target directory ' '[{1}] contains file [{2}] please' ' try with over write option.'.format(bucket_name, target_directory_full_path, file_name )} response = gcp_obj.is_directory_present(target_directory_full_path) if not response['status']: response = gcp_obj.create_directory(target_directory_full_path) if not response['status']: return {'status': False, 'message': 'Failed to created' ' target directory [{}] in bucket:[{}]'.format( target_directory_full_path, bucket_name )} blob=self.bucket.blob(source_directory_full_path+file_name) self.bucket.copy_blob(blob,gcp_obj.bucket,target_directory_full_path+file_name) return {'status': True, 'message': 'File copied 
successfully from bucket: [{0}] directory [{1}] to bucket:[{2}] ' 'directory[{3}]'.format(self.bucket_name, source_directory_full_path + file_name, bucket_name, target_directory_full_path + file_name)} except Exception as e: google_cloud_exception = GoogleCloudException( "Failed to create file with content in module [{0}] class [{1}] method [{2}]" .format(GoogleCloudStorage.__module__.__str__(), GoogleCloudStorage.__name__, self.copy_file.__name__)) raise Exception(google_cloud_exception.error_message_detail(str(e), sys)) from e
53.125589
131
0.602051
3,652
33,841
5.225904
0.059146
0.117841
0.1541
0.045114
0.803406
0.766623
0.737805
0.704899
0.686927
0.669741
0
0.005347
0.30924
33,841
636
132
53.20912
0.811088
0.129133
0
0.601322
0
0
0.127381
0
0
0
0
0
0
1
0.048458
false
0
0.022026
0
0.167401
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c7aaa5c6de4d8b7510fbcdc33320c330f5480814
187
py
Python
core/tests/unit/test_exceptions.py
wattlecloud/foundation-server
e1467d192a7729fa4f116c80dcd001bfd58662e8
[ "Apache-2.0" ]
null
null
null
core/tests/unit/test_exceptions.py
wattlecloud/foundation-server
e1467d192a7729fa4f116c80dcd001bfd58662e8
[ "Apache-2.0" ]
1
2021-07-20T00:28:27.000Z
2021-07-20T00:28:27.000Z
core/tests/unit/test_exceptions.py
wattlecloud/foundation-server
e1467d192a7729fa4f116c80dcd001bfd58662e8
[ "Apache-2.0" ]
null
null
null
from wattle.core.const import CoreErrorType from wattle.core.exceptions import CoreError def test_core_error(): core_error = CoreError("TEST") assert core_error.type == "TEST"
20.777778
44
0.759358
25
187
5.52
0.52
0.195652
0.202899
0
0
0
0
0
0
0
0
0
0.149733
187
8
45
23.375
0.867925
0
0
0
0
0
0.042781
0
0
0
0
0
0.2
1
0.2
false
0
0.4
0
0.6
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
c7e4f95cba844a9f83ac15bb9e854b7c964d36ee
6,139
py
Python
Linear Associator/test_linear_associator.py
FurkhanShaikh/Neural-Networks
7e36140cb54204b52c8f51c6a52c3cd77bf9105c
[ "MIT" ]
null
null
null
Linear Associator/test_linear_associator.py
FurkhanShaikh/Neural-Networks
7e36140cb54204b52c8f51c6a52c3cd77bf9105c
[ "MIT" ]
null
null
null
Linear Associator/test_linear_associator.py
FurkhanShaikh/Neural-Networks
7e36140cb54204b52c8f51c6a52c3cd77bf9105c
[ "MIT" ]
null
null
null
# Shaikh, Mohammed Furkhan # 2020-03-01 import numpy as np from linear_associator import LinearAssociator def test_weights(): input_dimensions = 4 number_of_nodes = 9 model = LinearAssociator(input_dimensions=input_dimensions, number_of_nodes=number_of_nodes, transfer_function="Hard_limit") weights=model.get_weights() assert weights.ndim == 2 and \ weights.shape[0] == number_of_nodes and \ weights.shape[1] == (input_dimensions) model.set_weights(np.ones((number_of_nodes, input_dimensions))) weights = model.get_weights() assert weights.ndim == 2 and \ weights.shape[0] == number_of_nodes and \ weights.shape[1] == (input_dimensions) assert np.array_equal(model.get_weights(), np.ones((number_of_nodes, input_dimensions))) model.initialize_weights(seed=3) weights = np.array([[ 1.78862847, 0.43650985, 0.09649747, -1.8634927 ], [-0.2773882 , -0.35475898, -0.08274148, -0.62700068], [-0.04381817, -0.47721803, -1.31386475, 0.88462238], [ 0.88131804, 1.70957306, 0.05003364, -0.40467741], [-0.54535995, -1.54647732, 0.98236743, -1.10106763], [-1.18504653, -0.2056499 , 1.48614836, 0.23671627], [-1.02378514, -0.7129932 , 0.62524497, -0.16051336], [-0.76883635, -0.23003072, 0.74505627, 1.97611078], [-1.24412333, -0.62641691, -0.80376609, -2.41908317]]) np.testing.assert_array_almost_equal(model.get_weights(), weights, decimal=3) def test_predict_linear(): input_dimensions = 2 number_of_nodes = 5 model = LinearAssociator(input_dimensions=input_dimensions, number_of_nodes=number_of_nodes, transfer_function="Linear") model.initialize_weights(seed=1) X_train = np.array([[-1.43815556, 0.10089809, -1.25432937, 1.48410426], [-1.81784194, 0.42935033, -1.2806198, 0.06527391]]) y = np.array([[-1.22398485, -0.09876447, -1.25403672, 2.37076614], [ 2.7100805 , -0.51397095, 2.0365662 , -0.8538988 ], [ 2.93924278, -0.90084842, 1.86188982, 1.13412472], [-1.12555691, -0.15077626, -1.21375201, 2.53979562], [-0.00551192, -0.07487682, -0.08083147, 0.4572099 ]]) y_hat = model.predict(X_train) 
np.testing.assert_array_almost_equal(y_hat, y, decimal=4) def test_predict_hard_limit(): input_dimensions = 2 number_of_nodes = 5 model = LinearAssociator(input_dimensions=input_dimensions, number_of_nodes=number_of_nodes, transfer_function="Hard_limit") model.initialize_weights(seed=1) X_train = np.array([[-1.43815556, 0.10089809, -1.25432937, 1.48410426], [-1.81784194, 0.42935033, -1.2806198, 0.06527391]]) y = np.array([[0, 0, 0, 1], [1, 0, 1, 0], [1, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 1]]) y_hat = model.predict(X_train) np.testing.assert_array_almost_equal(y_hat, y, decimal=4) def test_pseudo_inverse_fit(): input_dimensions = 5 number_of_nodes = 5 model = LinearAssociator(input_dimensions=input_dimensions, number_of_nodes=number_of_nodes, transfer_function="Linear") model.initialize_weights(seed=1) X_train = np.random.randn(input_dimensions, 10) out = model.predict(X_train) model.set_weights(np.zeros_like(model.get_weights())) model.fit_pseudo_inverse(X_train, out) new_out = model.predict(X_train) np.testing.assert_array_almost_equal(out, new_out, decimal=4) def test_train_linear_delta(): input_dimensions = 5 number_of_nodes = 5 for i in range(10): model = LinearAssociator(input_dimensions=input_dimensions, number_of_nodes=number_of_nodes, transfer_function="Linear") model.initialize_weights(seed=i+1) X_train = np.random.randn(input_dimensions, 100) out = model.predict(X_train) model.set_weights(np.random.randn(*model.get_weights().shape)) model.train(X_train, out, batch_size=10, num_epochs=50, alpha=0.1, gamma=0.1, learning="delta") new_out = model.predict(X_train) np.testing.assert_array_almost_equal(out, new_out, decimal=4) def test_train_hardlim_delta(): input_dimensions = 5 number_of_nodes = 5 for i in range(10): model = LinearAssociator(input_dimensions=input_dimensions, number_of_nodes=number_of_nodes, transfer_function="Hard_limit") model.initialize_weights(seed=i + 1) X_train = np.random.randn(input_dimensions, 100) out = model.predict(X_train) 
model.set_weights(np.random.randn(*model.get_weights().shape)) model.train(X_train, out, batch_size=10, num_epochs=200, alpha=0.1, gamma=0.1, learning="Delta") new_out = model.predict(X_train) np.testing.assert_array_almost_equal(out, new_out, decimal=4) def test_calculate_mean_squared_error(): input_dimensions = 5 number_of_nodes = 5 number_of_samples=18 model = LinearAssociator(input_dimensions=input_dimensions, number_of_nodes=number_of_nodes, transfer_function="Linear") model.initialize_weights(seed=1) X_train = np.random.randn(input_dimensions, number_of_samples) assert model.calculate_mean_squared_error(X_train,model.predict(X_train)) == 0 target=np.random.randn(number_of_nodes,number_of_samples) mse=model.calculate_mean_squared_error(X_train,target) np.testing.assert_array_almost_equal(mse, 3.7144714504979635, decimal=4) model = LinearAssociator(input_dimensions=input_dimensions, number_of_nodes=number_of_nodes, transfer_function="Hard_limit") model.initialize_weights(seed=1) X_train = np.random.randn(input_dimensions, number_of_samples) assert model.calculate_mean_squared_error(X_train,model.predict(X_train)) == 0 target=np.random.randn(number_of_nodes,number_of_samples) mse=model.calculate_mean_squared_error(X_train,target) np.testing.assert_array_almost_equal(mse, 1.1234558948088766, decimal=4)
46.507576
105
0.682033
840
6,139
4.711905
0.182143
0.068722
0.09525
0.05811
0.759222
0.755179
0.747347
0.735725
0.715008
0.705407
0
0.15611
0.197589
6,139
132
106
46.507576
0.647381
0.005701
0
0.660714
0
0
0.012393
0
0
0
0
0
0.116071
1
0.0625
false
0
0.017857
0
0.080357
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
1bfd120e9811c46187cdf521348a2b0186a9c053
205
py
Python
snippets/numpy_testscript.py
rhishi/python-snippets
60020d3a187d7687b38b6b58f74ceb03a37983b9
[ "Apache-2.0" ]
null
null
null
snippets/numpy_testscript.py
rhishi/python-snippets
60020d3a187d7687b38b6b58f74ceb03a37983b9
[ "Apache-2.0" ]
null
null
null
snippets/numpy_testscript.py
rhishi/python-snippets
60020d3a187d7687b38b6b58f74ceb03a37983b9
[ "Apache-2.0" ]
null
null
null
# IPython log file from numpy import * arange(10) zeros(10) zeros(10, 10) zeros((10, 10)) img = zeros((10, 10, 10)) img[:,:,0] a = arange(10) a[2:5] a[2:2] a[2:1] a[2:3] a[::-1] a[0:10:-1] a[10:0:-1]
9.318182
25
0.55122
48
205
2.354167
0.333333
0.247788
0.238938
0.19469
0
0
0
0
0
0
0
0.223529
0.170732
205
21
26
9.761905
0.441176
0.078049
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.066667
0
0.066667
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
401e2697ef604753724e24961ffaaa9a7339e92a
17,057
py
Python
tests/providers/googledrive/test_provider.py
laurenrevere/waterbutler
4f358d94376997cd9592b18b67d13a7482c7aa2a
[ "Apache-2.0" ]
null
null
null
tests/providers/googledrive/test_provider.py
laurenrevere/waterbutler
4f358d94376997cd9592b18b67d13a7482c7aa2a
[ "Apache-2.0" ]
null
null
null
tests/providers/googledrive/test_provider.py
laurenrevere/waterbutler
4f358d94376997cd9592b18b67d13a7482c7aa2a
[ "Apache-2.0" ]
null
null
null
import pytest from tests.utils import async import io import aiohttpretty from waterbutler.core import streams from waterbutler.core import exceptions from waterbutler.core.path import WaterButlerPath from waterbutler.providers.googledrive import settings as ds from waterbutler.providers.googledrive import GoogleDriveProvider from waterbutler.providers.googledrive.provider import GoogleDrivePath from waterbutler.providers.googledrive.metadata import GoogleDriveRevision from waterbutler.providers.googledrive.metadata import GoogleDriveFileMetadata from waterbutler.providers.googledrive.metadata import GoogleDriveFolderMetadata from tests.providers.googledrive import fixtures @pytest.fixture def file_content(): return b'SLEEP IS FOR THE WEAK GO SERVE STREAMS' @pytest.fixture def file_like(file_content): return io.BytesIO(file_content) @pytest.fixture def file_stream(file_like): return streams.FileStreamReader(file_like) @pytest.fixture def auth(): return { 'name': 'cat', 'email': 'cat@cat.com', } @pytest.fixture def credentials(): return {'token': 'hugoandkim'} @pytest.fixture def settings(): return { 'folder': { 'id': '19003e', 'name': '/conrad/birdie', }, } @pytest.fixture def provider(auth, credentials, settings): return GoogleDriveProvider(auth, credentials, settings) class TestCRUD: # @async # @pytest.mark.aiohttpretty # def test_download_drive(self, provider): # path = '/birdie\'"".jpg' # item = fixtures.list_file['items'][0] # query = provider._build_query(provider.folder['id'], title=path.lstrip('/')) # assert 'birdie\\\'\\"\\".jpg' in query @async @pytest.mark.aiohttpretty def test_download_drive(self, provider): body = b'we love you conrad' item = fixtures.list_file['items'][0] path = WaterButlerPath('/birdie.jpg', _ids=(provider.folder['id'], item['id'])) download_file_url = item['downloadUrl'] metadata_url = provider.build_url('files', path.identifier) aiohttpretty.register_json_uri('GET', metadata_url, body=item) aiohttpretty.register_uri('GET', 
download_file_url, body=body, auto_length=True) result = yield from provider.download(path) content = yield from result.read() assert content == body @async @pytest.mark.aiohttpretty def test_download_drive_revision(self, provider): revision = 'oldest' body = b'we love you conrad' item = fixtures.list_file['items'][0] path = WaterButlerPath('/birdie.jpg', _ids=(provider.folder['id'], item['id'])) download_file_url = item['downloadUrl'] metadata_url = provider.build_url('files', path.identifier) revision_url = provider.build_url('files', item['id'], 'revisions', revision, alt='json') aiohttpretty.register_json_uri('GET', revision_url, body=item) aiohttpretty.register_json_uri('GET', metadata_url, body=item) aiohttpretty.register_uri('GET', download_file_url, body=body, auto_length=True) result = yield from provider.download(path, revision=revision) content = yield from result.read() assert content == body @async @pytest.mark.aiohttpretty def test_download_docs(self, provider): body = b'we love you conrad' item = fixtures.docs_file_metadata path = WaterButlerPath('/birdie.jpg', _ids=(provider.folder['id'], item['id'])) metadata_url = provider.build_url('files', path.identifier) revisions_url = provider.build_url('files', item['id'], 'revisions') download_file_url = item['exportLinks']['application/vnd.openxmlformats-officedocument.wordprocessingml.document'] aiohttpretty.register_json_uri('GET', metadata_url, body=item) aiohttpretty.register_uri('GET', download_file_url, body=body, auto_length=True) aiohttpretty.register_json_uri('GET', revisions_url, body={'items': [{'id': 'foo'}]}) result = yield from provider.download(path) content = yield from result.read() assert content == body @async @pytest.mark.aiohttpretty def test_upload_create(self, provider, file_stream): upload_id = '7' item = fixtures.list_file['items'][0] path = WaterButlerPath('/birdie.jpg', _ids=(provider.folder['id'], None)) start_upload_url = provider._build_upload_url('files', 
uploadType='resumable') finish_upload_url = provider._build_upload_url('files', uploadType='resumable', upload_id=upload_id) aiohttpretty.register_json_uri('PUT', finish_upload_url, body=item) aiohttpretty.register_uri('POST', start_upload_url, headers={'LOCATION': 'http://waterbutler.io?upload_id={}'.format(upload_id)}) result, created = yield from provider.upload(file_stream, path) expected = GoogleDriveFileMetadata(item, path).serialized() assert created is True assert result == expected assert aiohttpretty.has_call(method='PUT', uri=finish_upload_url) assert aiohttpretty.has_call(method='POST', uri=start_upload_url) @async @pytest.mark.aiohttpretty def test_upload_doesnt_unquote(self, provider, file_stream): upload_id = '7' item = fixtures.list_file['items'][0] path = GoogleDrivePath('/birdie%2F %20".jpg', _ids=(provider.folder['id'], None)) start_upload_url = provider._build_upload_url('files', uploadType='resumable') finish_upload_url = provider._build_upload_url('files', uploadType='resumable', upload_id=upload_id) aiohttpretty.register_json_uri('PUT', finish_upload_url, body=item) aiohttpretty.register_uri('POST', start_upload_url, headers={'LOCATION': 'http://waterbutler.io?upload_id={}'.format(upload_id)}) result, created = yield from provider.upload(file_stream, path) expected = GoogleDriveFileMetadata(item, path).serialized() assert created is True assert result == expected assert aiohttpretty.has_call(method='POST', uri=start_upload_url) assert aiohttpretty.has_call(method='PUT', uri=finish_upload_url) @async @pytest.mark.aiohttpretty def test_upload_update(self, provider, file_stream): upload_id = '7' item = fixtures.list_file['items'][0] path = WaterButlerPath('/birdie.jpg', _ids=(provider.folder['id'], item['id'])) start_upload_url = provider._build_upload_url('files', path.identifier, uploadType='resumable') finish_upload_url = provider._build_upload_url('files', path.identifier, uploadType='resumable', upload_id=upload_id) 
aiohttpretty.register_json_uri('PUT', finish_upload_url, body=item) aiohttpretty.register_uri('PUT', start_upload_url, headers={'LOCATION': 'http://waterbutler.io?upload_id={}'.format(upload_id)}) result, created = yield from provider.upload(file_stream, path) assert aiohttpretty.has_call(method='PUT', uri=start_upload_url) assert aiohttpretty.has_call(method='PUT', uri=finish_upload_url) assert created is False expected = GoogleDriveFileMetadata(item, path).serialized() assert result == expected @async @pytest.mark.aiohttpretty def test_upload_create_nested(self, provider, file_stream): upload_id = '7' item = fixtures.list_file['items'][0] path = WaterButlerPath( '/ed/sullivan/show.mp3', _ids=[str(x) for x in range(3)] ) start_upload_url = provider._build_upload_url('files', uploadType='resumable') finish_upload_url = provider._build_upload_url('files', uploadType='resumable', upload_id=upload_id) aiohttpretty.register_uri('POST', start_upload_url, headers={'LOCATION': 'http://waterbutler.io?upload_id={}'.format(upload_id)}) aiohttpretty.register_json_uri('PUT', finish_upload_url, body=item) result, created = yield from provider.upload(file_stream, path) assert aiohttpretty.has_call(method='POST', uri=start_upload_url) assert aiohttpretty.has_call(method='PUT', uri=finish_upload_url) assert created is True expected = GoogleDriveFileMetadata(item, path).serialized() assert result == expected @async @pytest.mark.aiohttpretty def test_delete(self, provider): item = fixtures.list_file['items'][0] path = WaterButlerPath('/birdie.jpg', _ids=(None, item['id'])) delete_url = provider.build_url('files', item['id']) aiohttpretty.register_uri('DELETE', delete_url, status=204) result = yield from provider.delete(path) assert result is None assert aiohttpretty.has_call(method='DELETE', uri=delete_url) @async @pytest.mark.aiohttpretty def test_delete_folder(self, provider): item = fixtures.folder_metadata delete_url = provider.build_url('files', item['id']) path = 
WaterButlerPath('/foobar/', _ids=('doesntmatter', item['id'])) aiohttpretty.register_uri('DELETE', delete_url, status=204) result = yield from provider.delete(path) assert aiohttpretty.has_call(method='DELETE', uri=delete_url) @async @pytest.mark.aiohttpretty def test_delete_not_existing(self, provider): with pytest.raises(exceptions.NotFoundError): yield from provider.delete(WaterButlerPath('/foobar/')) class TestMetadata: @async @pytest.mark.aiohttpretty def test_metadata_file_root(self, provider): path = WaterButlerPath('/birdie.jpg', _ids=(provider.folder['id'], fixtures.list_file['items'][0]['id'])) list_file_url = provider.build_url('files', path.identifier) aiohttpretty.register_json_uri('GET', list_file_url, body=fixtures.list_file['items'][0]) result = yield from provider.metadata(path) expected = GoogleDriveFileMetadata(fixtures.list_file['items'][0], path).serialized() assert result == expected @async @pytest.mark.aiohttpretty def test_metadata_file_root_not_found(self, provider): path = '/birdie.jpg' path = WaterButlerPath('/birdie.jpg', _ids=(provider.folder['id'], None)) with pytest.raises(exceptions.MetadataError) as exc_info: yield from provider.metadata(path) assert exc_info.value.code == 404 @async @pytest.mark.aiohttpretty def test_metadata_file_nested(self, provider): path = GoogleDrivePath( '/hugo/kim/pins', _ids=[str(x) for x in range(4)] ) item = fixtures.generate_list(3)['items'][0] url = provider.build_url('files', path.identifier) aiohttpretty.register_json_uri('GET', url, body=item) result = yield from provider.metadata(path) expected = GoogleDriveFileMetadata(item, path).serialized() assert result == expected assert aiohttpretty.has_call(method='GET', uri=url) # @async # @pytest.mark.aiohttpretty # def test_metadata_file_nested_not_child(self, provider): # path = '/ed/sullivan/show.mp3' # query = provider._build_query(provider.folder['id'], title='ed') # url = provider.build_url('files', q=query, alt='json') # 
aiohttpretty.register_json_uri('GET', url, body={'items': []}) # with pytest.raises(exceptions.MetadataError) as exc_info: # yield from provider.metadata(path) # assert exc_info.value.code == 404 @async @pytest.mark.aiohttpretty def test_metadata_root_folder(self, provider): path = yield from provider.validate_path('/') query = provider._build_query(provider.folder['id']) list_file_url = provider.build_url('files', q=query, alt='json') aiohttpretty.register_json_uri('GET', list_file_url, body=fixtures.list_file) result = yield from provider.metadata(path) expected = GoogleDriveFileMetadata( fixtures.list_file['items'][0], path.child(fixtures.list_file['items'][0]['title']) ).serialized() assert result == [expected] @async @pytest.mark.aiohttpretty def test_metadata_folder_nested(self, provider): path = GoogleDrivePath( '/hugo/kim/pins/', _ids=[str(x) for x in range(4)] ) body = fixtures.generate_list(3) item = body['items'][0] query = provider._build_query(path.identifier) url = provider.build_url('files', q=query, alt='json') aiohttpretty.register_json_uri('GET', url, body=body) result = yield from provider.metadata(path) expected = GoogleDriveFileMetadata(item, path.child(item['title'])).serialized() assert result == [expected] assert aiohttpretty.has_call(method='GET', uri=url) @async @pytest.mark.aiohttpretty def test_folder_metadata(self, provider): path = GoogleDrivePath( '/hugo/kim/pins/', _ids=[str(x) for x in range(4)] ) body = fixtures.generate_list(3, **fixtures.folder_metadata) item = body['items'][0] query = provider._build_query(path.identifier) url = provider.build_url('files', q=query, alt='json') aiohttpretty.register_json_uri('GET', url, body=body) result = yield from provider.metadata(path) expected = GoogleDriveFolderMetadata(item, path.child(item['title'], folder=True)).serialized() assert result == [expected] assert aiohttpretty.has_call(method='GET', uri=url) class TestRevisions: @async @pytest.mark.aiohttpretty def test_get_revisions(self, 
provider): item = fixtures.list_file['items'][0] path = WaterButlerPath('/birdie.jpg', _ids=('doesntmatter', item['id'])) revisions_url = provider.build_url('files', item['id'], 'revisions') aiohttpretty.register_json_uri('GET', revisions_url, body=fixtures.revisions_list) result = yield from provider.revisions(path) expected = [ GoogleDriveRevision(each).serialized() for each in fixtures.revisions_list['items'] ] assert result == expected @async @pytest.mark.aiohttpretty def test_get_revisions_no_revisions(self, provider): item = fixtures.list_file['items'][0] metadata_url = provider.build_url('files', item['id']) revisions_url = provider.build_url('files', item['id'], 'revisions') path = WaterButlerPath('/birdie.jpg', _ids=('doesntmatter', item['id'])) aiohttpretty.register_json_uri('GET', metadata_url, body=item) aiohttpretty.register_json_uri('GET', revisions_url, body=fixtures.revisions_list_empty) result = yield from provider.revisions(path) expected = [ GoogleDriveRevision({ 'modifiedDate': item['modifiedDate'], 'id': fixtures.revisions_list_empty['etag'] + ds.DRIVE_IGNORE_VERSION, }).serialized() ] assert result == expected @async @pytest.mark.aiohttpretty def test_get_revisions_doesnt_exist(self, provider): with pytest.raises(exceptions.NotFoundError): yield from provider.revisions(WaterButlerPath('/birdie.jpg')) class TestCreateFolder: @async @pytest.mark.aiohttpretty def test_already_exists(self, provider): path = WaterButlerPath('/hugo/', _ids=('doesnt', 'matter')) with pytest.raises(exceptions.FolderNamingConflict) as e: yield from provider.create_folder(path) assert e.value.code == 409 assert e.value.message == 'Cannot create folder "{}" because a file or folder already exists at path "{}"'.format(path.name, str(path)) @async @pytest.mark.aiohttpretty def test_returns_metadata(self, provider): path = WaterButlerPath('/osf%20test/', _ids=(provider.folder['id'], None)) aiohttpretty.register_json_uri('POST', provider.build_url('files'), 
body=fixtures.folder_metadata) resp = yield from provider.create_folder(path) assert resp['kind'] == 'folder' assert resp['name'] == 'osf test' assert resp['path'] == '/osf%20test/' @async @pytest.mark.aiohttpretty def test_raises_non_404(self, provider): path = WaterButlerPath('/hugo/kim/pins/', _ids=(provider.folder['id'], 'something', 'something', None)) url = provider.build_url('files') aiohttpretty.register_json_uri('POST', url, status=418) with pytest.raises(exceptions.CreateFolderError) as e: yield from provider.create_folder(path) assert e.value.code == 418 @async @pytest.mark.aiohttpretty def test_must_be_folder(self, provider, monkeypatch): with pytest.raises(exceptions.CreateFolderError) as e: yield from provider.create_folder(WaterButlerPath('/carp.fish', _ids=('doesnt', 'matter')))
36.68172
143
0.672568
1,970
17,057
5.634518
0.104061
0.025946
0.033784
0.060811
0.798018
0.762883
0.732973
0.71973
0.661622
0.633243
0
0.004932
0.203494
17,057
464
144
36.760776
0.812086
0.042856
0
0.57716
0
0
0.092286
0.005641
0
0
0
0
0.12037
0
null
null
0
0.04321
null
null
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
403b3f687eb84b539bc5031ab50ad982b686ba48
22
py
Python
src/iolite/ice_ring/__init__.py
egrahl/iolite
064e30d9d7ec8c08f60c486cf9d6c48cca6562b5
[ "BSD-2-Clause" ]
null
null
null
src/iolite/ice_ring/__init__.py
egrahl/iolite
064e30d9d7ec8c08f60c486cf9d6c48cca6562b5
[ "BSD-2-Clause" ]
null
null
null
src/iolite/ice_ring/__init__.py
egrahl/iolite
064e30d9d7ec8c08f60c486cf9d6c48cca6562b5
[ "BSD-2-Clause" ]
1
2019-07-05T12:52:03.000Z
2019-07-05T12:52:03.000Z
#init file of ice_ring
22
22
0.818182
5
22
3.4
1
0
0
0
0
0
0
0
0
0
0
0
0.136364
22
1
22
22
0.894737
0.954545
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
4051e7cf049965226b0cd179ed313822266a6439
590
py
Python
pypy/interpreter/callbench/f04.py
kantai/passe-pypy-taint-tracking
b60a3663f8fe89892dc182c8497aab97e2e75d69
[ "MIT" ]
2
2016-07-06T23:30:20.000Z
2017-05-30T15:59:31.000Z
pypy/interpreter/callbench/f04.py
woodrow/pyoac
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
[ "MIT" ]
null
null
null
pypy/interpreter/callbench/f04.py
woodrow/pyoac
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
[ "MIT" ]
2
2020-07-09T08:14:22.000Z
2021-01-15T18:01:25.000Z
from sup import run def w(N, start): def f0(): pass def f1(a): pass def f2(a, b): pass def f3(a, b, c): pass def f4(a, b, c, d): pass def f5(a, b, c, d, e): pass start() i = 0 while i < N: f0() f0() f0() f1(1) f1(1) f2(1, 2) f3(1, 2, 3) f4(1, 2, 3, 4) f5(1, 2, 3, 4, 5) f0() f0() f0() f1(1) f1(1) f2(1, 2) f3(1, 2, 3) f4(1, 2, 3, 4) i+=1 run(w, 1000)
14.75
30
0.305085
96
590
1.875
0.302083
0.077778
0.083333
0.066667
0.3
0.3
0.3
0.3
0.3
0.3
0
0.205882
0.538983
590
39
31
15.128205
0.455882
0
0
0.611111
0
0
0
0
0
0
0
0
0
1
0.194444
false
0.166667
0.027778
0
0.222222
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
5
405a6e56f036c07f3a7d418af570ef381d7e1d4a
786
py
Python
sdk/python/pulumi_azure/containerservice/__init__.py
aangelisc/pulumi-azure
71dd9c75403146e16f7480e5a60b08bc0329660e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure/containerservice/__init__.py
aangelisc/pulumi-azure
71dd9c75403146e16f7480e5a60b08bc0329660e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure/containerservice/__init__.py
aangelisc/pulumi-azure
71dd9c75403146e16f7480e5a60b08bc0329660e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** from .. import _utilities import typing # Export this package's modules as members: from .get_cluster_node_pool import * from .get_kubernetes_cluster import * from .get_kubernetes_service_versions import * from .get_registry import * from .get_registry_scope_map import * from .get_registry_token import * from .group import * from .kubernetes_cluster import * from .kubernetes_cluster_node_pool import * from .registry import * from .registry_scope_map import * from .registry_token import * from .registry_webhook import * from .registry_webook import * from ._inputs import * from . import outputs
32.75
87
0.782443
113
786
5.221239
0.469027
0.254237
0.110169
0.10678
0.172881
0
0
0
0
0
0
0.001484
0.142494
786
23
88
34.173913
0.873887
0.278626
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
407910e2df8f6358089861805d83bb5fd5cc2349
3,793
py
Python
package/test/test_users_data_manager.py
QualiSystemsLab/cloudshell-training-workflow
95360acc5a180badc7c46788c2edf4e348b1d2e0
[ "Apache-2.0" ]
null
null
null
package/test/test_users_data_manager.py
QualiSystemsLab/cloudshell-training-workflow
95360acc5a180badc7c46788c2edf4e348b1d2e0
[ "Apache-2.0" ]
null
null
null
package/test/test_users_data_manager.py
QualiSystemsLab/cloudshell-training-workflow
95360acc5a180badc7c46788c2edf4e348b1d2e0
[ "Apache-2.0" ]
null
null
null
import unittest from mock import Mock from cloudshell.orch.training.services.users_data_manager import UsersDataManagerService, USERS_DICT_KEY class TestUsersDataManagerService(unittest.TestCase): def test_add(self): # arrange sandbox = Mock() users_data_manager = UsersDataManagerService(sandbox) user = 'user' # act users_data_manager.add_or_update(user, 'key', 'value1') # assert self.assertEqual('value1', users_data_manager._data[user]['key']) def test_update(self): # arrange sandbox = Mock() users_data_manager = UsersDataManagerService(sandbox) user = 'user' users_data_manager.add_or_update(user, 'key', 'value1') self.assertEqual('value1', users_data_manager._data[user]['key']) # act users_data_manager.add_or_update(user, 'key', 'value2') # assert self.assertEqual('value2', users_data_manager._data[user]['key']) def test_get(self): # arrange sandbox = Mock() users_data_manager = UsersDataManagerService(sandbox) user = 'user' users_data_manager.add_or_update(user, 'key1', 'value1') users_data_manager.add_or_update(user, 'key2', 'value2') # act user_data = users_data_manager.get(user) # assert self.assertTrue(user_data == {'key1': 'value1', 'key2': 'value2'}) def test_get_key(self): # arrange sandbox = Mock() users_data_manager = UsersDataManagerService(sandbox) user = 'user' users_data_manager.add_or_update(user, 'key1', 'value1') # act value = users_data_manager.get_key(user, 'key1') # assert self.assertEqual('value1', value) def test_get_key_no_such_key(self): # arrange sandbox = Mock() users_data_manager = UsersDataManagerService(sandbox) user = 'user' users_data_manager.add_or_update(user, 'key1', 'value1') # act value = users_data_manager.get_key(user, 'key2') # assert self.assertIsNone(value) def test_load_data_exists_in_server(self): # arrange sandbox = Mock() sandbox.automation_api = Mock() sandbox_data_kvp = [Mock(Key=USERS_DICT_KEY, Value='{"user": {"key1": "value1"}}'), Mock(Key='some_key', Value='some_value')] get_sandbox_data_return_val = 
Mock(SandboxDataKeyValues=sandbox_data_kvp) sandbox.automation_api.GetSandboxData = Mock(return_value=get_sandbox_data_return_val) users_data_manager = UsersDataManagerService(sandbox) # act users_data_manager.load() # assert self.assertTrue(users_data_manager._data == {'user': {'key1': 'value1'}}) def test_load_no_sandboxdata_from_server(self): # arrange sandbox = Mock() sandbox.automation_api = Mock() sandbox_data_kvp = [] sandbox.automation_api.GetSandboxData = Mock(return_value=Mock(SandboxDataKeyValues=sandbox_data_kvp)) users_data_manager = UsersDataManagerService(sandbox) # act users_data_manager.load() # assert self.assertTrue(users_data_manager._data == {}) def test_load_no_userdata_in_sandboxdata_from_server(self): # arrange sandbox = Mock() sandbox.automation_api = Mock() sandbox_data_kvp = [Mock(Key='some_key', Value='some_value')] sandbox.automation_api.GetSandboxData = Mock(return_value=Mock(SandboxDataKeyValues=sandbox_data_kvp)) users_data_manager = UsersDataManagerService(sandbox) # act users_data_manager.load() # assert self.assertTrue(users_data_manager._data == {})
31.87395
110
0.647245
415
3,793
5.568675
0.127711
0.109044
0.193855
0.076158
0.776287
0.764171
0.744267
0.70965
0.691908
0.611856
0
0.008766
0.248089
3,793
118
111
32.144068
0.801543
0.03981
0
0.575758
0
0
0.062483
0
0
0
0
0
0.136364
1
0.121212
false
0
0.045455
0
0.181818
0
0
0
0
null
0
1
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
40881f4a03693dee36da4969f0a8c71b0455d4b8
4,021
py
Python
tests/integration_test_single_hop/maximum_data_qubit_limit_check.py
AleBestia/QuNetSim
bae7ab9f925af5858c99af9691cd7fec05ae74b6
[ "MIT" ]
null
null
null
tests/integration_test_single_hop/maximum_data_qubit_limit_check.py
AleBestia/QuNetSim
bae7ab9f925af5858c99af9691cd7fec05ae74b6
[ "MIT" ]
null
null
null
tests/integration_test_single_hop/maximum_data_qubit_limit_check.py
AleBestia/QuNetSim
bae7ab9f925af5858c99af9691cd7fec05ae74b6
[ "MIT" ]
1
2020-09-17T15:43:34.000Z
2020-09-17T15:43:34.000Z
import sys import time sys.path.append("../..") from qunetsim.backends import CQCBackend from qunetsim.components.host import Host from qunetsim.components.network import Network from qunetsim.objects import Qubit def main(): print("Test maximum data qubit has been skipped.") return backend = CQCBackend() network = Network.get_instance() nodes = ["Alice", "Bob", "Eve", "Dean"] network.start(nodes, backend) network.delay = 0.7 hosts = {'alice': Host('Alice', backend), 'bob': Host('Bob', backend)} network.delay = 0 # A <-> B hosts['alice'].add_connection('Bob') hosts['bob'].add_connection('Alice') hosts['alice'].memory_limit = 1 hosts['bob'].memory_limit = 1 hosts['alice'].start() hosts['bob'].start() for h in hosts.values(): network.add_host(h) q_alice_id_1 = hosts['alice'].send_qubit(hosts['bob'].host_id, Qubit(hosts['alice'])) time.sleep(2) q_alice_id_2 = hosts['alice'].send_qubit(hosts['bob'].host_id, Qubit(hosts['alice'])) time.sleep(2) q_bob_id_1 = hosts['bob'].send_qubit(hosts['alice'].host_id, Qubit(hosts['bob'])) time.sleep(2) q_bob_id_2 = hosts['bob'].send_qubit(hosts['alice'].host_id, Qubit(hosts['bob'])) time.sleep(2) # Allow the network to process the requests # TODO: remove the need for this time.sleep(2) i = 0 while len(hosts['alice'].get_data_qubits(hosts['bob'].host_id)) < 1 and i < 5: time.sleep(1) i += 1 i = 0 while len(hosts['bob'].get_data_qubits(hosts['alice'].host_id)) < 1 and i < 5: time.sleep(1) i += 1 assert len(hosts['alice'].get_data_qubits(hosts['bob'].host_id)) == 1 assert hosts['alice'].get_data_qubit(hosts['bob'].host_id, q_bob_id_1).measure() == 0 assert hosts['alice'].get_data_qubit(hosts['bob'].host_id, q_bob_id_2) == None assert len(hosts['bob'].get_data_qubits(hosts['alice'].host_id)) == 1 assert hosts['bob'].get_data_qubit(hosts['alice'].host_id, q_alice_id_1).measure() == 0 assert hosts['bob'].get_data_qubit(hosts['alice'].host_id, q_alice_id_2) == None hosts['alice'].set_data_qubit_memory_limit(2, hosts['bob'].host_id) 
hosts['bob'].set_data_qubit_memory_limit(2) q_alice_id_1 = hosts['alice'].send_qubit(hosts['bob'].host_id, Qubit(hosts['alice'])) time.sleep(2) q_alice_id_2 = hosts['alice'].send_qubit(hosts['bob'].host_id, Qubit(hosts['alice'])) time.sleep(2) q_alice_id_3 = hosts['alice'].send_qubit(hosts['bob'].host_id, Qubit(hosts['alice'])) time.sleep(2) q_bob_id_1 = hosts['bob'].send_qubit(hosts['alice'].host_id, Qubit(hosts['bob'])) time.sleep(2) q_bob_id_2 = hosts['bob'].send_qubit(hosts['alice'].host_id, Qubit(hosts['bob'])) time.sleep(2) q_bob_id_3 = hosts['bob'].send_qubit(hosts['alice'].host_id, Qubit(hosts['bob'])) time.sleep(2) # Allow the network to process the requests time.sleep(3) i = 0 while len(hosts['alice'].get_data_qubits(hosts['bob'].host_id)) < 2 and i < 5: time.sleep(1) i += 1 i = 0 while len(hosts['bob'].get_data_qubits(hosts['alice'].host_id)) < 2 and i < 5: time.sleep(1) i += 1 assert len(hosts['alice'].get_data_qubits(hosts['bob'].host_id)) == 2 assert hosts['alice'].get_data_qubit(hosts['bob'].host_id, q_bob_id_1).measure() == 0 assert hosts['alice'].get_data_qubit(hosts['bob'].host_id, q_bob_id_2).measure() == 0 assert hosts['alice'].get_data_qubit(hosts['bob'].host_id, q_bob_id_3) == None assert len(hosts['bob'].get_data_qubits(hosts['alice'].host_id)) == 2 assert hosts['bob'].get_data_qubit(hosts['alice'].host_id, q_alice_id_1).measure() == 0 assert hosts['bob'].get_data_qubit(hosts['alice'].host_id, q_alice_id_2).measure() == 0 assert hosts['bob'].get_data_qubit(hosts['alice'].host_id, q_alice_id_3) == None print("All tests succesfull!") network.stop(True) exit() if __name__ == '__main__': main()
35.27193
91
0.648844
638
4,021
3.84326
0.126959
0.154976
0.085644
0.085644
0.721044
0.721044
0.701468
0.701468
0.701468
0.701468
0
0.020659
0.169361
4,021
113
92
35.584071
0.713473
0.030341
0
0.411765
0
0
0.106061
0
0
0
0
0.00885
0.164706
1
0.011765
false
0
0.070588
0
0.094118
0.023529
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4097d7013b5e672ad5fcc666be96ff3bfc913b44
399
py
Python
stubs.min/System/Runtime/InteropServices/__init___parts/OutAttribute.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
1
2017-07-25T14:30:18.000Z
2017-07-25T14:30:18.000Z
stubs.min/System/Runtime/InteropServices/__init___parts/OutAttribute.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
stubs.min/System/Runtime/InteropServices/__init___parts/OutAttribute.py
hdm-dt-fb/ironpython-stubs
4d2b405eda3ceed186e8adca55dd97c332c6f49d
[ "MIT" ]
null
null
null
class OutAttribute(Attribute,_Attribute): """ Indicates that data should be marshaled from callee back to caller. OutAttribute() """ def __init__(self,*args): """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass
39.9
215
0.716792
49
399
5
0.530612
0.183673
0.195918
0.232653
0.461224
0.461224
0.461224
0.461224
0.461224
0.461224
0
0
0.155388
399
9
216
44.333333
0.727003
0.726817
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0.333333
0
0
0.666667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5
409838f7dfc95ed71f915a0e702866abb8ab62d7
120
py
Python
nalapi/action/__init__.py
vibby/nalapi
140d388b2935295db9f790d884d3c86690d30fc0
[ "MIT" ]
null
null
null
nalapi/action/__init__.py
vibby/nalapi
140d388b2935295db9f790d884d3c86690d30fc0
[ "MIT" ]
null
null
null
nalapi/action/__init__.py
vibby/nalapi
140d388b2935295db9f790d884d3c86690d30fc0
[ "MIT" ]
null
null
null
"""Action package for the nalapi.""" import os import sys sys.path.append(os.path.dirname(os.path.realpath(__file__)))
20
60
0.75
19
120
4.526316
0.684211
0.139535
0
0
0
0
0
0
0
0
0
0
0.091667
120
5
61
24
0.788991
0.25
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
40e3078879673ce6410f05a41a4762b811195581
4,445
py
Python
deps/cache/tests.py
SysKillerDev/virtkick-backend
62cad6baf2d53e3abb71bfc5862ba1180a1b5acf
[ "Apache-2.0" ]
null
null
null
deps/cache/tests.py
SysKillerDev/virtkick-backend
62cad6baf2d53e3abb71bfc5862ba1180a1b5acf
[ "Apache-2.0" ]
null
null
null
deps/cache/tests.py
SysKillerDev/virtkick-backend
62cad6baf2d53e3abb71bfc5862ba1180a1b5acf
[ "Apache-2.0" ]
1
2021-07-18T14:09:37.000Z
2021-07-18T14:09:37.000Z
# coding=utf-8 import random import time import threading import unittest from lru_cache import LruCache class TesLruCache(unittest.TestCase): def test_cache_normal(self): a = [] @LruCache(maxsize=2, timeout=1) def bar(num): a.append(num) return num bar(1) bar(1) self.assertEqual(a, [1]) def test_cache_none(self): a = [] @LruCache(maxsize=2, timeout=1) def bar(num): a.append(num) return None bar(1) bar(1) self.assertEqual(a, [1]) def test_cache_when_timeout(self): a = [] @LruCache(maxsize=2, timeout=1) def bar(num): a.append(num) return num bar(2) time.sleep(2) bar(2) self.assertEqual(a, [2, 2]) def test_cache_when_cache_is_full(self): a = [] @LruCache(maxsize=2, timeout=1) def bar(num): a.append(num) return num bar(1) bar(2) bar(3) bar(1) self.assertEqual(a, [1, 2, 3, 1]) def test_cache_with_multi_thread(self): a = [] @LruCache(maxsize=10, timeout=1) def bar(num): a.append(num) return num for i in xrange(10): threading.Thread(target=bar, args=(i, )).start() main_thread = threading.currentThread() for t in threading.enumerate(): if t is not main_thread: t.join() bar(random.randint(0, 9)) self.assertEqual(set(a), set(range(10))) def test_cache_with_multi_thread_two_func(self): a = [] @LruCache(maxsize=10, timeout=1) def bar(num): a.append(num) return num b = [] @LruCache(maxsize=10, timeout=1) def bar(num): b.append(num) return num + 1 for i in xrange(10): threading.Thread(target=bar, args=(i, )).start() threading.Thread(target=bar, args=(i, )).start() main_thread = threading.currentThread() for t in threading.enumerate(): if t is not main_thread: t.join() feed = random.randint(0, 9) self.assertEqual(bar(feed), feed) self.assertEqual(bar(feed), feed + 1) self.assertEqual(set(a), set(range(10))) self.assertEqual(set(b), set(range(10))) def test_cache_when_timeout_and_maxsize_is_none(self): a = [] @LruCache() def bar(num): a.append(num) return num bar(1) bar(1) self.assertEqual(a, [1]) def test_cache_when_timeout_is_none(self): a = [] @LruCache(maxsize=10) def 
bar(num): a.append(num) return num bar(1) bar(1) self.assertEqual(a, [1]) def test_cache_when_only_maxsize_is_none_normal(self): a = [] @LruCache(timeout=2) def bar(num): a.append(num) return num bar(1) bar(1) self.assertEqual(a, [1]) def test_cache_when_only_maxsize_is_none_timeout(self): a = [] @LruCache(timeout=1) def bar(num): a.append(num) return num bar(1) time.sleep(2) bar(1) self.assertEqual(a, [1, 1]) def test_cache_when_only_maxsize_is_none_normal_method(self): a = [] class Func(object): @LruCache(timeout=2) def bar(self, num): a.append(num) return num fun = Func() fun.bar(1) fun.bar(1) self.assertEqual(a, [1]) def test_cache_when_only_maxsize_is_none_normal_method_timeout(self): a = [] class Func(object): @LruCache(timeout=1) def bar(self, num): a.append(num) return num fun = Func() fun.bar(1) time.sleep(2) fun.bar(1) self.assertEqual(a, [1, 1]) def test_invalidate(self): a = [] @LruCache() def bar(num): a.append(num) return num bar(1) bar(1) self.assertEqual(a, [1]) bar.invalidate(1) bar(1) self.assertEqual(a, [1, 1]) if __name__ == "__main__": unittest.main()
21.57767
73
0.503487
549
4,445
3.932605
0.125683
0.038907
0.097267
0.078277
0.84113
0.779991
0.71422
0.641501
0.625753
0.602594
0
0.031815
0.377728
4,445
205
74
21.682927
0.748735
0.0027
0
0.775641
0
0
0.001805
0
0
0
0
0
0.108974
1
0.173077
false
0
0.032051
0
0.314103
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
40ed6241a92c044295305141e640527ecaa7dc32
629
py
Python
doctest/rundoctests.py
orbnauticus/silk
a139e11aaf824d593c9a828851c9e9d6b287000c
[ "BSD-3-Clause" ]
null
null
null
doctest/rundoctests.py
orbnauticus/silk
a139e11aaf824d593c9a828851c9e9d6b287000c
[ "BSD-3-Clause" ]
null
null
null
doctest/rundoctests.py
orbnauticus/silk
a139e11aaf824d593c9a828851c9e9d6b287000c
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python import doctest import silk doctest.testmod(silk) import silk.webdoc doctest.testmod(silk.webdoc) doctest.testmod(silk.webdoc.node) doctest.testmod(silk.webdoc.css) doctest.testmod(silk.webdoc.html) doctest.testmod(silk.webdoc.html.common) import silk.webdoc.stencil doctest.testmod(silk.webdoc.stencil) import silk.webdoc.html.v4 import silk.webdoc.html.v5 doctest.testmod(silk.webdoc.html.v4) doctest.testmod(silk.webdoc.html.v5) import silk.webreq doctest.testmod(silk.webreq) import silk.webdb doctest.testmod(silk.webdb) doctest.testmod(silk.webdb.drivers) doctest.testmod(silk.webdb.drivers.base)
22.464286
40
0.81558
95
629
5.4
0.210526
0.354776
0.45614
0.374269
0.510721
0.220273
0
0
0
0
0
0.006745
0.057234
629
27
41
23.296296
0.858347
0.031797
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.380952
0
0.380952
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
40f30480446accbd944ba99a09ae7d743a27b5d0
1,088
py
Python
setup.py
lx183/ansible_remote_checks
d7012a1e4f1fdae52d05ca4a93f14f8947f20e25
[ "MIT" ]
null
null
null
setup.py
lx183/ansible_remote_checks
d7012a1e4f1fdae52d05ca4a93f14f8947f20e25
[ "MIT" ]
null
null
null
setup.py
lx183/ansible_remote_checks
d7012a1e4f1fdae52d05ca4a93f14f8947f20e25
[ "MIT" ]
1
2019-08-20T13:19:16.000Z
2019-08-20T13:19:16.000Z
from setuptools import setup, find_packages setup( name='ansible_remote_checks', version='0.1.1', description='Library for Nagios checks on linux systems', author='Alexander Lex, David Voit, Christian Zuegner', author_email='Alexander.Lex@osram-os.com, David.Voit@osram-os.com, Christian.Zuegner@osram-os.com', classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: System :: Monitoring', 'Programming Language :: Python :: 2', ], install_requires=[ 'ansible' 'argparse', ], packages=find_packages('src'), package_dir={'':'src'}, scripts=[ 'nagios_checks/check_cpu_ansible', 'nagios_checks/check_file_ansible', 'nagios_checks/check_fs_ansible', 'nagios_checks/check_memory_ansible', 'nagios_checks/check_service_ansible', 'nagios_checks/check_update_ansible', 'nagios_checks/check_swap_ansible', 'nagios_checks/check_load_ansible', 'nagios_checks/check_sg_ansible', 'nagios_checks/check_process_ansible', 'nagios_checks/check_directory_ansible' ] )
27.2
101
0.716912
129
1,088
5.736434
0.503876
0.194595
0.252703
0.324324
0
0
0
0
0
0
0
0.005435
0.154412
1,088
39
102
27.897436
0.798913
0
0
0.060606
0
0.030303
0.646734
0.426863
0
0
0
0
0
1
0
true
0
0.030303
0
0.030303
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
40f7c8c96f55523f0e9f806e889bed6c8e7c5b91
38
py
Python
django_task/exceptions.py
morlandi/django-task
19c00fd2f73e60c0c11a33fe195546f567f29361
[ "MIT" ]
46
2017-11-02T22:23:14.000Z
2022-02-16T11:56:58.000Z
django_task/exceptions.py
morlandi/django-task
19c00fd2f73e60c0c11a33fe195546f567f29361
[ "MIT" ]
10
2018-08-28T06:56:14.000Z
2021-12-27T17:49:30.000Z
django_task/exceptions.py
morlandi/django-task
19c00fd2f73e60c0c11a33fe195546f567f29361
[ "MIT" ]
6
2018-02-01T12:26:02.000Z
2021-09-07T11:13:04.000Z
class TaskError(Exception): pass
9.5
27
0.710526
4
38
6.75
1
0
0
0
0
0
0
0
0
0
0
0
0.210526
38
3
28
12.666667
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
dc09496db3a8ef7298cfb5618a32c0d969f4dcb8
117
py
Python
codigos/Cap02/exemplo002.py
skunkworksdev/Ifes_Algoritmo
458ef73a304573c12b45d8afae38e13ae0f3354f
[ "MIT" ]
null
null
null
codigos/Cap02/exemplo002.py
skunkworksdev/Ifes_Algoritmo
458ef73a304573c12b45d8afae38e13ae0f3354f
[ "MIT" ]
null
null
null
codigos/Cap02/exemplo002.py
skunkworksdev/Ifes_Algoritmo
458ef73a304573c12b45d8afae38e13ae0f3354f
[ "MIT" ]
null
null
null
# negativadade da decisão # if(False): # print('aula de algoritmos') a = 2 if(a == 1): print('aula de algoritmos')
14.625
29
0.649573
18
117
4.222222
0.666667
0.236842
0.289474
0.552632
0
0
0
0
0
0
0
0.020833
0.179487
117
8
30
14.625
0.770833
0.529915
0
0
0
0
0.346154
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
dc0c6c141e5d9e31cb0c360f6e59c2fe803f3cc6
69
py
Python
arushi_gupta_101703106_outlier/__init__.py
Arushi872/arushi_gupta_101703106_outlier
220abaeeeb2cc2bcdae27a7921160a002b89560d
[ "MIT" ]
null
null
null
arushi_gupta_101703106_outlier/__init__.py
Arushi872/arushi_gupta_101703106_outlier
220abaeeeb2cc2bcdae27a7921160a002b89560d
[ "MIT" ]
null
null
null
arushi_gupta_101703106_outlier/__init__.py
Arushi872/arushi_gupta_101703106_outlier
220abaeeeb2cc2bcdae27a7921160a002b89560d
[ "MIT" ]
null
null
null
from arushi_gupta_101703106_outlier.outlier_cmd import outlier_remove
69
69
0.942029
10
69
6
0.8
0
0
0
0
0
0
0
0
0
0
0.136364
0.043478
69
1
69
69
0.772727
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
dc19b3dab6e5c6452d6ab19cc1149d9c7f1afa97
2,338
py
Python
tests/test_document_mapping.py
Alzpeta/oarepo-documents
e3dc85fad7a2d03d13d9d3a902faebb21bae3522
[ "MIT" ]
null
null
null
tests/test_document_mapping.py
Alzpeta/oarepo-documents
e3dc85fad7a2d03d13d9d3a902faebb21bae3522
[ "MIT" ]
3
2021-01-19T12:59:37.000Z
2021-03-19T20:52:04.000Z
tests/test_document_mapping.py
Alzpeta/oarepo-documents
e3dc85fad7a2d03d13d9d3a902faebb21bae3522
[ "MIT" ]
5
2020-10-24T16:04:25.000Z
2020-10-28T10:42:23.000Z
from oarepo_documents.document_json_mapping import schema_mapping def test_createRecord(app, db, client): existing_document = {"keywords": ["X", "yy", "kchchch", "K", "J", "x", "xxx", "xxxxxx"], "titles": ["Nějaká úžasná česká věta"]} data = schema_mapping(existing_document, 'doi') assert data == {'alternative_identifiers': [{'scheme': 'DOI', 'value': 'doi'}], 'authors': [{'full_name': 'Various authors'}], 'keywords': [{'value': 'X yy kchchch'}, {'value': 'K'}, {'value': 'J x xxx xxxxxx'}], 'document_type': 'unknown', 'publication_year': 'unknown', 'title': {"cs": "Nějaká úžasná česká věta"}} existing_document = {"titles": "Willst du bis der Tod euch scheidet treu ihr sein für alle Tage? Nein!", "authors": [{"given": "givenname", "family": "familyname"}, {"given": "givenname1", "family": "familyname2"}]} data = schema_mapping(existing_document, 'doi') assert data == {'alternative_identifiers': [{'scheme': 'DOI', 'value': 'doi'}], 'authors': [{'full_name': 'givenname familyname'}, {'full_name': 'givenname1 familyname2'}], 'document_type': 'unknown', 'publication_year': 'unknown', 'title': {"de": "Willst du bis der Tod euch scheidet treu ihr sein für alle Tage? Nein!"} } existing_document = {"titles": "Willst du bis der Tod euch scheidet treu ihr sein für alle Tage? Nein!", "authors": [{"given": "givenname", "family": "familyname"}, {"given": "givenname1", "family": "familyname2"}]} data = schema_mapping(existing_document, 'doi') assert data == {'alternative_identifiers': [{'scheme': 'DOI', 'value': 'doi'}], 'authors': [{'full_name': 'givenname familyname'}, {'full_name': 'givenname1 familyname2'}], 'document_type': 'unknown', 'publication_year': 'unknown', 'title': {"de": "Willst du bis der Tod euch scheidet treu ihr sein für alle Tage? Nein!"} }
64.944444
135
0.517536
216
2,338
5.472222
0.305556
0.081218
0.037225
0.047377
0.795262
0.795262
0.795262
0.756345
0.756345
0.756345
0
0.005057
0.323353
2,338
36
136
64.944444
0.742099
0
0
0.606061
0
0
0.430953
0.0295
0
0
0
0
0.090909
1
0.030303
false
0
0.030303
0
0.060606
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
90585d0503a29d9cbc3d2dda68759a6053ff0f95
3,168
py
Python
SpectMHC_main.py
Prathyusha-konda/netMHC_DB_scripts
f9c87e00dc0848644993d8671d72078b165c4955
[ "MIT" ]
2
2016-10-24T19:18:42.000Z
2018-09-14T07:54:23.000Z
SpectMHC_main.py
Prathyusha-konda/SpectMHC
f9c87e00dc0848644993d8671d72078b165c4955
[ "MIT" ]
null
null
null
SpectMHC_main.py
Prathyusha-konda/SpectMHC
f9c87e00dc0848644993d8671d72078b165c4955
[ "MIT" ]
null
null
null
import SpectMHC_methods if SpectMHC_methods.install_check("verify") in 'yes': print "\nOk, Let's move ahead" version = SpectMHC_methods.version("verify version") split_output=raw_input("\nDo you want to split your parent file? yes or no [not mandatory, please read instructions]: ") if split_output in 'yes': split_list=SpectMHC_methods.split_files("split") check1 = raw_input("\nDo you want to save your splitfiles after we complete the execution? yes or no: ") check2 = raw_input("\nDo you want the output in fasta format? note: this includes processing after netMHC output, yes or no: ") if check2 in 'yes': if version in '3.4': cut_off=float(raw_input("\nWhat is the binding affinity cut_off you are interested in? netmhc suggests 50 for strong binders and 500 for weak binders: ")) else: cut_off=float(raw_input("\nWhat is the cut_off Rank you are interested in? netMHC suggests cutoff rank of 0.5 for strong binders and 2 for weak binders: ")) check3 = raw_input("\nDo you want to save your raw netMHC output files along with the fasta output? yes or no: ") outfile_list=SpectMHC_methods.executemhc(version, split_list) print "\nWait while your data is processed into fasta format..." if check1 in 'no': SpectMHC_methods.del_temp_files(split_list) elif check1 in 'yes': print "\nSplit files are saved to your computer..." if check2 in 'yes': SpectMHC_methods.process_data(version, outfile_list, cut_off) if check3 in 'yes': print "\nThe execution is complete. Check your output files in the folder..." elif check3 in 'no': SpectMHC_methods.del_temp_files(outfile_list) elif check2 in 'no': SpectMHC_methods.executemhc(version, split_list) if check1 in 'no': SpectMHC_methods.del_temp_files(split_list) elif check1 in 'yes': print "\nSplit files are saved to your computer..." elif split_output in 'no': files = raw_input("\nPlease enter your file names, if you have multiple files, separate them by space. 
ex- 1.fsa 2.fsa 3.fsa: ") input_files = files.split() check2 = raw_input("\nDo you want the output in fasta format? yes or no: ") if check2 in 'yes': if version in '3.4': cut_off=float(raw_input("\nWhat is the binding affinity cut_off you are interested in? netmhc suggests 50 for strong binders and 500 for weak binders: ")) else: cut_off=float(raw_input("\nWhat is the cut_off Rank you are interested in? netMHC suggests cutoff rank of 0.5 for strong binders and 2 for weak binders : ")) check3 = raw_input("\nDo you want to save your raw netMHC output files along with the fasta output? yes or no: ") file_list=SpectMHC_methods.executemhc(version, input_files) print "\nWait while your data is processed into fasta format" if check2 in 'yes': SpectMHC_methods.process_data(version, file_list, cut_off) if check3 in 'yes': print "\nThe execution is complete. Check your output files in the folder." elif check3 in 'no': SpectMHC_methods.del_temp_files(file_list) elif check2 in 'no': SpectMHC_methods.executemhc(version, input_files)
40.615385
164
0.720328
499
3,168
4.44489
0.218437
0.09468
0.029757
0.037872
0.770063
0.766456
0.711452
0.711452
0.698828
0.613165
0
0.016464
0.19476
3,168
77
165
41.142857
0.852999
0
0
0.528302
0
0.169811
0.492424
0
0
0
0
0
0
0
null
null
0
0.018868
null
null
0.132075
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
906efd1bdc48f1dbb44f2c428ffd7b3687d8407f
3,479
py
Python
july/tests.py
jesstess/julython.org
1c3044b1cca06cf47ab5a603b72533dd69fb094e
[ "MIT" ]
1
2020-08-11T02:42:45.000Z
2020-08-11T02:42:45.000Z
july/tests.py
jesstess/julython.org
1c3044b1cca06cf47ab5a603b72533dd69fb094e
[ "MIT" ]
null
null
null
july/tests.py
jesstess/julython.org
1c3044b1cca06cf47ab5a603b72533dd69fb094e
[ "MIT" ]
null
null
null
import datetime import mock from django.test import TestCase class JulyViews(TestCase): def test_index(self): resp = self.client.get('/') self.assertEqual(resp.status_code, 200) def test_help(self): resp = self.client.get('/help/') self.assertEqual(resp.status_code, 200) def test_live(self): resp = self.client.get('/live/') self.assertEqual(resp.status_code, 200) def test_register_get(self): resp = self.client.get('/register/') self.assertEqual(resp.status_code, 200) def test_register_bad(self): resp = self.client.post('/register/', {'Bad': 'field'}) self.assertEqual(resp.status_code, 200) self.assertContains(resp, "This field is required.") def test_register_good(self): post = { 'username': 'fred', 'password1': 'secret', 'password2': 'secret' } resp = self.client.post('/register/', post) self.assertRedirects(resp, '/') class AbuseTests(TestCase): def test_set_abuse(self): from django.conf import settings settings.ABUSE_LIMIT = 3 # 3 times ! from middleware import AbuseMiddleware today = datetime.date.today() request = mock.MagicMock() request.session = {} mid = AbuseMiddleware() abuse_reported = mid._abuse_reported(request) can_report_abuse = mid._can_report_abuse(request) abuse_reported() # one self.assertEqual( request.session['abuse_date'], today - datetime.timedelta(days=2), ) self.assertTrue(can_report_abuse()) abuse_reported() # two self.assertEqual( request.session['abuse_date'], today - datetime.timedelta(days=1), ) self.assertTrue(can_report_abuse()) abuse_reported() # tree self.assertEqual( request.session['abuse_date'], today, ) self.assertFalse(can_report_abuse()) # game is over ! 
def test_reset_abuse(self): from django.conf import settings settings.ABUSE_LIMIT = 3 from middleware import AbuseMiddleware today = datetime.date.today() request = mock.MagicMock() request.session = {'abuse_date': today-datetime.timedelta(days=10)} mid = AbuseMiddleware() abuse_reported = mid._abuse_reported(request) can_report_abuse = mid._can_report_abuse(request) abuse_reported() # if abuse_date is old enugh it should be reseted self.assertEqual( request.session['abuse_date'], today - datetime.timedelta(days=2), ) self.assertTrue(can_report_abuse()) request.session = {'abuse_date': today-datetime.timedelta(days=3)} abuse_reported() self.assertEqual( request.session['abuse_date'], today - datetime.timedelta(days=2), ) self.assertTrue(can_report_abuse()) request.session = {'abuse_date': today-datetime.timedelta(days=2)} abuse_reported() self.assertEqual( request.session['abuse_date'], today - datetime.timedelta(days=1), ) self.assertTrue(can_report_abuse()) abuse_reported() self.assertEqual( request.session['abuse_date'], today, ) self.assertFalse(can_report_abuse())
28.991667
75
0.602759
368
3,479
5.513587
0.203804
0.088714
0.075899
0.113356
0.814194
0.747166
0.731395
0.724988
0.669295
0.622474
0
0.011661
0.285139
3,479
119
76
29.235294
0.804182
0.024432
0
0.602151
0
0
0.064069
0
0
0
0
0
0.225806
1
0.086022
false
0.021505
0.075269
0
0.182796
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
908a22ed75174443f2078e09459790302a65609d
175
py
Python
src/DCGMM/experiment/__init__.py
anon-scientist/dcgmm
1d2d96d1d9811c387ee11d462ff0a3819a66e137
[ "Apache-2.0", "MIT" ]
null
null
null
src/DCGMM/experiment/__init__.py
anon-scientist/dcgmm
1d2d96d1d9811c387ee11d462ff0a3819a66e137
[ "Apache-2.0", "MIT" ]
null
null
null
src/DCGMM/experiment/__init__.py
anon-scientist/dcgmm
1d2d96d1d9811c387ee11d462ff0a3819a66e137
[ "Apache-2.0", "MIT" ]
null
null
null
## only executable python files here. Invoke with python3 -m experiment.Experiment_GMM <args..> from .Experiment import Experiment from .Experiment_GMM import Experiment_GMM
35
95
0.817143
23
175
6.086957
0.608696
0.278571
0
0
0
0
0
0
0
0
0
0.006494
0.12
175
4
96
43.75
0.902597
0.525714
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
90c451e0b2a3a133eab16335be9e348ec9c16ddd
182
py
Python
configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_karibuilding.py
ldg810/mmsegmentation-karibuilding
6fb7523d96b9cb78c8bbcd03038ef7cc19abd307
[ "Apache-2.0" ]
1
2022-01-26T12:48:02.000Z
2022-01-26T12:48:02.000Z
configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_karibuilding.py
ldg810/mmsegmentation-karibuilding
6fb7523d96b9cb78c8bbcd03038ef7cc19abd307
[ "Apache-2.0" ]
null
null
null
configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_karibuilding.py
ldg810/mmsegmentation-karibuilding
6fb7523d96b9cb78c8bbcd03038ef7cc19abd307
[ "Apache-2.0" ]
1
2022-02-07T07:09:12.000Z
2022-02-07T07:09:12.000Z
_base_ = [ '../_base_/models/deeplabv3_r50-d8-karibuilding.py', '../_base_/datasets/karibuilding.py', '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' ]
36.4
94
0.697802
21
182
5.428571
0.619048
0.157895
0.315789
0
0
0
0
0
0
0
0
0.036145
0.087912
182
4
95
45.5
0.650602
0
0
0
0
0
0.802198
0.802198
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
292350096bcf1e3c7d94c02e1b67038fe2d5fbd3
32
py
Python
smook/src/main/resources/__run__.py
zlatozar/coding-sessions
1f59b2b04dee2116cc9baeae744bd4322fc789ad
[ "BSD-3-Clause" ]
null
null
null
smook/src/main/resources/__run__.py
zlatozar/coding-sessions
1f59b2b04dee2116cc9baeae744bd4322fc789ad
[ "BSD-3-Clause" ]
null
null
null
smook/src/main/resources/__run__.py
zlatozar/coding-sessions
1f59b2b04dee2116cc9baeae744bd4322fc789ad
[ "BSD-3-Clause" ]
null
null
null
import examples examples.main()
10.666667
15
0.8125
4
32
6.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.09375
32
3
16
10.666667
0.896552
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
46412c1e4db29897dc5f7101af361461ea622684
190
py
Python
ex027.py
ranierelm/Python_exercise
db143d2dc019bbd479397dc64efbe4fe0aa1abab
[ "MIT" ]
null
null
null
ex027.py
ranierelm/Python_exercise
db143d2dc019bbd479397dc64efbe4fe0aa1abab
[ "MIT" ]
null
null
null
ex027.py
ranierelm/Python_exercise
db143d2dc019bbd479397dc64efbe4fe0aa1abab
[ "MIT" ]
null
null
null
nome = str(input('Digite seu nome completo: ')).strip() print('Primeiro nome: {}'.format(nome.split()[0])) ultimo = nome.count(' ') print('Último nome: {}'.format(nome.split()[ultimo]))
38
56
0.642105
25
190
4.88
0.6
0.163934
0.229508
0.311475
0
0
0
0
0
0
0
0.005917
0.110526
190
4
57
47.5
0.715976
0
0
0
0
0
0.317204
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
467b43fc6adbfae53af0dea45b95c75d70a7c366
118
py
Python
metaworkflow/main.py
shteou/kp
b51d485edf5719aaf68bd24cb53cf7302894355d
[ "MIT" ]
null
null
null
metaworkflow/main.py
shteou/kp
b51d485edf5719aaf68bd24cb53cf7302894355d
[ "MIT" ]
null
null
null
metaworkflow/main.py
shteou/kp
b51d485edf5719aaf68bd24cb53cf7302894355d
[ "MIT" ]
null
null
null
#!/usr/bin/env python import metaworkflow.webhooks if __name__ == "__main__": metaworkflow.webhooks.start_server()
19.666667
38
0.771186
14
118
5.857143
0.857143
0.487805
0
0
0
0
0
0
0
0
0
0
0.101695
118
5
39
23.6
0.773585
0.169492
0
0
0
0
0.082474
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
469c6949ddbff22f38bf654d310b44d5ef5d0145
58
py
Python
proxy2/__main__.py
cleberzavadniak/proxy2-original
728a115421ae695a2c75194a7404fadd704e02c7
[ "BSD-3-Clause" ]
null
null
null
proxy2/__main__.py
cleberzavadniak/proxy2-original
728a115421ae695a2c75194a7404fadd704e02c7
[ "BSD-3-Clause" ]
null
null
null
proxy2/__main__.py
cleberzavadniak/proxy2-original
728a115421ae695a2c75194a7404fadd704e02c7
[ "BSD-3-Clause" ]
null
null
null
#!env python2 from . import http_proxy http_proxy.run()
9.666667
24
0.741379
9
58
4.555556
0.777778
0.439024
0
0
0
0
0
0
0
0
0
0.020408
0.155172
58
5
25
11.6
0.816327
0.206897
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
46a0ca657b4c75b79328f004c20fe7c95c5be0b5
171
py
Python
external/model-preparation-algorithm/mpa_tasks/apis/__init__.py
opencv/openvino_training_extensions
f5d809741e192a2345558efc75899a475019cf98
[ "Apache-2.0" ]
775
2019-03-01T02:13:33.000Z
2020-09-07T22:49:15.000Z
external/model-preparation-algorithm/mpa_tasks/apis/__init__.py
opencv/openvino_training_extensions
f5d809741e192a2345558efc75899a475019cf98
[ "Apache-2.0" ]
229
2019-02-28T21:37:08.000Z
2020-09-07T15:11:49.000Z
external/model-preparation-algorithm/mpa_tasks/apis/__init__.py
opencv/openvino_training_extensions
f5d809741e192a2345558efc75899a475019cf98
[ "Apache-2.0" ]
290
2019-02-28T20:32:11.000Z
2020-09-07T05:51:41.000Z
# Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # from .config import BaseConfig, TrainType, LearningRateSchedule from .task import BaseTask
24.428571
63
0.795322
21
171
6.47619
0.904762
0
0
0
0
0
0
0
0
0
0
0.04
0.122807
171
6
64
28.5
0.866667
0.421053
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d3db0a29e12f95980fc8d4fe3a8f309eadd4a583
62,697
py
Python
experiments/experiments_gdsc/convergence/nmf_icm.py
ThomasBrouwer/BNMTF
34df0c3cebc5e67a5e39762b9305b75d73a2a0e0
[ "Apache-2.0" ]
16
2017-04-19T12:04:47.000Z
2021-12-03T00:50:43.000Z
experiments/experiments_gdsc/convergence/nmf_icm.py
ThomasBrouwer/BNMTF
34df0c3cebc5e67a5e39762b9305b75d73a2a0e0
[ "Apache-2.0" ]
1
2017-04-20T11:26:16.000Z
2017-04-20T11:26:16.000Z
experiments/experiments_gdsc/convergence/nmf_icm.py
ThomasBrouwer/BNMTF
34df0c3cebc5e67a5e39762b9305b75d73a2a0e0
[ "Apache-2.0" ]
8
2015-12-15T05:29:43.000Z
2019-06-05T03:14:11.000Z
""" Run NMF ICM on the Sanger dataset. We can plot the MSE, R2 and Rp as it converges, on the entire dataset. We give flat priors (1/10). """ import sys, os project_location = os.path.dirname(__file__)+"/../../../../" sys.path.append(project_location) from BNMTF.code.models.nmf_icm import nmf_icm from BNMTF.data_drug_sensitivity.gdsc.load_data import load_gdsc import numpy, matplotlib.pyplot as plt ########## standardised = False #standardised Sanger or unstandardised iterations = 1000 init_UV = 'random' I, J, K = 622,138,25 minimum_TN = 0.1 alpha, beta = 1., 1. #1., 1. lambdaU = numpy.ones((I,K))/10. lambdaV = numpy.ones((J,K))/10. priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV } # Load in data (_,R,M,_,_,_,_) = load_gdsc(standardised=standardised) # Run the VB algorithm NMF = nmf_icm(R,M,K,priors) NMF.initialise(init_UV) NMF.run(iterations,minimum_TN=minimum_TN) # Extract the performances across all iterations print "icm_all_performances = %s" % NMF.all_performances ''' icm_all_performances = {'R^2': [-134.4795537271895, -1.552667222593644, 0.7275416432525234, 0.758957405027868, 0.7737942869115458, 0.7829400027674102, 0.7886893205578526, 0.7933959748585445, 0.7999895644183418, 0.8057333749741846, 0.8113092511467778, 0.8162529860079986, 0.8211735747724334, 0.8256623632852198, 0.8300367710010396, 0.83353230816861, 0.8366509062473786, 0.8393397993801739, 0.8418402113542578, 0.8445427233712197, 0.8473152145379012, 0.849740538837672, 0.852135469333717, 0.8543435416944982, 0.8565009180382597, 0.8588123120360168, 0.8608809896558028, 0.8628519352579829, 0.8646655629823482, 0.8667641800223356, 0.8689211294002801, 0.8708126647285276, 0.8725352789916565, 0.8742943992342669, 0.8761250988891495, 0.877950474762221, 0.8797836518397667, 0.8814017750783866, 0.8829433953845087, 0.8846011349620354, 0.8862328076321098, 0.8877516686607005, 0.8892724935771723, 0.8907328780837084, 0.8920655636547214, 0.8933718215621492, 0.8946900939300994, 
0.8961018424928876, 0.8974498278712894, 0.8986083310934327, 0.8995947777446518, 0.9004511181105159, 0.9012070741991316, 0.9018819660284048, 0.9024888590762477, 0.9030379569393765, 0.9035372710494516, 0.9039955717123929, 0.9044196502482333, 0.9048136001573491, 0.9051796655834687, 0.9055219955020183, 0.9058427445637453, 0.9061448797675761, 0.9064284372283268, 0.9066964063334187, 0.9069505308581882, 0.9071911745504364, 0.9074201031576852, 0.9076379564378602, 0.9078457654074116, 0.9080444337745052, 0.9082346754158075, 0.9084172706367779, 0.9085927992074442, 0.9087614267777145, 0.9089240705230237, 0.9090807442738802, 0.9092319068097902, 0.9093780706311573, 0.9095195835822631, 0.9096567068549841, 0.9097896712662532, 0.909918828314758, 0.910044401774638, 0.9101666524693738, 0.9102856578409743, 0.9104014814095568, 0.9105142808103779, 0.9106241366116565, 0.9107312497265956, 0.9108357570164469, 0.9109377187964129, 0.9110372524865092, 0.9111345379524156, 0.9112295958942499, 0.9113226032935826, 0.911413607540165, 0.9115026708061679, 0.9115898401973733, 0.91167522885156, 0.911758929603397, 0.9118409483151302, 0.9119212531349614, 0.9119999846478792, 0.9120772668519438, 0.912153156821094, 0.9122277102823301, 0.9123009428746057, 0.9123729166556694, 0.9124436005923965, 0.9125130157601021, 0.9125813153943878, 0.9126485047872078, 0.9127146694260848, 0.9127798005169567, 0.9128439104823616, 0.9129069669894808, 0.9129690141158783, 0.91303007586807, 0.9130901902147511, 0.9131491878829601, 0.9132074022027021, 0.9132647402006583, 0.9133212098870158, 0.9133768420552691, 0.9134315877490591, 0.913485524158565, 0.9135386382594378, 0.9135909678119791, 0.9136425103662793, 0.9136932960546994, 0.9137433368856267, 0.913792644137409, 0.9138412746626855, 0.913889238878524, 0.9139365656994797, 0.9139832080505594, 0.9140292114803641, 0.9140745936792831, 0.9141193263483582, 0.9141634026287101, 0.9142068657535679, 0.9142497409685086, 0.9142920349968777, 0.9143337483318267, 0.914374877265857, 
0.9144154547121696, 0.9144554792478057, 0.9144949591806393, 0.9145338790989948, 0.9145722557859203, 0.9146100943771687, 0.914647410543295, 0.9146841921607967, 0.9147204717589454, 0.914756270496722, 0.9147916004598656, 0.914826473871787, 0.9148609021056724, 0.9148948973228481, 0.9149284800115594, 0.9149616701659378, 0.9149944429710527, 0.9150267964744769, 0.9150587426014973, 0.9150902918156851, 0.9151214761072172, 0.9151523270891484, 0.9151828044236151, 0.9152129196897998, 0.9152426943204929, 0.9152721601710334, 0.9153012999365978, 0.9153301174144226, 0.9153586187570638, 0.9153867986377582, 0.915414589803123, 0.9154420360512162, 0.9154691837952053, 0.9154960436327615, 0.9155226155138347, 0.915548903363186, 0.9155749281432704, 0.915600697691792, 0.9156262017952841, 0.9156514323748831, 0.9156763926115739, 0.9157011003400574, 0.9157255611040298, 0.9157497792963505, 0.9157737599446891, 0.915797515033723, 0.9158210511532063, 0.9158443715153821, 0.915867481676579, 0.915890383244511, 0.9159130701355633, 0.9159355524574111, 0.9159578414491377, 0.9159799268417234, 0.916001813963706, 0.9160235083010821, 0.9160450142782722, 0.9160663350054863, 0.9160874741174502, 0.9161084408764586, 0.9161292443364459, 0.9161498850124705, 0.9161703461300513, 0.9161906358891095, 0.9162107601251055, 0.9162307237126424, 0.9162505310457694, 0.9162701829076731, 0.916289683603157, 0.9163090333401382, 0.9163282361699685, 0.9163472930437883, 0.9163662058992932, 0.916384973228787, 0.916403599532873, 0.9164220899862663, 0.9164404468187672, 0.916458671849508, 0.9164767642761134, 0.9164947265597, 0.9165125623821851, 0.9165302762139367, 0.9165478708344927, 0.9165653464444562, 0.916582697732867, 0.9165999031645339, 0.9166169796173983, 0.9166339343725795, 0.9166507727682797, 0.9166675010967337, 0.9166841373209295, 0.9167006660087997, 0.9167170918986984, 0.9167334096939589, 0.9167496225518356, 0.9167657366769806, 0.9167817544639317, 0.9167976781078641, 0.9168135082188996, 0.9168292460553681, 
0.9168448924613248, 0.916860448447096, 0.9168759148330424, 0.9168912935737352, 0.9169065765961278, 0.916921768736635, 0.9169368724676666, 0.9169518891555547, 0.916966819688841, 0.9169816651856043, 0.9169964246262441, 0.9170110932009301, 0.9170256753808407, 0.9170401726787475, 0.9170545870795697, 0.9170689198031345, 0.9170831028507661, 0.9170971672902442, 0.9171111327461715, 0.9171250093187224, 0.9171387997822146, 0.9171525009271297, 0.9171661202173235, 0.9171796582976357, 0.9171931188017315, 0.9172065040565249, 0.9172198155520411, 0.917233054035778, 0.9172462274829067, 0.91725933670945, 0.9172723722030341, 0.9172853367083583, 0.9172982296596978, 0.9173110487690426, 0.9173237938314409, 0.9173364701638511, 0.9173490781498682, 0.9173616182202114, 0.9173740897043727, 0.9173864929950372, 0.9173988316252399, 0.9174111063652814, 0.9174233176669001, 0.9174354664193447, 0.9174475612575695, 0.9174595866170304, 0.9174715385194975, 0.9174834198951446, 0.9174952359462094, 0.9175069895364265, 0.9175186826103456, 0.917530316820097, 0.9175418931474318, 0.9175534117059958, 0.9175648733491787, 0.9175762759681032, 0.9175876207390299, 0.9175989099543429, 0.9176101440076562, 0.9176213245125664, 0.9176324517294485, 0.9176435275319214, 0.9176545522525494, 0.9176655440765238, 0.9176764729898829, 0.9176873409580146, 0.9176981502893321, 0.9177089051344831, 0.9177196071413455, 0.9177302566134952, 0.9177408561021334, 0.9177514073629809, 0.9177619128952813, 0.9177723732020355, 0.9177827879604991, 0.9177931588581864, 0.9178034856995926, 0.9178137655585019, 0.9178239861150813, 0.9178341568682694, 0.9178442837563828, 0.9178543680823877, 0.9178644101111941, 0.9178744100983706, 0.9178843692966896, 0.9178942863942305, 0.9179041598117905, 0.9179139926466806, 0.917923785885773, 0.9179335446022552, 0.9179432598554497, 0.9179529308759687, 0.9179625632126511, 0.9179721567865842, 0.917981711982335, 0.9179912227713521, 0.9180006592554061, 0.9180100317947045, 0.9180193468879222, 0.9180286069173953, 
0.9180378144654257, 0.9180469847556958, 0.9180560971001231, 0.9180651515462092, 0.9180741505727783, 0.9180830981951573, 0.9180919949557538, 0.9181008424566248, 0.9181096415878666, 0.9181183940695526, 0.9181271003583596, 0.9181357602926836, 0.9181443699720522, 0.9181529324278016, 0.9181614506967074, 0.9181699253895756, 0.9181783576004265, 0.9181867474535641, 0.9181950967507584, 0.9182034064084998, 0.9182116772531821, 0.9182199090735382, 0.9182281019902833, 0.9182362561951622, 0.9182443718567834, 0.918252448676901, 0.9182604872651636, 0.9182684867515505, 0.9182764492758166, 0.9182843754554415, 0.9182922652237414, 0.9183001180131231, 0.918307933742946, 0.9183157137850115, 0.9183234584360397, 0.9183311675111028, 0.9183388417444586, 0.9183464817695591, 0.9183540851903239, 0.9183616548088498, 0.9183691903794116, 0.9183766896070994, 0.918384155322908, 0.918391587798042, 0.9183989868741353, 0.9184063531333976, 0.9184136846408097, 0.9184209801960084, 0.9184282415495012, 0.9184354948372251, 0.9184427476201946, 0.9184499643711339, 0.9184571424368935, 0.9184643046507801, 0.9184714235537593, 0.9184785048519439, 0.9184855520418773, 0.9184925669378343, 0.9184995510617032, 0.9185065043091963, 0.9185134258950839, 0.9185203156363283, 0.9185271754264968, 0.9185340037849428, 0.9185407977881136, 0.9185475546521311, 0.9185542762144604, 0.9185609645618209, 0.9185676209016038, 0.9185742462625344, 0.9185808411144076, 0.9185874045578153, 0.9185939370320532, 0.9186004413221699, 0.9186069184377778, 0.9186133588328831, 0.918619760496082, 0.9186261259417745, 0.9186324581758548, 0.9186387612503132, 0.9186450341971095, 0.9186512753461483, 0.9186574786473194, 0.9186636450039551, 0.9186697774924564, 0.9186758667631205, 0.9186819260103238, 0.9186879581826749, 0.9186939624233459, 0.9186999378166607, 0.9187058843763377, 0.9187118021728915, 0.918717691236469, 0.9187235514787546, 0.9187293814563139, 0.9187351821032314, 0.9187409538632081, 0.9187466962540807, 0.9187524111156539, 0.9187580972190208, 
0.9187637538741382, 0.9187693824737719, 0.9187749828930678, 0.9187805571418516, 0.9187861050163544, 0.9187916263723447, 0.9187971203967904, 0.9188026001452767, 0.918808051314627, 0.9188134746095484, 0.9188188657720888, 0.9188242289620789, 0.9188295683301716, 0.9188348832595574, 0.9188401729557373, 0.9188454371543723, 0.918850675690897, 0.918855888648101, 0.9188610760939129, 0.9188662376897558, 0.9188713741486454, 0.9188764854958021, 0.9188815723414641, 0.9188866349330918, 0.9188916736159155, 0.9188966888802157, 0.9189016811583017, 0.9189066507077013, 0.9189115976933229, 0.9189165219375051, 0.9189214225597764, 0.918926300373903, 0.9189311554797703, 0.9189359867855078, 0.918940795756994, 0.9189455837274397, 0.918950350674029, 0.9189550969874919, 0.9189598233209857, 0.9189645299154563, 0.9189692152620972, 0.918973880351141, 0.9189785451089135, 0.9189831860173695, 0.9189878056926789, 0.91899241096817, 0.918997006061067, 0.9190015821615318, 0.9190061368285303, 0.9190106702514237, 0.9190151828678926, 0.9190196749592554, 0.9190241470036736, 0.9190285996284175, 0.9190330332329392, 0.9190374474624812, 0.9190418427569332, 0.9190462191712561, 0.9190505768146169, 0.9190549190199354, 0.9190592504831077, 0.9190635631852937, 0.9190678581336663, 0.9190721353048792, 0.9190763954331067, 0.919080638792899, 0.9190848649987117, 0.9190890738646929, 0.9190932651749568, 0.9190974397161007, 0.9191015967944396, 0.9191057367157681, 0.9191098599447342, 0.9191139665916379, 0.9191180567521015, 0.9191221302621586, 0.919126186451527, 0.9191302270981453, 0.9191342502857779, 0.9191382561796254, 0.9191422450658459, 0.9191462198854765, 0.9191501799677059, 0.9191541239113624, 0.9191580518372403, 0.919161963920251, 0.9191658604004322, 0.919169741385431, 0.9191736069259315, 0.9191774570309678, 0.9191812917407355, 0.9191851111551054, 0.9191889152898989, 0.9191927042987847, 0.9191964785845543, 0.9192002381044608, 0.9192039830114662, 0.9192077133598231, 0.9192114291968267, 0.9192151308132428, 
0.9192188182384635, 0.9192224914676959, 0.9192261504340249, 0.919229798038283, 0.9192334319771417, 0.9192370549402139, 0.9192406670250348, 0.9192442654819484, 0.9192478503839622, 0.9192514217301802, 0.9192549795860712, 0.9192585240721447, 0.9192620428291587, 0.91926554315793, 0.9192690292505181, 0.9192725017665027, 0.9192759604626728, 0.9192794047917575, 0.9192828345648004, 0.9192862499066202, 0.919289650642668, 0.9192930372861958, 0.9192964102560927, 0.9192997673017885, 0.9193031074501049, 0.9193064326215065, 0.9193097445058386, 0.9193130434295034, 0.9193163294022929, 0.9193196026401205, 0.9193228631381725, 0.9193261099231614, 0.9193293404755027, 0.9193325583246902, 0.9193357666473726, 0.9193389623395732, 0.9193421464178786, 0.9193453188391768, 0.9193484790233462, 0.9193516269513704, 0.9193547622523962, 0.9193578852466034, 0.9193609964890035, 0.9193640962052894, 0.9193671846766925, 0.919370261337547, 0.9193733264440381, 0.9193763804715552, 0.9193794216405268, 0.9193824710203193, 0.9193855102346334, 0.919388537765037, 0.9193915539015134, 0.9193945591071497, 0.9193975534906114, 0.9194005372094284, 0.9194035101365349, 0.9194064724922479, 0.9194094206625805, 0.9194123574990316, 0.919415283730179, 0.9194181990055724, 0.9194211010109119, 0.9194239923413177, 0.9194268732860169, 0.9194297439888062, 0.9194326045571004, 0.9194354550356418, 0.9194382953310213, 0.9194411241044623, 0.9194439425976848, 0.9194467509939048, 0.9194495494483895, 0.9194523381258858, 0.919455117172252, 0.9194578866995887, 0.919460646712138, 0.9194633973757966, 0.9194661388991676, 0.9194688715155633, 0.9194715951499322, 0.9194743099822784, 0.9194770161205749, 0.9194797135868094, 0.9194824100961683, 0.9194851083413657, 0.9194878024953378, 0.9194904891853283, 0.9194931670863355, 0.9194958347353721, 0.9194984928711202, 0.9195011418134456, 0.9195037829621893, 0.9195064243979092, 0.9195090568807437, 0.9195116806121779, 0.9195142959691665, 0.9195168971120316, 0.9195194907481257, 0.9195220769338787, 
0.9195246547782293, 0.9195272240754219, 0.9195297847974344, 0.9195323368660581, 0.9195348804964946, 0.9195374158291618, 0.9195399414980665, 0.9195424551110282, 0.9195449616604789, 0.9195474607490214, 0.9195499523099389, 0.9195524363308888, 0.919554912773625, 0.9195573815546556, 0.9195598426995437, 0.9195622961566648, 0.9195647419594459, 0.9195671803003262, 0.9195696112025569, 0.9195720347063853, 0.9195744508055534, 0.9195768595648163, 0.9195792610759586, 0.9195816554063082, 0.9195840450846219, 0.9195864275256467, 0.9195888030663826, 0.9195911718072203, 0.919593533812425, 0.9195958861227751, 0.9195982302507101, 0.9196005674717492, 0.9196028980250756, 0.9196052219800102, 0.9196075393543164, 0.9196098501539082, 0.9196121544286839, 0.9196144522721004, 0.9196167435258974, 0.9196190280806509, 0.9196213060546093, 0.9196235928653093, 0.919625888710774, 0.9196281774216438, 0.9196304588586893, 0.919632733263348, 0.9196350008235398, 0.9196372616013511, 0.919639515626083, 0.919641762961928, 0.9196440038899455, 0.9196462387218368, 0.9196484673368123, 0.9196506895744303, 0.9196529056064018, 0.919655115525411, 0.9196573193929116, 0.9196595172462225, 0.9196617091728976, 0.9196638951948707, 0.9196660753803096, 0.9196682497484967, 0.9196704184118577, 0.9196725814471333, 0.9196747389151771, 0.9196768908618671, 0.9196790373510622, 0.9196811785051023, 0.91968331434561, 0.9196854448119653, 0.9196875696674697, 0.9196896898272257, 0.9196918044143441, 0.9196939132963147, 0.9196960166710113, 0.9196981146986751, 0.919700207474719, 0.9197022993472491, 0.9197043867776772, 0.9197064691347765, 0.919708545978941, 0.919710617187723, 0.9197126827521886, 0.9197147427679965, 0.9197167975230682, 0.9197188470646557, 0.9197208913509587, 0.9197229304070671, 0.9197249643453119, 0.9197269932495239, 0.9197290171461848, 0.919731032273836, 0.9197330398363933, 0.9197350423428222, 0.9197370399665693, 0.919739031471071, 0.9197410173293048, 0.9197429979892489, 0.9197449736367412, 0.9197469444380568, 
0.9197489102134823, 0.9197508710553222, 0.919752826986966, 0.9197547780499893, 0.9197567246280485, 0.919758666516288, 0.9197606038063374, 0.9197625366195694, 0.9197644650396815, 0.919766389088445, 0.9197683086893909, 0.919770224054212, 0.9197721351748465, 0.9197740414903539, 0.9197759424822394, 0.9197778390362854, 0.9197797313924403, 0.9197816196206614, 0.9197835037435035, 0.9197853837973432, 0.9197872598069242, 0.9197891317628577, 0.9197909995842131, 0.9197928633051327, 0.9197947229918617, 0.919796578557424, 0.9197984300402288, 0.9198002775469318, 0.9198021212913586, 0.9198039613900665, 0.9198057978504371, 0.9198076306900144, 0.9198094599510159, 0.919811285668612, 0.9198131080206505, 0.919814927114819, 0.9198167427808266, 0.9198185550057372, 0.9198203637907946, 0.9198221691477866, 0.9198239710923315, 0.9198257696646103, 0.9198275648964395, 0.9198293567878395, 0.9198311453416896, 0.919832930518151, 0.919834712385184, 0.9198364909690685, 0.9198382663136809, 0.9198400384265647, 0.9198418073195529, 0.9198435730020494, 0.9198453354852977, 0.9198470947818024, 0.9198488509248016, 0.9198506040067647, 0.9198523539519929, 0.919854102086211, 0.9198558563895634, 0.9198576073122934, 0.9198593553886302, 0.9198611005225755, 0.9198628426389996, 0.9198645816208225, 0.9198663173928799, 0.9198680499306195, 0.9198697792323826, 0.9198715053116182, 0.9198732281947751, 0.91987494788308, 0.9198766644048927, 0.9198783777760209, 0.9198800880134507, 0.9198817950610443, 0.919883498871316, 0.9198851994663667, 0.9198868968599645, 0.9198885910680915, 0.9198902821840541, 0.9198919700561056, 0.9198936549422727, 0.9198953369865444, 0.9198970161671177, 0.9198986923173149, 0.9199003654044602, 0.9199020354507976, 0.9199037025053274, 0.9199053665922503, 0.9199070277383863, 0.9199086859337061, 0.9199103412606774, 0.9199119937528533, 0.9199136434782755, 0.9199152904908182, 0.9199169348017489, 0.9199185764573188, 0.9199202153940229, 0.9199218516259765, 0.9199234852908038, 0.919925116347692, 
0.919926744770952, 0.91992837056485, 0.9199299937685582, 0.9199316144154632, 0.9199332325321166, 0.9199348479169558, 0.9199364608548203, 0.9199380714059824, 0.9199396796809496, 0.9199412858732509, 0.9199428898792966, 0.9199444916068098, 0.9199460910743793, 0.9199476882990869, 0.9199492832718699, 0.9199508760616628, 0.9199524667023886, 0.9199540552052177, 0.9199556415713782, 0.9199572258263593, 0.9199588080080058, 0.9199603880730197, 0.919961965813284, 0.9199635412950399, 0.9199651146262536, 0.9199666857886835, 0.9199682547740387, 0.9199698211152123, 0.919971383752569, 0.9199729435091364, 0.9199745006693065, 0.9199760553469696, 0.9199776076121232, 0.9199791575242495, 0.9199807051153509, 0.9199822501783046, 0.9199837925199164, 0.919985332379318, 0.9199868698826872, 0.9199884137742557, 0.9199899642460003, 0.9199915119813075, 0.9199930569577862, 0.9199945991540281, 0.9199961386481623, 0.9199976755243112, 0.919999209811926, 0.9200007408193023, 0.9200022688890952, 0.9200037938860217, 0.9200053158880681, 0.9200068350182596, 0.9200083513901216, 0.9200098650938676, 0.920011376203645, 0.9200128847689876, 0.9200143908207941, 0.920015894380015, 0.9200173954722912, 0.9200188941110202, 0.9200203902879303, 0.9200218839953691, 0.9200233752421995, 0.9200248640192311, 0.9200263502958977, 0.9200278340911187, 0.9200293154376105, 0.9200307943868307, 0.9200322711085519, 0.9200337460118875, 0.9200352187284374, 0.9200366884770148, 0.9200381553519383, 0.9200396195221098, 0.9200410811014035, 0.9200425401417937, 0.9200439966574779, 0.9200454521133994, 0.9200469049592935, 0.9200483551863269, 0.9200498028433758, 0.9200512479661619, 0.920052690578584, 0.9200541301501823, 0.9200555662859394, 0.9200569994943383, 0.9200584300274741, 0.9200598579712552, 0.9200612832022395, 0.9200627056712135, 0.9200641255943651, 0.9200655430274357, 0.9200669579812631, 0.9200683704677023, 0.9200697805003143, 0.9200711880920444, 0.9200725924005101, 0.9200739872898447, 0.9200753797905181, 0.9200767691701144, 
0.9200781554354204, 0.9200795385723265, 0.9200809186076077, 0.9200822958661365, 0.92008367035386, 0.9200850421210167, 0.9200864112148471, 0.9200877776839734, 0.9200891417828317, 0.9200905034074445, 0.9200918615552618, 0.9200932166762783, 0.9200945688956241, 0.9200959182154893, 0.9200972646322175, 0.9200986081752095, 0.9200999488772823, 0.9201012867695375, 0.9201026219387765, 0.9201039544377568, 0.9201052843698367, 0.9201066117937751, 0.9201079367183296, 0.9201092623716784, 0.9201105883922883, 0.920111911849732, 0.9201132329444703, 0.9201145516939104, 0.9201158680352425, 0.9201171816479236, 0.920118492852559, 0.9201198018119238, 0.9201211083330725, 0.9201224125958705, 0.9201237144810517, 0.9201250140220857, 0.9201263113047216, 0.920127607616406, 0.9201289019894033, 0.9201301941357511, 0.9201314840106133, 0.9201327716366166, 0.9201340570364841, 0.9201353402203525, 0.9201366212165921, 0.9201379000540433, 0.9201391767780035, 0.9201404514509145, 0.9201417240807561, 0.9201429946776754, 0.9201442632426501, 0.9201455297831513, 0.920146794174604], 'MSE': [1584.5299217265892, 29.855262164172501, 3.1865946322806078, 2.8191649118734472, 2.6456370056836263, 2.5386713416365794, 2.4714289731917041, 2.41638129724728, 2.3392645688432205, 2.2720866114313676, 2.2068727662993761, 2.1490522642591858, 2.0915024723145006, 2.0390028920078271, 1.98784107662788, 1.9469582785809072, 1.9104840517791379, 1.8790355305224278, 1.849791430725956, 1.8181836269142844, 1.7857573670804516, 1.7573914711415277, 1.729381052395387, 1.7035560726937211, 1.6783240190363944, 1.6512906191647023, 1.6270959602897725, 1.6040443470032513, 1.5828326784003528, 1.5582878566717449, 1.5330607966838963, 1.5109379431376972, 1.4907907418189132, 1.4702165770564279, 1.4488052400609048, 1.4274561684873155, 1.4060158562683698, 1.3870907519401001, 1.3690604039223415, 1.3496719582804944, 1.3305883836962245, 1.3128242216471477, 1.2950370905298612, 1.2779568530757879, 1.2623701456691798, 1.2470925286201962, 1.231674393894062, 
1.215162988459431, 1.1993973389029784, 1.1858478181863739, 1.1743106216719017, 1.1642951108766242, 1.155453665736238, 1.1475603248741513, 1.1404622782158977, 1.1340401874269379, 1.128200353105167, 1.1228402002732503, 1.1178802996040003, 1.1132727746935451, 1.1089913786818328, 1.1049875863447529, 1.1012361975064369, 1.0977025110001988, 1.0943861044375305, 1.0912520148023406, 1.0882798473994819, 1.0854653479392182, 1.0827878647460318, 1.080239915395101, 1.0778094414143522, 1.0754858732928174, 1.0732608617341806, 1.0711252805100611, 1.0690723487968468, 1.0671001292008226, 1.0651978947017116, 1.0633654835418609, 1.0615975299680271, 1.0598880399227875, 1.0582329451194008, 1.0566291907327503, 1.0550740771943263, 1.0535634934765075, 1.0520948223812718, 1.0506650133921538, 1.0492731607696777, 1.0479185216008051, 1.0465992522288838, 1.0453144103511809, 1.0440616461446193, 1.0428393589193354, 1.0416468432460553, 1.040482725814025, 1.0393449029659505, 1.0382331325992027, 1.0371453447881993, 1.0360809852760293, 1.0350393269148537, 1.0340198187689842, 1.0330211376114768, 1.0320421976747258, 1.0310829303955886, 1.0301437083035396, 1.0292228871569613, 1.028319016710179, 1.0274314294428066, 1.0265594735878689, 1.0257029662366559, 1.0248611815795787, 1.024034482571144, 1.0232226227205223, 1.0224238098281613, 1.0216379820008827, 1.020864139401195, 1.0201023848819188, 1.0193525731526198, 1.0186150823686513, 1.0178893970114946, 1.0171752363078388, 1.0164721561867367, 1.0157821364160968, 1.0151012784639808, 1.0144306697196075, 1.0137702164981257, 1.0131195586476764, 1.0124792687477666, 1.0118484440042428, 1.0112272367461492, 1.0106152053398518, 1.0100123784387873, 1.0094184036235243, 1.0088331404474138, 1.0082564570021428, 1.0076876883555834, 1.0071267126727035, 1.0065731917771792, 1.0060276762445954, 1.0054896333516123, 1.0049588561965164, 1.004435675755792, 1.0039201722489184, 1.0034118400324754, 1.0029103838414484, 1.0024157250415231, 1.0019278578649391, 1.001446825673536, 
1.0009722435239741, 1.0005041280587705, 1.0000423821114182, 0.99958718593244711, 0.99913834323214801, 0.99869579394423202, 0.99825935479039229, 0.99782916756363027, 0.99740485181093208, 0.99698616006294061, 0.99657295096832521, 0.99616508156558681, 0.99576241883720507, 0.99536482054780617, 0.99497204707276121, 0.99458386456645265, 0.99420056325717898, 0.99382216597996609, 0.99344853326008653, 0.99307954271861931, 0.9927148202076499, 0.99235399598997387, 0.99199754184565214, 0.99164532234789327, 0.991297086824941, 0.99095246270590465, 0.99061165238302895, 0.99027461144492457, 0.98994126793306669, 0.98961168414774692, 0.98928664666519961, 0.98896564323592595, 0.98864813102486127, 0.98833398608468603, 0.98802320900077312, 0.98771575387024702, 0.98741137552225611, 0.98710998228926528, 0.98681169362504739, 0.98651660401766927, 0.98622467626324617, 0.98593570177224055, 0.98564961570717546, 0.98536636669042976, 0.9850858959198262, 0.98480806322602232, 0.98453279153714501, 0.98426004328372696, 0.98398975348054196, 0.98372190332423226, 0.98345656396700476, 0.98319361719239895, 0.98293293155132355, 0.98267462714728426, 0.98241864165923198, 0.98216491092452762, 0.98191338319642274, 0.98166402209879411, 0.98141678512093367, 0.98117156393370297, 0.9809282526448877, 0.9806868452305153, 0.98044753788009409, 0.9802102346897017, 0.97997486740947237, 0.97974137902744707, 0.97950971815209131, 0.97927987562347707, 0.97905180109386325, 0.97882549213150827, 0.97860090135296796, 0.97837801763397625, 0.97815681831180246, 0.97793732102002862, 0.97771947312089236, 0.97750321409241214, 0.97728851785548387, 0.9770753631340624, 0.97686375931249569, 0.97665367760627131, 0.97644507495274235, 0.97623789906777347, 0.97603211744268203, 0.97582772773127802, 0.97562479204902053, 0.97542356226532057, 0.97522384098062365, 0.97502554303705136, 0.97482860600052601, 0.97463295627887714, 0.97443838378215986, 0.97424506899939933, 0.97405295651216905, 0.97386210826875896, 0.9736724873428283, 
0.97348402116655097, 0.97329668173410033, 0.97311044337134056, 0.97292529894245772, 0.9727412337303909, 0.97255823786331075, 0.97237629952473259, 0.9721954091199525, 0.9720155437888699, 0.97183679795151312, 0.9716591150425693, 0.97148246614544109, 0.97130683527990314, 0.97113221205382561, 0.97095858339047914, 0.97078596121482064, 0.97061440178124014, 0.97044385279612311, 0.9702742965665252, 0.97010570987740508, 0.96993807846200941, 0.96977219761331146, 0.96960770397148743, 0.96944436801282252, 0.96928207160858293, 0.96912078231114607, 0.96896053765811296, 0.9688012503543767, 0.96864291285807258, 0.9684854826709014, 0.96832893257794117, 0.96817324515228864, 0.96801841165142255, 0.96786433880003286, 0.96771101705415208, 0.96755855766777099, 0.9674069285396738, 0.96725613628641827, 0.96710620766790734, 0.9669571450812009, 0.96680888634068152, 0.96666142695993162, 0.96651476190138463, 0.96636889900710754, 0.96622383368437181, 0.96607952461180957, 0.96593596278021088, 0.96579314290617413, 0.96565105458929923, 0.9655095968376417, 0.96536895168963943, 0.96522916567352413, 0.96509020451882399, 0.96495200738166365, 0.96481454076862694, 0.96467778193681708, 0.9645417115633953, 0.96440631816608113, 0.9642716004157077, 0.9641375483312723, 0.96400418657759934, 0.96387150139748179, 0.96373946597985538, 0.96360807572112217, 0.96347731174912155, 0.96334717101858225, 0.96321763161619622, 0.96308868965238448, 0.96296013243838086, 0.96283231100885069, 0.9627052023771836, 0.96257877954484306, 0.96245299396704542, 0.96232782637068659, 0.96220327320522603, 0.96207930463255054, 0.96195590011825471, 0.96183303043190205, 0.96171068969053819, 0.96158888166889323, 0.96146758663027876, 0.96134680686126617, 0.96122657658603405, 0.96110703989380841, 0.96098808568773386, 0.96086964451496426, 0.96075170113638342, 0.96063425245367973, 0.96051729547779585, 0.96040081555666634, 0.96028482803413551, 0.96016935138010928, 0.96005434936932443, 0.95993981045961074, 0.95982567531642227, 
0.95971204850735603, 0.95959893903094795, 0.95948628198957431, 0.95937407830617782, 0.9592623234829688, 0.95915108802815485, 0.95904072162283349, 0.95893110309699403, 0.95882215644400481, 0.95871385380074625, 0.95860616496535811, 0.95849891188602443, 0.95839233652461431, 0.95828643832556304, 0.9581811882972523, 0.95807653947757565, 0.95797248552269032, 0.95786900769539374, 0.95776609558538151, 0.9576637290751624, 0.95756190282358156, 0.95746061872071231, 0.95735992238535594, 0.9572597783638771, 0.95716015113902109, 0.95706103356697569, 0.95696241285239603, 0.95686428754145747, 0.95676663656117511, 0.95666944919253927, 0.95657271577028813, 0.95647643876534061, 0.95638061676584629, 0.95628524752921118, 0.95619032908344093, 0.95609586491673426, 0.95600184789881426, 0.95590828820519513, 0.95581516080991513, 0.95572245849111459, 0.95563018202870609, 0.95553833806109612, 0.95544692753143523, 0.95535593439552546, 0.95526535518301892, 0.95517519205677393, 0.95508543642912413, 0.95499608089139465, 0.95490715346745569, 0.95481862138478946, 0.95473048751732448, 0.95464277870566738, 0.95455546183931794, 0.95446853374634588, 0.95438199627895248, 0.954295842627785, 0.95421009542426616, 0.9541247687075054, 0.95403984200406466, 0.95395500963554014, 0.95387018317047645, 0.95378577812568299, 0.95370182553163263, 0.9536180583365329, 0.95353479769335769, 0.95345197686505145, 0.95336955495704889, 0.9532875107501132, 0.95320582644467444, 0.95312450326045284, 0.95304355038126931, 0.95296296994788054, 0.95288273981366689, 0.95280287729584567, 0.95272341658744819, 0.9526443902479147, 0.95256577678677878, 0.95248755179837241, 0.95240970116139456, 0.95233221284416958, 0.9522550813521099, 0.9521783172044368, 0.95210191526327259, 0.95202584295548875, 0.95195008847295004, 0.95187476346285638, 0.95179989145000643, 0.95172544302670259, 0.95165138303686991, 0.95157766408959377, 0.95150429750679422, 0.95143130282142685, 0.95135875079384369, 0.95128663085892717, 0.95121490703597633, 
0.95114368867642862, 0.95107282146264804, 0.95100227090855871, 0.95093204703537171, 0.95086216055257067, 0.95079261129970982, 0.95072339845207232, 0.9506545216570681, 0.95058598194760779, 0.95051779620568966, 0.95044995350652772, 0.95038244866072263, 0.95031528730751758, 0.95024844792908636, 0.95018194489837016, 0.95011578628591664, 0.95004995580233587, 0.94998445490788896, 0.94991926009622829, 0.94985437375057857, 0.94978979755782833, 0.94972554102696927, 0.94966145146362813, 0.94959769615364764, 0.94953426685497477, 0.94947121336739482, 0.94940848703872416, 0.94934603932412043, 0.94928387743760634, 0.9492220106714172, 0.94916044211718464, 0.94909917369961894, 0.9490382044504353, 0.94897753357516201, 0.94891716503369539, 0.94885709048681177, 0.94879730963953623, 0.94873781535470325, 0.94867860473808308, 0.94861967375196288, 0.94856101666216386, 0.94850262841251809, 0.94844450599105645, 0.94838664746905754, 0.94832905492439667, 0.94827173865481806, 0.94821468914274298, 0.94815790521994125, 0.94810139965661877, 0.94804515530827937, 0.94798915658207472, 0.94793340374485258, 0.94787789222674534, 0.94782261438861404, 0.94776756741242474, 0.94771276894474554, 0.94765820740409945, 0.94760364973790689, 0.94754937100646575, 0.94749534061180452, 0.94744147863328043, 0.94738773574744883, 0.94733421499164561, 0.94728094491550474, 0.94722792330431116, 0.94717514503907729, 0.94712260682958738, 0.9470703030833979, 0.94701822646413758, 0.9469663722999313, 0.94691474473991677, 0.94686333863927385, 0.94681215335519475, 0.94676118761089234, 0.94671040242549642, 0.94665974287715526, 0.94660930275191657, 0.94655907027017605, 0.94650904570498817, 0.9464592204696578, 0.94640959135312619, 0.94636016286461055, 0.94631093717777937, 0.94626191681757221, 0.94621309258417341, 0.94616447259072289, 0.94611605326073389, 0.94606782915981935, 0.94601979899793887, 0.94597196165672504, 0.94592431905387842, 0.94587687902882955, 0.94582962078731947, 0.94578256674108685, 0.94573571495799458, 
0.94568906209122028, 0.94564257374323801, 0.94559625775966516, 0.94555013052820103, 0.94550419063607272, 0.94545843603756574, 0.94541286392526724, 0.94536747304016966, 0.94532226278948339, 0.94527723306755296, 0.94523238340425753, 0.94518771263153833, 0.94514322056435396, 0.94509890540543751, 0.94505476244385711, 0.94501079217956863, 0.94496699282357044, 0.94492336374134089, 0.94487990437972857, 0.94483661133802099, 0.94479348427245569, 0.94475052323909114, 0.94470772902054334, 0.94466506768951186, 0.94462256618506368, 0.94458019305025998, 0.94453794714435624, 0.94449586062654045, 0.94445393264293687, 0.94441216320402555, 0.94437055154410077, 0.94432909625371797, 0.94428794188302034, 0.94424700304375386, 0.94420623070679366, 0.94416561715788694, 0.9441251652415571, 0.94408488135852864, 0.9440447677187831, 0.94400482286247378, 0.94396504883107757, 0.94392543962174302, 0.94388599033526688, 0.94384672729366859, 0.94380766187896437, 0.94376877162995165, 0.94373003678268319, 0.943691453519655, 0.94365302172634247, 0.94361473887748404, 0.9435766050293799, 0.94353863156531914, 0.94350084795364009, 0.94346321291439683, 0.94342568929441872, 0.94338831339692553, 0.94335107333221435, 0.9433139696045072, 0.94327700899874201, 0.94324019173732732, 0.94320352215765813, 0.94316699651500147, 0.94313060831810436, 0.94309435492732441, 0.94305823305346226, 0.94302224931238343, 0.94298640070781492, 0.94295068167974461, 0.9429151130415262, 0.94287944837190618, 0.94284390259480022, 0.94280849346935292, 0.94277321760400346, 0.94273806958261896, 0.94270304813427741, 0.94266815141655569, 0.94263338091557725, 0.94259873405457839, 0.94256425310171321, 0.94252990470666753, 0.94249568034820663, 0.9424615841250944, 0.94242764310472049, 0.94239382693530227, 0.94236013223412274, 0.94232655731937764, 0.94229310093477547, 0.94225976255702137, 0.942226543278602, 0.94219345875746408, 0.94216049447080696, 0.94212764827578932, 0.94209491835644998, 0.94206230278595793, 0.94202979985839375, 
0.94199740826266309, 0.94196512794915188, 0.94193295697757928, 0.94190089290818235, 0.94186893301220942, 0.94183707816744233, 0.94180532626860014, 0.94177367605286477, 0.94174212726307505, 0.94171058966462684, 0.94167903176431067, 0.94164752171378874, 0.94161609895985143, 0.94158477899933557, 0.94155357894292291, 0.94152249015119727, 0.94149150888310207, 0.94146061876649689, 0.94142972529350255, 0.94139893653088091, 0.94136825012212233, 0.94133766165847432, 0.94130723943912797, 0.94127690501681605, 0.94124665773155469, 0.94121650800493739, 0.94118645824342373, 0.94115650877475088, 0.94112666051362803, 0.9410969109431172, 0.9410672584209302, 0.9410377189233321, 0.94100832042857896, 0.94097900454661954, 0.94094977592529927, 0.94092063534491643, 0.94089158294982767, 0.94086261918734915, 0.94083374503397643, 0.9408049601907309, 0.94077626526139912, 0.94074765985502851, 0.94071914172090421, 0.94069071058708975, 0.94066236598286046, 0.94063410798140923, 0.94060593582538143, 0.940577848441289, 0.94054984504168238, 0.94052189605095116, 0.94049403170547075, 0.94046624806378698, 0.94043854395175785, 0.94041091861775505, 0.94038340667198661, 0.94035599042539364, 0.9403286549598685, 0.94030139747799701, 0.94027421716901805, 0.94024711382517678, 0.94022008737730745, 0.94019313724180975, 0.94016626232564293, 0.94013946447977037, 0.94011274498398523, 0.94008610245528779, 0.94005935657460182, 0.94003250502590374, 0.94000573692137657, 0.93997905388940883, 0.93995245310621478, 0.93992593237394073, 0.93989949096643743, 0.93987312854101046, 0.93984684434688248, 0.93982063509686298, 0.93979449714538243, 0.93976843190516379, 0.93974244125268569, 0.9397165231796637, 0.93969067660209371, 0.93966490080122766, 0.93963919534059648, 0.93961355919619427, 0.93958799211148947, 0.9395624932892106, 0.93953706250383795, 0.93951169844047022, 0.93948640020157337, 0.9394611670754176, 0.93943599852543003, 0.93941089380473597, 0.93938585148247877, 0.93936087130576995, 0.93933595398360359, 
0.9393111022843369, 0.93928630550519332, 0.93926157390201737, 0.93923690902461143, 0.93921230855869853, 0.93918777063014325, 0.93916329412302646, 0.93913882818314642, 0.93911441419681563, 0.93909005954668046, 0.93906576937424513, 0.93904154511162519, 0.9390173868633056, 0.93899329351049443, 0.93896926168573924, 0.93894529083639844, 0.93892138145135506, 0.93889753323716241, 0.93887374488003283, 0.93885001539950419, 0.93882634448585411, 0.93880277613202057, 0.93877929625735124, 0.93875587551770656, 0.93873251188450491, 0.93870921982023847, 0.93868599379309869, 0.9386628285636619, 0.93863972195841816, 0.93861667203266574, 0.93859368088828099, 0.93857074744569391, 0.93854787143134855, 0.93852505235899153, 0.93850228574148387, 0.9384795739747589, 0.93845691598714553, 0.9384343103591003, 0.93841175611172367, 0.93838925299038345, 0.93836680188943933, 0.93834440033299316, 0.93832204841533529, 0.93829975269706112, 0.93827751924234015, 0.93825533769131253, 0.93823320523761422, 0.93821112106304283, 0.93818908490380415, 0.93816709633444717, 0.93814515506557883, 0.93812326120700695, 0.93810141570527117, 0.93807961816105467, 0.93805786779957112, 0.93803616563806924, 0.93801451122732615, 0.93799290331992591, 0.93797133941502708, 0.93794981814940903, 0.93792833943673737, 0.93790690307182545, 0.93788550856090613, 0.93786415549263735, 0.93784284178696398, 0.93782156618440216, 0.93780033067662483, 0.9377791354149283, 0.93775798038476621, 0.93773686544827184, 0.93771579042277819, 0.93769475483833942, 0.93767375832283961, 0.93765280087603886, 0.93763188246426188, 0.93761100355345395, 0.93759016334876932, 0.93756936154284487, 0.93754859762251641, 0.93752787149955641, 0.9375071830355709, 0.93748653212057298, 0.93746591862306317, 0.93744534239678912, 0.93742480305300957, 0.93740429951022974, 0.93738383265379754, 0.93736338697842114, 0.9373428691506227, 0.93732239086161062, 0.93730194586319704, 0.93728153527814662, 0.93726115998515902, 0.93724082135356523, 0.93722052026246683, 
0.9372002569990181, 0.93718003158262286, 0.93715984385598228, 0.93713969350975845, 0.93711958052960631, 0.93709950458383773, 0.93707946548756749, 0.93705946304213505, 0.9370394979041079, 0.93701957062886854, 0.93699968095792996, 0.93697982873026475, 0.93696001375894866, 0.9369402349527004, 0.93692049408630373, 0.93690078814195255, 0.93688111543559316, 0.93686147622222682, 0.93684187245126782, 0.93682230450488524, 0.93680277212291729, 0.93678327473227108, 0.936763812049902, 0.93674438376213143, 0.93672498998625831, 0.9367056297577655, 0.93668630268422604, 0.93666700796983993, 0.93664774498451464, 0.93662851359646782, 0.93660931326475549, 0.9365901447321171, 0.93657100783346248, 0.93655190095918062, 0.93653282458660292, 0.93651377901618571, 0.9364947641980309, 0.93647577967398987, 0.93645682505358507, 0.93643790002627647, 0.93641900694947944, 0.93640014249179693, 0.93638130594829938, 0.93636249602652333, 0.93634371046300902, 0.93632495046931119, 0.93630621712467532, 0.93628751021169054, 0.93626882953057544, 0.93625017518732245, 0.93623154637569328, 0.93621294269893551, 0.93619436402640455, 0.93617581034373409, 0.93615728135281173, 0.93613877661101552, 0.93612029662474416, 0.93610184382807649, 0.93608341744627577, 0.93606501621659588, 0.93604664035238982, 0.93602828995062426, 0.93600997047444412, 0.9359916943170431, 0.93597345185252712, 0.93595523975472628, 0.93593705669161509, 0.93591890184452009, 0.93590077451775566, 0.9358826743370332, 0.93586460372479929, 0.93584656494059748, 0.93582855518761288, 0.93581057299010617, 0.93579251607805647, 0.93577438220615827, 0.93575628033884617, 0.93573821073799779, 0.93572017365399962, 0.93570216817307794, 0.93568419331135944, 0.93566624872439319, 0.93564834250215223, 0.93563047063704252, 0.93561263471129352, 0.93559483381280362, 0.93557706650270167, 0.93555933145323078, 0.93554162760927662, 0.93552395410363343, 0.9355063103569693, 0.93548869600789952, 0.93547111081139611, 0.93545355446756706, 0.9354360268197216, 
0.93541852796460434, 0.93540105799171069, 0.93538361679738713, 0.93536620448912011, 0.93534882142440279, 0.9353314673819626, 0.93531414197915297, 0.93529684461414331, 0.93527957330132205, 0.93526232325581737, 0.93524509878632722, 0.93522790902939246, 0.93521075288188649, 0.93519362836838293, 0.93517653415704194, 0.93515946963998642, 0.93514243465116587, 0.93512541205702626, 0.93510841998901539, 0.93509145855044873, 0.93507452716968087, 0.93505762542892301, 0.93504075304866086, 0.9350239162329983, 0.93500711960194827, 0.93499035720843948, 0.93497362610403745, 0.93495692528398344, 0.93494025619202359, 0.93492361940373991, 0.93490701239066287, 0.93489043450085896, 0.93487388560757101, 0.9348573655721466, 0.93484087423597861, 0.93482441144766493, 0.93480798705946744, 0.93479167283471976, 0.93477538654706638, 0.934759136762636, 0.93474292340204868, 0.93472674663032695, 0.93471060613431911, 0.9346944981144083, 0.93467842250104149, 0.93466237870664115, 0.93464636617869634, 0.93463038434853651, 0.93461443024035007, 0.93459850507023035, 0.93458262056370789, 0.93456677145777955, 0.93455095628895735, 0.9345351750316272, 0.93451942772855146, 0.93450371403587018, 0.93448803356976295, 0.93447238596647375, 0.93445677021080131, 0.93444118568570511, 0.93442563118233979, 0.93441010601347685, 0.93439461007668767, 0.93437910561613102, 0.93436359686019366, 0.93434811808231311, 0.93433266693795303, 0.93431724322352838, 0.9343018476736431, 0.93428648403727588, 0.93427114856471749, 0.93425583935219958, 0.93424055865635702, 0.93422530437353468, 0.93421007789863464, 0.93419487884020636, 0.93417970619535418, 0.9341645449064695, 0.93414940629191356, 0.93413429371960965, 0.93411920771394952, 0.93410414801029718, 0.93408911434288722, 0.93407410659316759, 0.93405912442932482, 0.93404416751405162, 0.93402923531755966, 0.93401432710955168, 0.93399944279669445, 0.93398458226031422, 0.93396974548897305, 0.93395493239529104, 0.93394014443626439], 'Rp': [-5.6994053475107662e-14, 0.42971695944005583, 
0.85310133403478117, 0.87140840315768953, 0.8802749417122413, 0.88523049227834505, 0.88845663773241046, 0.89128597614012151, 0.89492639464509061, 0.89808431737315508, 0.90117854950874599, 0.90385699981681267, 0.90659004548772049, 0.90903679425509909, 0.91141012244385589, 0.91333324463875143, 0.91501094549277351, 0.91643153227021346, 0.91783163988452188, 0.91929596337841424, 0.92080172456010578, 0.92211151508688061, 0.92339166692947738, 0.92455907843000784, 0.92573552205972054, 0.92697022121052997, 0.92808747888884568, 0.92914566199376081, 0.93013229059706337, 0.93126660393078897, 0.93241410476944142, 0.93342566642416702, 0.93434533796349073, 0.93529314440893863, 0.9362756715974786, 0.93726548538329391, 0.93822913202756753, 0.93908400062200736, 0.93989611678823515, 0.94077677346584154, 0.9416394326316142, 0.94244026258126357, 0.94324787975569235, 0.94401170895454301, 0.94470343148354752, 0.94539028382186241, 0.94609119998519942, 0.94684211249510808, 0.94753622941907756, 0.94812837603620093, 0.94863083402189785, 0.94906817453299741, 0.94945387953500682, 0.94979828235019226, 0.95010856745450822, 0.95039002497248171, 0.95064643766118773, 0.9508820144540272, 0.95110017864596341, 0.95130341374851735, 0.95149242317838689, 0.95166878757565565, 0.9518345415269297, 0.95199042718009019, 0.95213710758126557, 0.95227553583940572, 0.95240717117402518, 0.95253189262150351, 0.95265021418154394, 0.95276273863283512, 0.95287006487418924, 0.9529727724435384, 0.95307116077996057, 0.95316560322950461, 0.95325643708571939, 0.95334403266013479, 0.95342809452075039, 0.95350911555578033, 0.95358731488563986, 0.95366288552516854, 0.95373605702656183, 0.95380698892690785, 0.95387579223898167, 0.95394261566398553, 0.95400768213390108, 0.95407105129762526, 0.95413276411090742, 0.95419275535085613, 0.95425115242344472, 0.95430807002748907, 0.95436378678511335, 0.95441796697170067, 0.95447078857588807, 0.95452233177885804, 0.95457275381289297, 0.9546220159718366, 0.9546701764650618, 
0.95471735349747433, 0.95476351886248489, 0.95480869174000371, 0.95485292599011506, 0.95489627549708045, 0.95493874604477713, 0.95498034589776826, 0.95502111750726193, 0.95506113359459022, 0.95510042381065186, 0.95513901344673446, 0.95517693384078772, 0.95521419906724414, 0.95525082823357343, 0.95528678831458436, 0.95532215215075389, 0.95535694653737291, 0.9553912030631293, 0.95542491898615245, 0.95545810763512373, 0.95549075149919005, 0.95552290771563808, 0.95555452582408928, 0.95558568272894884, 0.95561636588817367, 0.95564658575329275, 0.95567632060618846, 0.95570558534169037, 0.95573440375288665, 0.95576275351874396, 0.9557906943763973, 0.95581821321927263, 0.95584535010172078, 0.95587206157825366, 0.95589837088051477, 0.9559243184616415, 0.95594988002584225, 0.95597508249720009, 0.95599993390591842, 0.95602445401169733, 0.95604862151004377, 0.95607245781306405, 0.95609597444080863, 0.95611916750805803, 0.95614202050272623, 0.95616456195109667, 0.95618679119860017, 0.95620871241711969, 0.95623032968662369, 0.95625165252596034, 0.95627268433263268, 0.95629342524208738, 0.95631387982902094, 0.95633404254630172, 0.95635392074893055, 0.95637355128701029, 0.95639288055696747, 0.95641193511499545, 0.95643072716722322, 0.95644926731522084, 0.95646756360891927, 0.95648562146535188, 0.956503446007549, 0.95652105447993141, 0.95653845475334676, 0.95655563889453044, 0.95657260525977295, 0.95658935674919765, 0.95660589670056118, 0.95662222201122604, 0.95663835266276132, 0.95665432906233683, 0.95667011451222972, 0.95668571329127927, 0.95670113505008481, 0.95671640905014832, 0.95673150937328455, 0.95674644205032555, 0.95676121002786585, 0.9567758159486085, 0.95679022516525924, 0.95680445650739776, 0.95681853015511409, 0.95683245052644939, 0.95684621926686231, 0.95685987711886911, 0.95687347359038555, 0.95688684586304606, 0.95690006165566555, 0.95691313770697317, 0.9569260613797248, 0.95693884500744431, 0.95695150737015888, 0.95696406355977737, 0.95697647940865371, 
0.95698877897727719, 0.95700096556673875, 0.95701304221176853, 0.95702501136005347, 0.95703687046153729, 0.95704861814388997, 0.95706025922859705, 0.9570718088732616, 0.95708325401984606, 0.95709458793468349, 0.95710582011282253, 0.95711695484994552, 0.95712799483618727, 0.95713894175035641, 0.95714979930550648, 0.95716057517431385, 0.95717126788885343, 0.95718186498978131, 0.95719236944925723, 0.957202787792517, 0.95721312240184042, 0.95722337667116042, 0.95723355055425929, 0.9572436471535295, 0.95725366612059848, 0.9572636097184124, 0.95727347809788454, 0.95728327211578068, 0.95729299129734946, 0.95730263811457361, 0.957312216041737, 0.95732172436999718, 0.95733116528282836, 0.9573405402355637, 0.95734984669579903, 0.9573590873140948, 0.95736826501950656, 0.9573773812666162, 0.95738643514241162, 0.95739542423397783, 0.9574043390351259, 0.95741318682778465, 0.95742197104326188, 0.95743069458686192, 0.95743936165764054, 0.9574479836670946, 0.9574565496545705, 0.9574650614943182, 0.95747351676749415, 0.95748191725515674, 0.9574902670943336, 0.95749856734197791, 0.95750681916698488, 0.95751502256255472, 0.95752317785117247, 0.95753128569916302, 0.95753934677462926, 0.95754736132690865, 0.9575553319427843, 0.95756325268418707, 0.95757112623355878, 0.95757895355533562, 0.95758673547228224, 0.95759447259151631, 0.95760216612836579, 0.9576098173614197, 0.95761742338839473, 0.95762498264554152, 0.95763249665660699, 0.95763996742009183, 0.95764739538847043, 0.95765474700118425, 0.95766203663627747, 0.95766927400877122, 0.95767646642134796, 0.95768361479756958, 0.95769071844575093, 0.95769777920942967, 0.95770479769364081, 0.95771177592700241, 0.95771871564906341, 0.95772561732341621, 0.95773248163291524, 0.95773931337942542, 0.95774611309654478, 0.9577528719559153, 0.9577595937563983, 0.95776627959255989, 0.95777292736381625, 0.9577795389569741, 0.95778611365687538, 0.95779265256212387, 0.95779915616506484, 0.95780562426432903, 0.95781205709713213, 0.95781845582443392, 
0.95782482132260516, 0.95783115451388146, 0.9578374557076168, 0.95784373384752153, 0.95784997663625338, 0.95785617524856159, 0.95786233672277155, 0.95786846427171279, 0.95787455948463218, 0.95788062353125503, 0.95788665746620938, 0.95789266115044303, 0.95789863488674398, 0.95790457970180543, 0.95791049421553642, 0.9579163786089141, 0.95792223410088229, 0.95792806103089589, 0.95793386072026132, 0.95793963319290243, 0.95794537847961359, 0.95795109723875538, 0.95795680868344779, 0.95796248098050463, 0.95796811900273571, 0.95797372722536411, 0.95797930699839695, 0.95798485905608555, 0.95799038324632479, 0.95799588111346867, 0.95800135345156734, 0.95800680178971986, 0.95801222658761187, 0.95801762841234639, 0.95802300758400916, 0.95802836339983799, 0.95803369594643173, 0.95803900140954823, 0.95804427880782528, 0.95804953280767624, 0.95805476373686904, 0.95805997228103668, 0.95806515886633659, 0.95807032380073232, 0.9580754669657231, 0.95808058758663617, 0.95808568653166204, 0.95809076478904298, 0.95809582565327944, 0.95810085922030186, 0.95810587055394747, 0.95811086448445471, 0.95811583865407524, 0.95812079328541899, 0.95812572501538762, 0.95813061874568317, 0.9581354802429457, 0.95814031145917444, 0.95814511370929345, 0.95814988975706794, 0.95815465323965587, 0.95815937953884223, 0.95816407650538249, 0.95816874462089374, 0.95817338525214979, 0.95817799970921946, 0.95818258789202859, 0.95818715060885606, 0.95819168998822646, 0.95819620516419035, 0.95820069611372105, 0.95820516111558651, 0.95820960183151782, 0.95821401948395968, 0.95821841441116828, 0.95822278708438602, 0.95822713747871724, 0.95823146709257778, 0.95823577622811962, 0.95824006490411917, 0.95824433305752699, 0.95824858079500363, 0.95825280838841775, 0.95825701614793501, 0.95826120538870485, 0.95826537503209142, 0.95826952428044465, 0.95827365396634845, 0.95827776442415324, 0.95828185578182756, 0.95828592797100032, 0.95828998079618743, 0.95829401493282251, 0.95829803090893273, 0.95830202816921861, 
0.95830600729216053, 0.95830996871122176, 0.95831391122833165, 0.95831783628000988, 0.95832174339242959, 0.95832563140427396, 0.9583295023458015, 0.95833335688464116, 0.95833719391248906, 0.9583410140656391, 0.95834481587618836, 0.95834859932800631, 0.9583523659266866, 0.95835613297971178, 0.95835990310647401, 0.95836365234199128, 0.95836738011596201, 0.95837110359771649, 0.95837480084682602, 0.95837847503126072, 0.95838213100319403, 0.95838577036172024, 0.95838939369917531, 0.95839300073680744, 0.95839659148195344, 0.95840016589996835, 0.95840372451459699, 0.95840726633653262, 0.95841079036418997, 0.95841429492622421, 0.9584177814660314, 0.95842125082352314, 0.95842470344227337, 0.95842814004098009, 0.95843156168428123, 0.95843496611617274, 0.95843836682694095, 0.95844174667687332, 0.95844510624454393, 0.95844844642020655, 0.95845176749842131, 0.95845507561885812, 0.95845836190696909, 0.95846163253713579, 0.95846488711181554, 0.95846812492266842, 0.95847134263593603, 0.9584745406316425, 0.95847772268897113, 0.95848088255865482, 0.95848402581286796, 0.95848715488031, 0.95849026925622161, 0.9584933690047569, 0.95849645367742786, 0.95849952322443366, 0.95850257774201197, 0.9585056172553158, 0.95850864144897185, 0.95851165071385458, 0.95851464458214719, 0.95851762258541229, 0.95852058651313321, 0.95852353530644763, 0.95852646870448965, 0.95852938853888647, 0.95853229559992859, 0.95853518780184044, 0.95853806562085586, 0.95854093029366394, 0.95854378028498743, 0.9585466230646148, 0.95854945104543998, 0.95855226506222846, 0.95855506956639214, 0.95855787306449014, 0.95856064619702808, 0.95856340451422994, 0.95856614900019754, 0.95856887990573125, 0.95857159733347641, 0.95857430149644607, 0.95857699256379614, 0.9585796701401017, 0.95858233476699406, 0.95858498626753141, 0.95858762500383654, 0.95859025111002261, 0.95859286481755668, 0.95859546673548046, 0.95859805674472542, 0.95860063488271774, 0.95860320123536247, 0.95860575699226558, 0.95860830050401336, 
0.95861083151549886, 0.95861335062625364, 0.95861585787996051, 0.95861835339446166, 0.95862083809441023, 0.95862331233366527, 0.95862577543078376, 0.95862822829258054, 0.95863067197134055, 0.95863310385899403, 0.95863552482732417, 0.95863794959854864, 0.95864036765396299, 0.95864276623931932, 0.95864516139874101, 0.95864754615601855, 0.95864991976911318, 0.95865228266053815, 0.95865463483577773, 0.95865697678913819, 0.95865930802383592, 0.95866162866690685, 0.95866393934000582, 0.95866624017852586, 0.95866853104601557, 0.95867081196683568, 0.95867308300463716, 0.95867534431074031, 0.95867759778770734, 0.9586798458831215, 0.95868208417936041, 0.95868431323952896, 0.95868653273608284, 0.95868874309739927, 0.95869094454844561, 0.95869313734044026, 0.9586953214765932, 0.95869749705566898, 0.95869966321435229, 0.95870182018914119, 0.95870396821581316, 0.95870610768975095, 0.95870823851888065, 0.95871036073432181, 0.95871247402920745, 0.95871457834266793, 0.95871667533724347, 0.95871876298641834, 0.95872084131500257, 0.95872290982455644, 0.95872497127943623, 0.95872702540869781, 0.95872907110263417, 0.95873110847732279, 0.9587331377261421, 0.95873515887056981, 0.95873717195281538, 0.95873917702892686, 0.95874117413780591, 0.95874316320848851, 0.95874514430351709, 0.95874711743613472, 0.95874908277805049, 0.95875104042148962, 0.95875299037498396, 0.95875493274817947, 0.95875686760173084, 0.95875879501812689, 0.95876071504003002, 0.95876262788507327, 0.95876453336129808, 0.95876643647353943, 0.95876832935338796, 0.95877021568687704, 0.95877209597316349, 0.95877396955447336, 0.95877583612982087, 0.95877769574241611, 0.95877954836655843, 0.95878139402278395, 0.95878323268956378, 0.95878505731164376, 0.95878687285904707, 0.95878868112760396, 0.95879048280626389, 0.95879227768477981, 0.95879406512809173, 0.95879584503246906, 0.95879761748306414, 0.95879938221067151, 0.95880113954829238, 0.95880288972522965, 0.95880463203291422, 0.95880636579084499, 0.95880809210656637, 
0.95880981137117871, 0.95881152379202017, 0.95881322946706282, 0.95881492851482286, 0.95881662095782183, 0.95881830539697144, 0.95881998147087655, 0.95882165163771149, 0.95882331771996521, 0.95882497671110933, 0.9588266289505436, 0.95882827503537804, 0.95882991486177749, 0.95883154846625884, 0.95883317557501169, 0.95883479634303215, 0.95883641092800709, 0.95883801941409474, 0.95883962209242812, 0.95884121887071505, 0.95884280966684354, 0.9588443946919174, 0.9588459843653675, 0.95884756846342956, 0.95884914534329269, 0.95885071622401052, 0.95885228120133026, 0.95885384055339296, 0.95885539430878974, 0.95885694256572129, 0.95885848522246042, 0.95886002247088786, 0.95886155327068134, 0.95886307781301272, 0.95886459667252211, 0.9588661141067677, 0.95886762085036403, 0.95886912202858965, 0.95887061782187055, 0.95887210821589963, 0.95887359338204303, 0.95887507343072764, 0.95887654806047096, 0.95887801634847558, 0.958879479396611, 0.95888093730016544, 0.95888239011129572, 0.95888383787924558, 0.95888528066912304, 0.95888671854293428, 0.95888815147896533, 0.95888957956125809, 0.95889100287642492, 0.95889242159716326, 0.95889383581033316, 0.95889524542863103, 0.95889665048235906, 0.958898051312564, 0.95889945259953902, 0.9589008553588434, 0.95890225583499966, 0.95890365133878142, 0.95890504164040047, 0.95890642675965532, 0.95890780719649582, 0.9589091837448338, 0.95891055974231409, 0.95891193115290951, 0.95891329792113855, 0.95891466010314663, 0.95891601792273584, 0.95891736842005082, 0.958918715081023, 0.95892005785744616, 0.95892139640400842, 0.9589227304210971, 0.95892405995946273, 0.95892538502624458, 0.95892670565298033, 0.95892802192573601, 0.95892933311292805, 0.95893063798869771, 0.95893193915755826, 0.9589332364689257, 0.95893452990315065, 0.95893581944934636, 0.95893710507025343, 0.95893838672945864, 0.95893966445069323, 0.9589409381887295, 0.95894220793631846, 0.95894347384173273, 0.95894473597231455, 0.95894599424494387, 0.9589472486661097, 0.95894849925845949, 
0.95894974596435756, 0.95895098754489316, 0.95895222831177129, 0.95895346555988292, 0.95895469920912135, 0.95895592929849793, 0.95895715586665897, 0.95895837730568734, 0.95895959433499744, 0.95896080777437531, 0.95896201779505219, 0.9589632244256262, 0.95896442767677204, 0.95896562753798986, 0.95896682401729449, 0.95896801719822211, 0.95896920718773115, 0.95897039354919056, 0.95897157644943576, 0.95897276385570196, 0.9589739560017102, 0.95897514455261523, 0.95897632935385646, 0.95897751043797275, 0.95897868794869678, 0.95897986193142781, 0.95898103240322352, 0.95898219938519358, 0.95898336299843734, 0.95898452349023044, 0.95898568078402524, 0.95898683470385271, 0.9589879854065374, 0.95898913292171817, 0.95899027727444242, 0.95899141847528857, 0.9589925566296269, 0.95899369170680027, 0.9589948237549194, 0.95899595281725036, 0.95899707888636565, 0.95899820200845665, 0.95899932222693185, 0.95900043957528702, 0.95900155401674836, 0.9590026656827858, 0.95900377466624986, 0.95900488087774538, 0.95900598411065163, 0.95900708464967166, 0.95900818235289509, 0.9590092771710611, 0.95901036914933047, 0.95901145835128543, 0.95901254482381437, 0.95901363060282185, 0.95901471415077499, 0.95901579509209678, 0.95901687320474527, 0.95901794840866494, 0.959019020693472, 0.95902009014612521, 0.95902115681917477, 0.95902222079128796, 0.95902328207814092, 0.95902434067577835, 0.95902539661166986, 0.95902645007800513, 0.95902750117506474, 0.95902854791371406, 0.95902959048136693, 0.95903063023762714, 0.95903166751295299, 0.95903270182622424, 0.95903373312530771, 0.95903476168991841, 0.95903578771929365, 0.95903681117845974, 0.95903783199163428, 0.95903885022162905, 0.95903986587758416, 0.95904087901070967, 0.95904188992412265, 0.95904289835764922, 0.95904390440717513, 0.9590449082975353, 0.95904590992933436, 0.95904690908965085, 0.95904790581783939, 0.9590489003292102, 0.95904989263628271, 0.95905088245747783, 0.95905186950788701, 0.95905285425129894, 0.95905383681468115, 
0.95905481723999397, 0.95905579553677933, 0.95905677172349912, 0.95905774581380354, 0.95905871783321817, 0.95905968771291827, 0.95906065546040331, 0.95906162112418614, 0.95906258462122018, 0.95906354599302635, 0.95906450532370791, 0.95906546268890047, 0.95906641816047633, 0.95906737174111589, 0.9590683234430406, 0.95906927329642855, 0.95907022152577726, 0.95907116813486282, 0.95907211276080084, 0.95907305558144562, 0.95907399660488146, 0.95907493583401138, 0.95907587327538046, 0.95907680890498193, 0.95907774275741042, 0.95907867487387455, 0.95907960524970604, 0.95908053388480319, 0.95908146076282241, 0.95908238590666184, 0.95908330932876029, 0.95908423107794816, 0.95908515116153314, 0.95908606957879106, 0.95908698631818734, 0.95908790138552558, 0.95908881478437136, 0.95908972646867285, 0.95909063656019455, 0.95909154503924154, 0.9590924528141872, 0.95909336558350511, 0.95909427575127848, 0.95909518401681937, 0.95909609053365075, 0.9590969953404358, 0.95909789844061732, 0.95909879983192536, 0.95909969951793461, 0.95910059750638887, 0.95910149381152909, 0.95910238846769824, 0.95910328146569734, 0.95910417281745874, 0.95910506253420547, 0.95910595062086534, 0.95910683705168887, 0.95910772180396753, 0.9591086048881069, 0.95910948632708981, 0.95911036613350054, 0.95911124432430594, 0.95911212095382825, 0.95911299600407995, 0.95911386954851008, 0.95911474161946852, 0.95911561211360552, 0.95911648100605595, 0.95911734831217244, 0.95911821406089959, 0.959119078271368, 0.95911994094944153, 0.95912080209209893, 0.95912166173289604, 0.95912251989202746, 0.95912337660448077, 0.9591242318963783, 0.95912508577374811, 0.95912593826378301, 0.95912678959269049, 0.95912763952409874, 0.95912848803295669, 0.95912933514786602, 0.95913018089501323, 0.9591310252730102, 0.95913186828232255, 0.95913270994690658, 0.95913355029535141, 0.95913438921326422, 0.95913522684478447, 0.95913606322409128, 0.95913689844460215, 0.95913773263855007, 0.95913856564597122, 0.95913939746499388, 
0.95914022812223576, 0.95914105761199797, 0.95914188594930416, 0.95914271315919208, 0.95914353925824525, 0.95914436425024885, 0.95914518814119576, 0.95914601095059282, 0.95914683267525636, 0.95914765329080964, 0.95914847270655312, 0.95914929104529456, 0.95915010823535707, 0.95915092425782655, 0.95915173912262297, 0.95915255264309773, 0.95915336444580479, 0.95915417499886413, 0.95915498415493117, 0.95915579195197387, 0.95915659844522028, 0.95915740364861557, 0.95915820759920023, 0.95915901022661965, 0.95915981149086715, 0.95916061153372956, 0.95916141030624469, 0.95916221253875145, 0.95916301824173933, 0.95916382235224551, 0.95916462493553412, 0.95916542602563148, 0.95916622569225918, 0.95916702399413634, 0.95916782102104714, 0.9591686165895118, 0.95916941035077774, 0.9591702024266231, 0.95917099292126939, 0.95917178192197583, 0.95917256948938268, 0.95917335566962558, 0.95917414050252503, 0.95917492400900162, 0.95917570620292969, 0.95917648709450098, 0.95917726669513159, 0.95917804502419379, 0.95917882209303773, 0.95917959788531559, 0.95918037238524734, 0.95918114560667234, 0.95918191756296212, 0.95918268824219444, 0.95918345764194957, 0.95918422580403406, 0.95918499284967518, 0.95918575898442326, 0.95918652394775195, 0.95918728737876635, 0.95918804930243529, 0.95918880979907273, 0.95918956892777307, 0.9591903267039027, 0.95919108162096633, 0.9591918375376246, 0.95919259215463293, 0.95919334539760692, 0.95919409729551131, 0.95919484786871434, 0.95919559713058555, 0.95919634478710691, 0.95919709089784488, 0.9591978354346764, 0.95919857857065527, 0.95919932041311462, 0.9592000605647597, 0.95920079971750227, 0.95920153758409876, 0.95920227408347736, 0.95920300922345214, 0.95920374303254397, 0.95920447555639932, 0.95920520678470789, 0.95920593628474027, 0.95920666085121487, 0.95920738429556107, 0.95920810613560437, 0.95920882634253291, 0.95920954495914423, 0.959210261991298, 0.9592109775622284, 0.95921169167180653, 0.95921240434804755, 0.9592131156196616, 
0.95921382551335166, 0.9592145341824666, 0.9592152417304326, 0.95921594737565219, 0.95921665139878864, 0.95921735388667473, 0.95921805484824829, 0.95921875428438474, 0.95921945220922666, 0.95922014864228011, 0.9592208435967029, 0.95922153712290981, 0.95922222927737888, 0.95922292008272358, 0.95922360956877484, 0.95922429774498885, 0.95922498641735432, 0.95922567532734371, 0.95922636281029805, 0.95922704903613343, 0.95922773405027173, 0.95922841781956114, 0.95922910014347429, 0.95922978125466773, 0.95923046128971834, 0.95923114004824739, 0.95923181761895004, 0.95923249391074572, 0.95923316895038846, 0.95923384307735404, 0.95923451662077386, 0.9592351890828319, 0.95923586039321662, 0.95923653051246105, 0.95923719942677155, 0.9592378671709032, 0.95923853376547541, 0.95923919921916123, 0.95923986355433422, 0.95924052680544014, 0.95924118898677513, 0.95924185009188068, 0.95924251012867934, 0.95924316910495344, 0.95924382702700151, 0.95924448386116179]} ''' # Plot the MSE values plt.figure() plt.plot(NMF.all_performances['MSE']) plt.ylim(0,10)
1,229.352941
61,573
0.850918
6,185
62,697
8.620857
0.499596
0.00045
0.000375
0
0
0
0
0
0
0
0
0.882509
0.050274
62,697
51
61,574
1,229.352941
0.012948
0.002297
0
0
0
0
0.085995
0
0
0
0
0
0
0
null
null
0
0.173913
null
null
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
1
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
d3f9ed09ec808d9b401b39bbc65e65acef753e60
451
py
Python
python-acoustics/tests/test_decibel.py
chipmuenk/acoustics
c85ac95a10c09d7fa15d63b2bdb24acab89fec60
[ "MIT" ]
1
2019-03-28T12:46:26.000Z
2019-03-28T12:46:26.000Z
python-acoustics/tests/test_decibel.py
chipmuenk/acoustics
c85ac95a10c09d7fa15d63b2bdb24acab89fec60
[ "MIT" ]
null
null
null
python-acoustics/tests/test_decibel.py
chipmuenk/acoustics
c85ac95a10c09d7fa15d63b2bdb24acab89fec60
[ "MIT" ]
2
2017-08-25T22:12:17.000Z
2021-09-12T09:50:02.000Z
from acoustics.decibel import * def test_dbsum(): assert(abs(dbsum([10.0, 10.0]) - 13.0103) < 1e-5) def test_dbmean(): assert(dbmean([10.0, 10.0]) == 10.0) def test_dbadd(): assert(abs(dbadd(10.0, 10.0) - 13.0103) < 1e-5) def test_dbsub(): assert(abs(dbsub(13.0103, 10.0) - 10.0) < 1e-5) def test_dbmul(): assert(abs(dbmul(10.0, 2) - 13.0103) < 1e-5) def test_dbdiv(): assert(abs(dbdiv(13.0103, 2) - 10.0) < 1e-5)
23.736842
53
0.592018
83
451
3.144578
0.253012
0.126437
0.095785
0.114943
0.229885
0.229885
0.168582
0.168582
0.168582
0.168582
0
0.206044
0.192905
451
19
54
23.736842
0.510989
0
0
0
0
0
0
0
0
0
0
0
0.461538
1
0.461538
true
0
0.076923
0
0.538462
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
1
0
0
0
1
0
0
5
d3ff3d9a488f13b0dd951d12d50cd2b5a3a4d912
453
py
Python
qiling/qiling/os/posix/syscall/ptrace.py
mrTavas/owasp-fstm-auto
6e9ff36e46d885701c7419db3eca15f12063a7f3
[ "CC0-1.0" ]
2
2021-05-05T12:03:01.000Z
2021-06-04T14:27:15.000Z
qiling/qiling/os/posix/syscall/ptrace.py
mrTavas/owasp-fstm-auto
6e9ff36e46d885701c7419db3eca15f12063a7f3
[ "CC0-1.0" ]
null
null
null
qiling/qiling/os/posix/syscall/ptrace.py
mrTavas/owasp-fstm-auto
6e9ff36e46d885701c7419db3eca15f12063a7f3
[ "CC0-1.0" ]
2
2021-05-05T12:03:09.000Z
2021-06-04T14:27:21.000Z
#!/usr/bin/env python3 # # Cross Platform and Multi Architecture Advanced Binary Emulation Framework # from qiling.const import * from qiling.os.linux.thread import * from qiling.const import * from qiling.os.posix.filestruct import * from qiling.os.filestruct import * from qiling.os.posix.const_mapping import * from qiling.exception import * def ql_syscall_ptrace(ql, request, pid, addr, data, *args, **kw): regreturn = 0 return regreturn
25.166667
75
0.761589
64
453
5.34375
0.578125
0.204678
0.280702
0.210526
0.385965
0.192982
0.192982
0
0
0
0
0.005168
0.145695
453
17
76
26.647059
0.878553
0.209713
0
0.2
0
0
0
0
0
0
0
0
0
1
0.1
false
0
0.7
0
0.9
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
312a4a6e36355937b26c0931cb3bd6ddf1f198fa
165
py
Python
src/encoder.py
ralphtatt-IW/BrIW
c7ffd83ba78c8ab07bbb09ded291033f784e3751
[ "MIT" ]
null
null
null
src/encoder.py
ralphtatt-IW/BrIW
c7ffd83ba78c8ab07bbb09ded291033f784e3751
[ "MIT" ]
null
null
null
src/encoder.py
ralphtatt-IW/BrIW
c7ffd83ba78c8ab07bbb09ded291033f784e3751
[ "MIT" ]
3
2019-09-18T10:44:13.000Z
2019-10-17T19:58:52.000Z
import classes from json import JSONEncoder class MyEncoder(JSONEncoder): def default(self, obj): return obj.__dict__ def drink_decoder(obj): pass
16.5
29
0.727273
21
165
5.47619
0.761905
0
0
0
0
0
0
0
0
0
0
0
0.206061
165
9
30
18.333333
0.877863
0
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0.142857
0.285714
0.142857
0.857143
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
0
0
0
5
31420a5fd6016066f6e06834b82458c235829c75
45
py
Python
home/hairygael/GESTURES/headdown.py
rv8flyboy/pyrobotlab
4e04fb751614a5cb6044ea15dcfcf885db8be65a
[ "Apache-2.0" ]
63
2015-02-03T18:49:43.000Z
2022-03-29T03:52:24.000Z
home/hairygael/GESTURES/headdown.py
hirwaHenryChristian/pyrobotlab
2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9
[ "Apache-2.0" ]
16
2016-01-26T19:13:29.000Z
2018-11-25T21:20:51.000Z
home/hairygael/GESTURES/headdown.py
hirwaHenryChristian/pyrobotlab
2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9
[ "Apache-2.0" ]
151
2015-01-03T18:55:54.000Z
2022-03-04T07:04:23.000Z
def headdown(): i01.head.neck.moveTo(0)
11.25
27
0.644444
7
45
4.142857
1
0
0
0
0
0
0
0
0
0
0
0.081081
0.177778
45
3
28
15
0.702703
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
5
31562706fb190b50879d2fb5c3d516fb787f67f8
1,898
py
Python
user_database.py
poudelroshan/BOTVID-19
810fb9a97ddb854270e3fc1ec1e4c74ddf4d2aa4
[ "MIT" ]
23
2020-04-27T03:43:39.000Z
2020-08-31T16:11:51.000Z
user_database.py
laranea/BOTVID-19
d7b87fd2bec7e82863a4a3fde63bb2262ba6a120
[ "MIT" ]
null
null
null
user_database.py
laranea/BOTVID-19
d7b87fd2bec7e82863a4a3fde63bb2262ba6a120
[ "MIT" ]
10
2020-04-27T10:35:10.000Z
2020-08-12T15:57:17.000Z
import authenticate ''' #ONLY USE FOR THE FIRST TIME TO CREATE THE TABLE connection = authenticate.connection connection.ping(reconnect=True) cursor = connection.cursor() cursor.execute("""CREATE TABLE users(user_id BigInt)""") connection.commit() ''' def add_user(user_id): connection = authenticate.connection connection.ping(reconnect=True) cursor = connection.cursor() print("The following user id is being added") cursor.execute("INSERT INTO users VALUES (%s)", (user_id,)) connection.commit() cursor.close() connection.close() print("User ", user_id, " Added to Database") def remove_user(user__id): connection = authenticate.connection connection.ping(reconnect=True) cursor = connection.cursor() cursor.execute("DELETE FROM users WHERE user_id = %s", (user__id,)) connection.commit() cursor.close() connection.close() print("User ", user__id, " Removed from Database") def is_user_subscribed(user__id): connection = authenticate.connection connection.ping(reconnect=True) cursor = connection.cursor() cursor.execute("SELECT * FROM users WHERE user_id= %s", (user__id,)) users = cursor.fetchall() cursor.close() connection.close() return len(users) != 0 def get_total_users(): connection = authenticate.connection connection.ping(reconnect=True) cursor = connection.cursor() cursor.execute("SELECT * FROM users") users = cursor.fetchall() cursor.close() connection.close() return len(users) def get_users(): connection = authenticate.connection connection.ping(reconnect=True) cursor = connection.cursor() cursor.execute("SELECT user_id FROM users") user_ids = cursor.fetchall() cursor.close() connection.close() return [x[0] for x in user_ids]
27.507246
72
0.672813
219
1,898
5.712329
0.232877
0.06235
0.153477
0.201439
0.761791
0.761791
0.761791
0.72502
0.694644
0.694644
0
0.001336
0.211275
1,898
68
73
27.911765
0.834335
0
0
0.617021
0
0
0.144099
0
0
0
0
0
0
1
0.106383
false
0
0.021277
0
0.191489
0.06383
0
0
0
null
0
0
1
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
31badab1c3f7c1ef2cb953ce0d9d2eea803ed448
150
py
Python
Python Basics/5. While Loop/02. Password.py
a-shiro/SoftUni-Courses
7d0ca6401017a28b5ff7e7fa3e5df8bba8ddbe77
[ "MIT" ]
null
null
null
Python Basics/5. While Loop/02. Password.py
a-shiro/SoftUni-Courses
7d0ca6401017a28b5ff7e7fa3e5df8bba8ddbe77
[ "MIT" ]
null
null
null
Python Basics/5. While Loop/02. Password.py
a-shiro/SoftUni-Courses
7d0ca6401017a28b5ff7e7fa3e5df8bba8ddbe77
[ "MIT" ]
null
null
null
name = input() password = input() input_password = input() while input_password != password: input_password = input() print(f"Welcome {name}!")
16.666667
33
0.693333
18
150
5.611111
0.388889
0.514851
0.534653
0
0
0
0
0
0
0
0
0
0.16
150
9
34
16.666667
0.801587
0
0
0.333333
0
0
0.099338
0
0
0
0
0
0
1
0
false
0.666667
0
0
0
0.166667
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
5
31c16aeccf9f9bb1c6a0af2bdcf8f91b42ce3ce6
199
py
Python
utils/_xjson.py
kollad/turbo-ninja
9c3f66b2af64aec01f522d19b309cfdd723e67cf
[ "MIT" ]
null
null
null
utils/_xjson.py
kollad/turbo-ninja
9c3f66b2af64aec01f522d19b309cfdd723e67cf
[ "MIT" ]
1
2017-12-14T05:35:38.000Z
2017-12-14T05:35:38.000Z
utils/_xjson.py
kollad/turbo-ninja
9c3f66b2af64aec01f522d19b309cfdd723e67cf
[ "MIT" ]
null
null
null
from zlib import compress, decompress from json import dumps as _dumps from json import loads as _loads def dumps(s): return compress(_dumps(s)) def loads(s): return _loads(decompress(s))
18.090909
37
0.743719
31
199
4.645161
0.387097
0.111111
0.194444
0
0
0
0
0
0
0
0
0
0.180905
199
11
38
18.090909
0.883436
0
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0.428571
0.285714
1
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
5
9ef0857f8c57f77420f6825427c2f31f7cd405ea
212
py
Python
protonfixes/gamefixes/445310.py
Coolthulhu/protonfixes
a7dc52727ac2737cb17f739e0123df9b1b4c49d1
[ "BSD-2-Clause" ]
213
2018-10-06T01:40:26.000Z
2022-03-16T16:17:37.000Z
protonfixes/gamefixes/445310.py
Coolthulhu/protonfixes
a7dc52727ac2737cb17f739e0123df9b1b4c49d1
[ "BSD-2-Clause" ]
88
2018-10-06T17:38:56.000Z
2022-02-19T13:27:26.000Z
protonfixes/gamefixes/445310.py
Coolthulhu/protonfixes
a7dc52727ac2737cb17f739e0123df9b1b4c49d1
[ "BSD-2-Clause" ]
67
2018-10-09T16:57:16.000Z
2022-03-14T13:06:25.000Z
""" Game fix for Might & Magic: Heroes VII - Trial by Fire """ #pylint: disable=C0103 from protonfixes import util def main(): """ Install uplay """ # Install uplay util.protontricks('uplay')
15.142857
58
0.636792
26
212
5.192308
0.846154
0.177778
0
0
0
0
0
0
0
0
0
0.024691
0.235849
212
13
59
16.307692
0.808642
0.514151
0
0
0
0
0.056818
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
5
9ef70d825ad647eac80227f8c6ded76a04686140
4,937
py
Python
wangzhansen/project/myplane/enemy.py
python20180319howmework/homework
c826d7aa4c52f8d22f739feb134d20f0b2c217cd
[ "Apache-2.0" ]
null
null
null
wangzhansen/project/myplane/enemy.py
python20180319howmework/homework
c826d7aa4c52f8d22f739feb134d20f0b2c217cd
[ "Apache-2.0" ]
null
null
null
wangzhansen/project/myplane/enemy.py
python20180319howmework/homework
c826d7aa4c52f8d22f739feb134d20f0b2c217cd
[ "Apache-2.0" ]
null
null
null
import pygame import random class SmallEnemy(pygame.sprite.Sprite): def __init__(self, bg_size): pygame.sprite.Sprite.__init__(self) # 小敌机图片--->Surface self.image = pygame.image.load("./images/enemy1.png").convert_alpha() self.rect = self.image.get_rect() # 撞击图片 self.destroy_image = [pygame.image.load("./images/enemy1_down1.png").convert_alpha(), \ pygame.image.load("./images/enemy1_down2.png").convert_alpha(), \ pygame.image.load("./images/enemy1_down3.png").convert_alpha(), \ pygame.image.load("./images/enemy1_down4.png").convert_alpha()] # 背景的宽度, 高度 self.width = bg_size.width self.height = bg_size.height # 位置 self.rect.left, self.rect.top = (random.randint(0, self.width-self.rect.width),\ random.randint(-5*self.height, 0)) # 速度 self.speed = 2 # 非透明区 self.mask = pygame.mask.from_surface(self.image) # 状态 self.alive = True # 移动 def move(self): if self.rect.top >= self.height-self.rect.height-50: self.reset() else: self.rect.top += self.speed def reset(self): self.alive = True self.rect.left, self.rect.top = (random.randint(0, self.width - self.rect.width), \ random.randint(-5 * self.height, 0)) class MidEnemy(pygame.sprite.Sprite): energy = 10 def __init__(self, bg_size): pygame.sprite.Sprite.__init__(self) # 小敌机图片--->Surface self.image = pygame.image.load("./images/enemy2.png") self.rect = self.image.get_rect() # 撞击图片 self.destroy_image = [pygame.image.load("./images/enemy2_down1.png").convert_alpha(), \ pygame.image.load("./images/enemy2_down2.png").convert_alpha(), \ pygame.image.load("./images/enemy2_down3.png").convert_alpha(), \ pygame.image.load("./images/enemy2_down4.png").convert_alpha()] # 背景的宽度, 高度 self.width = bg_size.width self.height = bg_size.height # 实例能量 self.energy = MidEnemy.energy # 位置 self.rect.left, self.rect.top = (random.randint(0, self.width-self.rect.width),\ random.randint(-10*self.height, -5 * self.height)) # 速度 self.speed = 1 # 非透明区 self.mask = pygame.mask.from_surface(self.image) # 状态 self.alive = True # 移动 def move(self): if 
self.rect.top >= self.height-self.rect.height-50: self.reset() else: self.rect.top += self.speed def reset(self): self.alive = True self.rect.left, self.rect.top = (random.randint(0, self.width - self.rect.width), \ random.randint(-10 * self.height, -5 * self.height)) self.energy = MidEnemy.energy class BigEnemy(pygame.sprite.Sprite): energy = 20 def __init__(self, bg_size): pygame.sprite.Sprite.__init__(self) # 小敌机图片--->Surface self.image1 = pygame.image.load("./images/enemy3_n1.png") self.image2 = pygame.image.load("./images/enemy3_n2.png") self.rect = self.image1.get_rect() # 撞击图片 self.destroy_image = [pygame.image.load("./images/enemy3_down1.png").convert_alpha(), \ pygame.image.load("./images/enemy3_down2.png").convert_alpha(), \ pygame.image.load("./images/enemy3_down3.png").convert_alpha(), \ pygame.image.load("./images/enemy3_down4.png").convert_alpha(), \ pygame.image.load("./images/enemy3_down5.png").convert_alpha(), \ pygame.image.load("./images/enemy3_down6.png").convert_alpha(),] # 背景的宽度, 高度 self.width = bg_size.width self.height = bg_size.height self.energy = BigEnemy.energy # 位置 self.rect.left, self.rect.top = (random.randint(0, self.width - self.rect.width), \ random.randint(-15 * self.height, -10*self.height)) # 速度 self.speed = 1 # 非透明区 self.mask = pygame.mask.from_surface(self.image1) # 状态 self.alive = True # 移动 def move(self): if self.rect.top >= self.height - self.rect.height - 50: self.reset() else: self.rect.top += self.speed def reset(self): self.alive = True self.rect.left, self.rect.top = (random.randint(0, self.width - self.rect.width), \ random.randint(-15 * self.height, -10*self.height)) self.energy = BigEnemy.energy
38.874016
95
0.542637
570
4,937
4.57193
0.121053
0.092095
0.103607
0.14505
0.894091
0.85495
0.845741
0.845741
0.65119
0.65119
0
0.022355
0.320438
4,937
126
96
39.18254
0.754396
0.030585
0
0.674419
0
0
0.090794
0.082808
0
0
0
0
0
1
0.104651
false
0
0.023256
0
0.186047
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
73075aad0f3af2d48d6ed7df0e179d538f0d6374
115
py
Python
resonances/__main__.py
apetrov/resonances
50be33536965e6a78371282a2d1803c53f11d112
[ "MIT" ]
4
2015-11-04T11:23:00.000Z
2021-08-04T20:27:42.000Z
resonances/__main__.py
apetrov/resonances
50be33536965e6a78371282a2d1803c53f11d112
[ "MIT" ]
1
2021-08-04T20:57:22.000Z
2021-08-07T09:17:14.000Z
resonances/__main__.py
apetrov/resonances
50be33536965e6a78371282a2d1803c53f11d112
[ "MIT" ]
1
2021-08-04T20:49:16.000Z
2021-08-04T20:49:16.000Z
if __name__ == '__main__': print('You have installed the resonances package. Check please the documentation!')
38.333333
87
0.747826
14
115
5.571429
0.928571
0
0
0
0
0
0
0
0
0
0
0
0.156522
115
2
88
57.5
0.804124
0
0
0
0
0
0.713043
0
0
0
0
0
0
1
0
true
0
0
0
0
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
7310326f933d7ed0810e171256df11a86c8241e6
12,026
py
Python
App.py
The-Osk/Fetal_Health_classification
66e872862ef42f2a179cb502adc64984110d9500
[ "MIT" ]
null
null
null
App.py
The-Osk/Fetal_Health_classification
66e872862ef42f2a179cb502adc64984110d9500
[ "MIT" ]
null
null
null
App.py
The-Osk/Fetal_Health_classification
66e872862ef42f2a179cb502adc64984110d9500
[ "MIT" ]
null
null
null
from logging import log from os import write from joblib import dump, load import pandas as pd import numpy as np from scipy.sparse import data import streamlit as st from sklearn.metrics import precision_score, recall_score, confusion_matrix, classification_report, accuracy_score, f1_score from sklearn.preprocessing import StandardScaler scaler = StandardScaler() testSet = pd.read_csv("TestSet.csv") testSet = testSet.drop(["fetal_health"], axis = 1) clf = load('VCHard_model.joblib') user_inputs = {} zeroValue = 0.00 cases = {1: "Normal" , 2: "Suspect", 3: "Pathological"} class Doctor: def __init__(self): patient_id = st.number_input("patient ID: ", min_value=0, step = 1) if st.button('Show & Predict'): if testSet.loc[0].size > patient_id: patientSearchedFor = testSet.loc[patient_id] st.write(patientSearchedFor) col_names = list(patientSearchedFor) X_df = scaler.fit_transform([patientSearchedFor]) X_df = pd.DataFrame(X_df, columns = col_names) y = clf.predict(X_df) st.write(f'The patient is {cases[y[0]]}') col1, col2= st.beta_columns(2) with col1: if st.button('Agree with the result'): st.write("Databases Has been updated") with col2: if st.button('Disagree with the result'): st.write("Databases Has been updated") else: st.write("Wrong patient ID") class DataEntry: def __init__(self): user_inputs = {} st.write("Enter the patient Data: ") st.number_input("Patient ID: ", min_value=0, step = 1) col1, col2, col3= st.beta_columns(3) with col1: user_inputs["baseline value"] = [st.number_input("baseline value: ", min_value=0, step = 1)] user_inputs["accelerations"] = [st.number_input("accelerations: ", min_value=0.000, step = 0.001, format= "%.3f") ] user_inputs["fetal_movement"] = [st.number_input("fetal_movement: ", min_value=0.000, step = 0.001, format= "%.3f")] user_inputs["uterine_contractions"] = [st.number_input("uterine_contractions: ", min_value=0.000, step = 0.001, format= "%.3f")] user_inputs["light_decelerations"] = [st.number_input("light_decelerations: ", 
min_value=0.0000, step = 0.001, format= "%.3f")] user_inputs["severe_decelerations"] = [st.number_input("severe_decelerations: ", min_value=0.000, step = 0.01)] user_inputs["prolongued_decelerations"] = [st.number_input("prolongued_decelerations: ", min_value=0.000, step = 0.001, format= "%.3f")] with col2: user_inputs["abnormal_short_term_variability"] = [st.number_input("abnormal_short_term_variability: ", min_value=0, step = 1)] user_inputs["mean_value_of_short_term_variability"] = [st.number_input("mean_value_of_short_term_variability: ", min_value=0.0, step = 0.1, format= "%.1f")] user_inputs["percentage_of_time_with_abnormal_long_term_variability"] = [st.number_input("percentage_of_time_with_abnormal_long_term_variability: ", min_value=0, step = 1)] user_inputs["mean_value_of_long_term_variability"] = [st.number_input("mean_value_of_long_term_variability: ", min_value=0.0, step = 0.1, format= "%.1f")] user_inputs["histogram_width"] = [st.number_input("histogram_width: ", min_value=0, step = 1)] user_inputs["histogram_min"] = [st.number_input("histogram_min: ", min_value=0, step = 1)] user_inputs["histogram_max"] = [st.number_input("histogram_max: ", min_value=0, step = 1)] with col3: user_inputs["histogram_number_of_peaks"] = [st.number_input("histogram_number_of_peaks: ", min_value=0, step = 1)] user_inputs["histogram_number_of_zeroes"] = [st.number_input("histogram_number_of_zeroes: ", min_value=0, step = 1)] user_inputs["histogram_mode"] = [st.number_input("histogram_mode: ", min_value=0, step = 1)] user_inputs["histogram_mean"] =[ st.number_input("histogram_mean: ", min_value=0, step = 1)] user_inputs["histogram_median"] = [st.number_input("histogram_median: ", min_value=0, step = 1)] user_inputs["histogram_variance"] = [st.number_input("histogram_variance: ", min_value=0, step = 1)] user_inputs["histogram_tendency"] = [st.number_input("histogram_tendency: ", min_value=0, step = 1)] if st.button("Save Data"): st.write("Patient Data Saved") class 
DataScientist: def __init__(self): operation = st.radio("Operation ", ("Enter Data", "Review Data")) if operation == "Enter Data": dataSource = st.radio("How are you entering the data: ", ("From a CSV file", "Enter manually")) if dataSource == "From a CSV file": CSVfile = st.file_uploader("Upload data file", type=None, accept_multiple_files=False, key=None, help=None) if CSVfile: df = pd.read_csv(CSVfile) y = df['fetal_health'] df = df.drop(["fetal_health"], axis = 1) st.write(df) col_names = list(df) X_df = scaler.fit_transform(df) X_df = pd.DataFrame(X_df, columns = col_names) y = clf.predict(X_df) if st.button('Predict CSV'): predictionY = clf.predict(X_df) acc =accuracy_score(y, predictionY) #st.write(acc) for i,j in enumerate(y): st.write(f'Patient {i} is {cases[y[i]]}') else: col1, col2, col3= st.beta_columns(3) with col1: user_inputs["baseline value"] = [st.number_input("baseline value: ", min_value=0, step = 1)] user_inputs["accelerations"] = [st.number_input("accelerations: ", min_value=0.000, step = 0.001, format= "%.3f") ] user_inputs["fetal_movement"] = [st.number_input("fetal_movement: ", min_value=0.000, step = 0.001, format= "%.3f")] user_inputs["uterine_contractions"] = [st.number_input("uterine_contractions: ", min_value=0.000, step = 0.001, format= "%.3f")] user_inputs["light_decelerations"] = [st.number_input("light_decelerations: ", min_value=0.0000, step = 0.001, format= "%.3f")] user_inputs["severe_decelerations"] = [st.number_input("severe_decelerations: ", min_value=0.000, step = 0.01)] user_inputs["prolongued_decelerations"] = [st.number_input("prolongued_decelerations: ", min_value=0.000, step = 0.001, format= "%.3f")] with col2: user_inputs["abnormal_short_term_variability"] = [st.number_input("abnormal_short_term_variability: ", min_value=0, step = 1)] user_inputs["mean_value_of_short_term_variability"] = [st.number_input("mean_value_of_short_term_variability: ", min_value=0.0, step = 0.1, format= "%.1f")] 
user_inputs["percentage_of_time_with_abnormal_long_term_variability"] = [st.number_input("percentage_of_time_with_abnormal_long_term_variability: ", min_value=0, step = 1)] user_inputs["mean_value_of_long_term_variability"] = [st.number_input("mean_value_of_long_term_variability: ", min_value=0.0, step = 0.1, format= "%.1f")] user_inputs["histogram_width"] = [st.number_input("histogram_width: ", min_value=0, step = 1)] user_inputs["histogram_min"] = [st.number_input("histogram_min: ", min_value=0, step = 1)] user_inputs["histogram_max"] = [st.number_input("histogram_max: ", min_value=0, step = 1)] with col3: user_inputs["histogram_number_of_peaks"] = [st.number_input("histogram_number_of_peaks: ", min_value=0, step = 1)] user_inputs["histogram_number_of_zeroes"] = [st.number_input("histogram_number_of_zeroes: ", min_value=0, step = 1)] user_inputs["histogram_mode"] = [st.number_input("histogram_mode: ", min_value=0, step = 1)] user_inputs["histogram_mean"] =[ st.number_input("histogram_mean: ", min_value=0, step = 1)] user_inputs["histogram_median"] = [st.number_input("histogram_median: ", min_value=0, step = 1)] user_inputs["histogram_variance"] = [st.number_input("histogram_variance: ", min_value=0, step = 1)] user_inputs["histogram_tendency"] = [st.number_input("histogram_tendency: ", min_value=0, step = 1)] inputs_pd = pd.DataFrame(data = user_inputs) if st.button('Predict'): col_names = list(inputs_pd) X_df = scaler.fit_transform(inputs_pd) X_df = pd.DataFrame(X_df, columns = col_names) y = clf.predict([inputs_pd.loc[0]]) st.write(f'The patient is {cases[y[0]]}') zeroValue = 0.00 else: patient_id = st.number_input("patient ID: ", min_value=0, step = 1) if st.button('Show & Predict'): if testSet.loc[0].size > patient_id: patientSearchedFor = testSet.loc[patient_id] st.write(patientSearchedFor) col_names = list(patientSearchedFor) X_df = scaler.fit_transform([patientSearchedFor]) X_df = pd.DataFrame(X_df, columns = col_names) y = 
clf.predict([patientSearchedFor]) st.write(f'The patient is {cases[y[0]]}') col1, col2= st.beta_columns(2) with col1: st.write("Doctor Agreed with the model Prediction") else: st.write("Wrong patient ID") class Login: def __init__(self): logedin = True usernameInput = st.empty() usernamePassword = st.empty() loginBtn = st.empty() if logedin: x = st.sidebar.radio("What type of user are you: ", ("Doctor", "DataEntry", "DataScientist")) if x == "Doctor": d = Doctor() elif x == "DataEntry": d = DataEntry() else: d = DataScientist() else: username = usernameInput.text_input("Enter Username: ", "osama") password = usernamePassword.text_input("Enter Password: ", "3", type="password") if loginBtn.button("Login"): if username == "osama" and password == "3": logedin = True x = st.sidebar.radio("What type of user are you: ", ("Doctor", "DataEntry", "DataScientist")) if x == "Doctor": d = Doctor() elif x == "DataEntry": d = DataEntry() else: d = DataScientist() usernameInput.empty() usernamePassword.empty() loginBtn.empty() else: st.write("invalid user or password") #user_inputs["fetal_health"] = st.number_input("fetal_health: ", min_value=0.00, step = 0.01) logedin = False i = 0 st.set_page_config(page_title="Fetal Health Prediction System", page_icon="🧊",layout="wide", initial_sidebar_state="expanded") st.write(""" # Fetal Health Prediction System """) if not logedin: l = Login() logedin = True logedin = True
43.103943
196
0.577748
1,416
12,026
4.641243
0.143362
0.069994
0.090992
0.057365
0.726567
0.71409
0.71409
0.70496
0.703439
0.686245
0
0.027689
0.297273
12,026
278
197
43.258993
0.749852
0.008731
0
0.622222
0
0
0.241964
0.085774
0
0
0
0
0
1
0.022222
false
0.027778
0.05
0
0.094444
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
73162855c04df2714dca510a374fce510b70e7be
348
py
Python
flatdata-generator/flatdata/generator/tree/nodes/resources/rawdata.py
gferon/flatdata
8839fb36be105e496fea8acc3fc907ae878dd063
[ "Apache-2.0" ]
140
2018-01-26T21:59:38.000Z
2022-02-17T10:23:29.000Z
flatdata-generator/flatdata/generator/tree/nodes/resources/rawdata.py
gferon/flatdata
8839fb36be105e496fea8acc3fc907ae878dd063
[ "Apache-2.0" ]
114
2018-01-26T17:49:20.000Z
2021-11-26T13:27:08.000Z
flatdata-generator/flatdata/generator/tree/nodes/resources/rawdata.py
gferon/flatdata
8839fb36be105e496fea8acc3fc907ae878dd063
[ "Apache-2.0" ]
22
2018-01-26T16:51:24.000Z
2021-04-27T13:32:44.000Z
from .base import ResourceBase class RawData(ResourceBase): def __init__(self, name, properties=None): super(RawData, self).__init__(name=name, properties=properties) @staticmethod def create(properties): return RawData(name=properties.name, properties=properties) def create_references(self): return []
24.857143
71
0.70977
37
348
6.432432
0.459459
0.235294
0.201681
0
0
0
0
0
0
0
0
0
0.192529
348
13
72
26.769231
0.846975
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.111111
0.222222
0.777778
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
7320cf2d634e5378844d60a8891d3ac683001bc5
21,179
py
Python
tests/blackbox/scopes/test_bb_scopes_updates.py
rohitjoshi/waflz
220945e6472762af8d8d7e0849699adcfd488605
[ "Apache-2.0" ]
null
null
null
tests/blackbox/scopes/test_bb_scopes_updates.py
rohitjoshi/waflz
220945e6472762af8d8d7e0849699adcfd488605
[ "Apache-2.0" ]
null
null
null
tests/blackbox/scopes/test_bb_scopes_updates.py
rohitjoshi/waflz
220945e6472762af8d8d7e0849699adcfd488605
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 '''Test config updates ''' # ------------------------------------------------------------------------------ # Imports # ------------------------------------------------------------------------------ import pytest import subprocess import os import sys import json import time import requests import base64 import time import datetime # ------------------------------------------------------------------------------ # Constants # ------------------------------------------------------------------------------ G_TEST_HOST = 'http://127.0.0.1:12345' # ------------------------------------------------------------------------------ # run_command # ------------------------------------------------------------------------------ def run_command(command): p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() return (p.returncode, stdout, stderr) # ------------------------------------------------------------------------------ # setup scopez server in action mode # ------------------------------------------------------------------------------ @pytest.fixture() def setup_scopez_server_action(): # ------------------------------------------------------ # setup # ------------------------------------------------------ l_cwd = os.getcwd() l_file_path = os.path.dirname(os.path.abspath(__file__)) l_geoip2city_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/db/GeoLite2-City.mmdb')) l_geoip2ISP_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/db/GeoLite2-ASN.mmdb')) l_conf_dir = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf')) l_ruleset_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/ruleset')) l_scopez_dir = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/scopes')) l_scopez_server_path = os.path.abspath(os.path.join(l_file_path, '../../../build/util/scopez_server/scopez_server')) l_subproc = 
subprocess.Popen([l_scopez_server_path, '-d', l_conf_dir, '-S', l_scopez_dir, '-r', l_ruleset_path, '-g', l_geoip2city_path, '-i', l_geoip2ISP_path, '-a', '-b']) print('cmd: {}'.format(' '.join([l_scopez_server_path, '-d', l_conf_dir, '-S', l_scopez_dir, '-r', l_ruleset_path, '-g', l_geoip2city_path, '-i', l_geoip2ISP_path, '-a', '-b']))) time.sleep(1) # ------------------------------------------------------ # yield... # ------------------------------------------------------ yield setup_scopez_server_action # ------------------------------------------------------ # tear down # ------------------------------------------------------ l_code, l_out, l_err = run_command('kill -9 %d'%(l_subproc.pid)) time.sleep(0.5) def test_acl_config_update(setup_scopez_server_action): ''' update acl config 0050-ZrLf2KkQ - remove gizoogle from user agent black list and test if request returns 200 ''' # ------------------------------------------------------ # test an 0050 with user-agent acl 'gizoogle' in the # request # ------------------------------------------------------ l_uri = G_TEST_HOST l_headers = {'host': 'monkeez.com', 'user-agent': 'gizoogle', 'waf-scopes-id': '0050'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'This is acl custom response\n' #------------------------------------------------------- # load acl config and remove gizoogle from blacklist # ------------------------------------------------------ l_conf = {} l_file_path = os.path.dirname(os.path.abspath(__file__)) l_acl_conf_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/acl/0050-ZrLf2KkQ.acl.json')) try: with open(l_acl_conf_path) as l_f: l_conf = json.load(l_f) except Exception as l_e: print('error opening config file: %s. 
Reason: %s error: %s, doc: %s' % ( l_acl_conf_path, type(l_e), l_e, l_e.__doc__)) assert False l_conf['user_agent']['blacklist'] = [] l_conf['last_modified_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ') # ------------------------------------------------------ # post/update acl conf # ------------------------------------------------------ l_url = '%s/update_acl'%(G_TEST_HOST) l_headers = {'Content-Type': 'application/json', 'waf-scopes-id': '0050'} l_r = requests.post(l_url, headers=l_headers, data=json.dumps(l_conf)) assert l_r.status_code == 200 # ------------------------------------------------------ # blacklist should have been updated and should get 200 #------------------------------------------------------- l_uri = G_TEST_HOST l_headers = {'host': 'monkeez.com', 'user-agent': 'gizoogle', 'waf-scopes-id': '0050'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 def test_rules_config_update(setup_scopez_server_action): ''' update rules config 0050-ZrLf3KKq.rules.json - change user agent to Donkeez from Monkeez ''' # ------------------------------------------------------ # test an 0050 with user-agent 'Monkeez' in the # request # ------------------------------------------------------ l_uri = G_TEST_HOST l_headers = {'host': 'monkeez.com', 'user-agent': 'monkeez', 'waf-scopes-id': '0050'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'This is rules custom response\n' #------------------------------------------------------- # load rules config and changes monkeez to donkeez in # custom rules # ------------------------------------------------------ l_conf = {} l_file_path = os.path.dirname(os.path.abspath(__file__)) l_rules_conf_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/rules/0050-ZrLf3KkQ.rules.json')) try: with open(l_rules_conf_path) as l_f: l_conf = json.load(l_f) except Exception as l_e: print('error opening config file: %s. 
Reason: %s error: %s, doc: %s' % ( l_conf_path, type(l_e), l_e, l_e.__doc__)) assert False l_conf['directive'][1]['sec_rule']['operator']['value'] = 'donkeez' l_conf['last_modified_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ') # ------------------------------------------------------ # post/update rules conf # ------------------------------------------------------ l_url = '%s/update_rules'%(G_TEST_HOST) l_headers = {'Content-Type': 'application/json', 'waf-scopes-id': '0050'} l_r = requests.post(l_url, headers=l_headers, data=json.dumps(l_conf)) assert l_r.status_code == 200 # ------------------------------------------------------ # test again with user-agent 'Monkeez' in the # request. It should pass # ------------------------------------------------------ l_uri = G_TEST_HOST l_headers = {'host': 'monkeez.com', 'user-agent': 'monkeez', 'waf-scopes-id': '0050'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 # ------------------------------------------------------ # test with user-agent 'donkeez' in the # request. 
should be blocked # ------------------------------------------------------ l_uri = G_TEST_HOST l_headers = {'host': 'monkeez.com', 'user-agent': 'donkeez', 'waf-scopes-id': '0050'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'This is rules custom response\n' def test_profile_config_update(setup_scopez_server_action): ''' update profile config 0050-YrLf3KkQ.wafprof.json - change ignore_query_args to test from ignore ''' # ------------------------------------------------------ # test an 0050 with sql injection # ------------------------------------------------------ l_uri = G_TEST_HOST+'/profile.html?a=%27select%20*%20from%20testing%27' l_headers = {'host': 'monkeez.com', 'waf-scopes-id': '0050'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'This is profile custom response\n' # ------------------------------------------------------ # test an 0050 with sql injection and query_args "ignore" # ------------------------------------------------------ l_uri = G_TEST_HOST+'/profile.html?ignore=%27select%20*%20from%20testing%27' l_headers = {'host': 'monkeez.com', 'waf-scopes-id': '0050'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 #------------------------------------------------------- # load profile config and change "ignore_query_args" # to "test" # ------------------------------------------------------ l_conf = {} l_file_path = os.path.dirname(os.path.abspath(__file__)) l_profile_conf_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/profile/0050-YrLf3KkQ.wafprof.json')) try: with open(l_profile_conf_path) as l_f: l_conf = json.load(l_f) except Exception as l_e: print('error opening config file: %s. 
Reason: %s error: %s, doc: %s' % ( l_profile_conf_path, type(l_e), l_e, l_e.__doc__)) assert False l_conf["general_settings"]["ignore_query_args"] = ["test"] l_conf['last_modified_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ') # ------------------------------------------------------ # post/update profile conf # ------------------------------------------------------ l_url = '%s/update_profile'%(G_TEST_HOST) l_headers = {'Content-Type': 'application/json', 'waf-scopes-id': '0050'} l_r = requests.post(l_url, headers=l_headers, data=json.dumps(l_conf)) assert l_r.status_code == 200 # ------------------------------------------------------ # test an 0050 with sql injection and query_args "ignore" # should get 403 # ------------------------------------------------------ l_uri = G_TEST_HOST+'/profile.html?ignore=%27select%20*%20from%20testing%27' l_headers = {'host': 'monkeez.com', 'waf-scopes-id': '0050'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'This is profile custom response\n' # ------------------------------------------------------ # test an 0050 with sql injection and query_args "test" # sql injection should be ignored and get 200 # ------------------------------------------------------ l_uri = G_TEST_HOST+'/profile.html?test=%27select%20*%20from%20testing%27' l_headers = {'host': 'monkeez.com', 'waf-scopes-id': '0050'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 def test_limit_config_update(setup_scopez_server_action): # ------------------------------------------------------ # Make 3 request in 5 sec for 3rd and # 4th scope. 
Third request should get rate limited # ------------------------------------------------------ l_uri = G_TEST_HOST+'/test.html' l_headers = {'host': 'limit.com', 'waf-scopes-id': '0050'} for x in range(2): l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'This is ddos custom response\n' l_uri = G_TEST_HOST+'/test.html' l_headers = {'host': 'test.limit.com', 'waf-scopes-id': '0050'} for x in range(2): l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'custom response for limits from limit_id_2\n' # ------------------------------------------------------ # sleep for 10 seconds. Enforcements should expire # ------------------------------------------------------ time.sleep(10) #------------------------------------------------------- # load limit config and change duration_sec to 3 # ------------------------------------------------------ l_conf = {} l_file_path = os.path.dirname(os.path.abspath(__file__)) l_limit_conf_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/limit/0050-MjMhNXMR.limit.json')) try: with open(l_limit_conf_path) as l_f: l_conf = json.load(l_f) except Exception as l_e: print('error opening config file: %s. 
Reason: %s error: %s, doc: %s' % ( l_limit_conf_path, type(l_e), l_e, l_e.__doc__)) assert False l_conf["num"] = 3 l_conf['last_modified_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ') #------------------------------------------------------- # POST conf # ------------------------------------------------------ l_url = '%s/update_limit'%(G_TEST_HOST) l_headers = {'Content-Type': 'application/json', 'waf-scopes-id': '0050'} l_r = requests.post(l_url, headers=l_headers, data=json.dumps(l_conf)) assert l_r.status_code == 200 # ------------------------------------------------------ # Make 4 request in 5 sec. fourth request should get # rate limited. Third request shouldn't be blocked # because of the update # ------------------------------------------------------ l_uri = G_TEST_HOST+'/test.html' l_headers = {'host': 'limit.com', 'waf-scopes-id': '0050'} for x in range(3): l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'This is ddos custom response\n' # ------------------------------------------------------ # Make 4 request in 5 sec for fourth scope. 
# verify if 4th scope was also updated # ------------------------------------------------------ l_uri = G_TEST_HOST+'/test.html' l_headers = {'host': 'test.limit.com', 'waf-scopes-id': '0050'} for x in range(3): l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'custom response for limits from limit_id_2\n' def test_scopes_update(setup_scopez_server_action): #------------------------------------------------------- # check second scope for AN 0051 working correctly # ------------------------------------------------------ l_uri = G_TEST_HOST+'/path.html' l_headers = {'host': 'www.regexhost.com', 'waf-scopes-id':'0051', 'User-Agent': 'bananas'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'This is from RX scope\n' #------------------------------------------------------- # change the 'path' value for scope and update. # check if update was successful # ------------------------------------------------------ l_conf = {} l_file_path = os.path.dirname(os.path.abspath(__file__)) l_scopes_conf_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/scopes/0051.scopes.json')) try: with open(l_scopes_conf_path) as l_f: l_conf = json.load(l_f) except Exception as l_e: print('error opening config file: %s. 
Reason: %s error: %s, doc: %s' % ( l_scopes_conf_path, type(l_e), l_e, l_e.__doc__)) assert False l_conf['scopes'][1]['path']['value'] = ".*/test.html" l_conf['last_modified_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ') #------------------------------------------------------- # POST conf # ------------------------------------------------------ l_url = '%s/update_scopes'%(G_TEST_HOST) l_headers = {'Content-Type': 'application/json'} l_r = requests.post(l_url, headers=l_headers, data=json.dumps(l_conf)) assert l_r.status_code == 200 #------------------------------------------------------- # make a request with same path '/path.html', # should match GLOB scope # ------------------------------------------------------ l_uri = G_TEST_HOST+'/path.html' l_headers = {'host': 'www.regexhost.com', 'waf-scopes-id':'0051', 'User-Agent': 'bananas'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'This is from GLOB scope\n' #------------------------------------------------------- # make a request with updated path '/test.html', # should get 403 with custom response # ------------------------------------------------------ l_uri = G_TEST_HOST+'/test.html' l_headers = {'host': 'www.regexhost.com', 'waf-scopes-id':'0051', 'User-Agent': 'bananas'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'This is from RX scope\n' def test_scopes_linkage_update(setup_scopez_server_action): """ Test linkage update. 
Update rules config in second scope (0050-scopes.json) to 0050-0gG8osWJ.rules.json from 0050-ZrLf3KkQ.rules.json check if update worked """ #------------------------------------------------------- # check second scope for AN 0050 working correctly # ------------------------------------------------------ l_uri = G_TEST_HOST+'/path.html' l_headers = {'host': 'test.com', 'waf-scopes-id':'0050', 'User-Agent': 'monkeez'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'This is rules custom response\n' #------------------------------------------------------- # change the 'rules_prod_id' value for second scope # and update. # check if update was successful # ------------------------------------------------------ l_conf = {} l_file_path = os.path.dirname(os.path.abspath(__file__)) l_scopes_conf_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/scopes/0050.scopes.json')) try: with open(l_scopes_conf_path) as l_f: l_conf = json.load(l_f) except Exception as l_e: print('error opening config file: %s. Reason: %s error: %s, doc: %s' % ( l_scopes_conf_path, type(l_e), l_e, l_e.__doc__)) assert False l_conf['scopes'][1]['rules_prod_id'] = "0050-0gG8osWJ" l_conf['last_modified_date'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ') #------------------------------------------------------- # POST conf # ------------------------------------------------------ l_url = '%s/update_scopes'%(G_TEST_HOST) l_headers = {'Content-Type': 'application/json'} l_r = requests.post(l_url, headers=l_headers, data=json.dumps(l_conf)) assert l_r.status_code == 200 #------------------------------------------------------- # make the same request. 
should get 200 # ------------------------------------------------------ l_uri = G_TEST_HOST+'/path.html' l_headers = {'host': 'test.com', 'waf-scopes-id':'0050', 'User-Agent': 'monkeez'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 #assert l_r.text == 'This is from GLOB scope\n' #------------------------------------------------------- # make a request with user-agent bananas # ------------------------------------------------------ l_uri = G_TEST_HOST+'/path.html' l_headers = {'host': 'test.com', 'waf-scopes-id':'0050', 'User-Agent': 'bananas'} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 403 assert l_r.text == 'This is rules custom response\n'
46.445175
127
0.47363
2,428
21,179
3.883855
0.098847
0.015483
0.037328
0.043054
0.777094
0.746872
0.725769
0.70106
0.698515
0.698515
0
0.022994
0.215591
21,179
455
128
46.547253
0.544634
0.317579
0
0.777027
0
0.02027
0.223198
0.05529
0
0
0
0
0.165541
1
0.027027
false
0
0.033784
0
0.064189
0.023649
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
73420aa4f6e3ed9ef36c3abd1b21532c44e9fe5a
65
py
Python
cvdd/preprocessors/__init__.py
altescy/cvdd
57e4fe0fd30a6d2b67651ce076b63a9a8a6e7c7a
[ "MIT" ]
5
2021-07-11T08:40:43.000Z
2021-07-19T05:08:11.000Z
cvdd/preprocessors/__init__.py
altescy/cvdd
57e4fe0fd30a6d2b67651ce076b63a9a8a6e7c7a
[ "MIT" ]
null
null
null
cvdd/preprocessors/__init__.py
altescy/cvdd
57e4fe0fd30a6d2b67651ce076b63a9a8a6e7c7a
[ "MIT" ]
null
null
null
from cvdd.preprocessors.stopwords import Stopwords # noqa: F401
32.5
64
0.815385
8
65
6.625
0.875
0
0
0
0
0
0
0
0
0
0
0.052632
0.123077
65
1
65
65
0.877193
0.153846
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
734ca65a9fb58ba2c1985d18cc4676e3a146663d
75
py
Python
http2py/__init__.py
i2mint/http2py
8715cd9e56f50b868bfd70c699f5363b053a2295
[ "Apache-2.0" ]
1
2020-10-07T03:16:49.000Z
2020-10-07T03:16:49.000Z
http2py/__init__.py
i2mint/http2py
8715cd9e56f50b868bfd70c699f5363b053a2295
[ "Apache-2.0" ]
3
2020-10-12T22:17:39.000Z
2022-01-19T23:59:46.000Z
http2py/__init__.py
i2mint/http2py
8715cd9e56f50b868bfd70c699f5363b053a2295
[ "Apache-2.0" ]
1
2020-10-12T21:28:16.000Z
2020-10-12T21:28:16.000Z
from .client import HttpClient from .cli_maker import mk_cli, dispatch_cli
25
43
0.84
12
75
5
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.12
75
2
44
37.5
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
73535a2eec20fe8c9bb71cef477809dcf691bb57
6,466
py
Python
dgx_remote_shell/dgx_remote_shell/test.py
AMXAUNZ/DGX-Remote-Shell
3e4ff141158d0d0dd7dce0a34b13ddba8e1085d4
[ "MIT" ]
null
null
null
dgx_remote_shell/dgx_remote_shell/test.py
AMXAUNZ/DGX-Remote-Shell
3e4ff141158d0d0dd7dce0a34b13ddba8e1085d4
[ "MIT" ]
null
null
null
dgx_remote_shell/dgx_remote_shell/test.py
AMXAUNZ/DGX-Remote-Shell
3e4ff141158d0d0dd7dce0a34b13ddba8e1085d4
[ "MIT" ]
null
null
null
text = """Line 20 (10:11:37):: Command From [5002:3:1]-[$03$0D$0A$1B[1GDGX_SHELL>$1B[11Gshow$0D$0AVersion Info:$0D$0AMCPU$0D$0A BTL: 3.8.0 $09HW_ID 0x0 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x500$0D$0A APP: 2.6.2.2 R $09Nov 15 2014 20:35:43$0D$0A SRM: 1.0.5.1$0D$0ABCPU1 DxLink_input$0D$0A BTL 1.0.0.2 $09HW_ID 0x1 $09Targ_ID 0x0 ] Line 21 (10:11:37):: Command From [5002:3:1]-[ $09SysRevID 0x2 $09FW_ID 0x10C0$0D$0A APP 1.2.3.1 $09HW_ID 0x1 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x10C0$0D$0A EEPROM 1.0.2.1 $09HW_ID 0x0 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x2030$0D$0A EEPROM 3.0.6.0 $09HW_ID 0x0 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x2060$0D$0ABCPU] Line 22 (10:11:37):: Command From [5002:3:1]-[2 DxLink_input$0D$0A BTL 1.0.0.2 $09HW_ID 0x1 $09Targ_ID 0x0 $09SysRevID 0x2 $09FW_ID 0x10C0$0D$0A APP 1.2.3.1 $09HW_ID 0x1 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x10C0$0D$0A EEPROM 1.0.2.1 $09HW_ID 0x0 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x2030$0D$0A EEPROM 3.] Line 23 (10:11:37):: Command From [5002:3:1]-[0.6.0 $09HW_ID 0x0 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x2060$0D$0ABCPU3 HDMI_input$0D$0A BTL 1.0.0.2 $09HW_ID 0x0 $09Targ_ID 0x0 $09SysRevID 0x2 $09FW_ID 0x1050$0D$0A APP 1.0.8.1 $09HW_ID 0x0 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x1050$0D$0ABCPU5 DxLink_output] Line 24 (10:11:37):: Command From [5002:3:1]-[$0D$0A BTL 1.0.0.2 $09HW_ID 0x1 $09Targ_ID 0x0 $09SysRevID 0x2 $09FW_ID 0x10D0$0D$0A APP 1.2.2.3 $09HW_ID 0x1 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x10D0$0D$0A EEPROM 1.0.2.1 $09HW_ID 0x0 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x2030$0D$0A EEPROM 3.0.6.0 $09HW_ID 0x0 $09] Line 25 (10:11:37):: Command From [5002:3:1]-[Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x2070$0D$0ABCPU6 HDMI_output$0D$0A BTL 1.0.0.2 $09HW_ID 0x1 $09Targ_ID 0x0 $09SysRevID 0x2 $09FW_ID 0x1080$0D$0A APP 1.2.6.2 $09HW_ID 0x1 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x1080$0D$0A FPGA 1.8.0.0 $09HW_ID 0x0 $09Targ_ID 0x] Line 26 (10:11:37):: Command From [5002:3:1]-[0 $09SysRevID 0x1 
$09FW_ID 0x2010$0D$0ACENTER $0D$0A BTL 1.0.1.3 $09HW_ID 0x1 $09Targ_ID 0x1 $09SysRevID 0x2 $09FW_ID 0x1030$0D$0A APP 2.0.2.3 $09HW_ID 0x1 $09Targ_ID 0x1 $09SysRevID 0x1 $09FW_ID 0x1030$0D$0APPIC $0D$0A BTL 1.0.1.4 $09HW_ID 0x0 $09Targ_ID 0x0 $09SysR] Line 27 (10:11:37):: Command From [5002:3:1]-[evID 0x2 $09FW_ID 0x1000$0D$0A APP 2.2.2.1 $09HW_ID 0x0 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x1000$0D$0A EEPROM 1.2.1.1 $09HW_ID 0x0 $09Targ_ID 0x0 $09SysRevID 0x1 $09FW_ID 0x2020$0D$0A$0D$0A$1B[1GDGX_SHELL>$1B[11G"]""" text = """Line 28 (11:12:42):: Command From [5002:3:1]-[$03$0D$0A$1B[1GDGX_SHELL>$1B[11Gchannel$0D$0ABCPU1 Channel: 0 port: 0 No Signal$0D$0ABCPU1 Channel: 1 port: 0 No Signal$0D$0ABCPU1 Channel: 2 port: 0 No Signal$0D$0ABCPU1 Channel: 3 port: 0 No Signal$0D$0ABCPU2 Channel: 0 port: 0 No Signal$0D$0ABCPU2 Channel: 1 port: 0 No Signal$0D$0ABCPU2 Cha] Line 29 (11:12:42):: Command From [5002:3:1]-[nnel: 2 port: 0 No Signal$0D$0ABCPU2 Channel: 3 port: 0 No Signal$0D$0ABCPU3 Channel: 0 port: 0 No Signal$0D$0ABCPU3 Channel: 1 port: 0 No Signal$0D$0ABCPU3 Channel: 2 port: 0 No Signal$0D$0ABCPU3 Channel: 3 port: 0 No Signal$0D$0ABCPU5 Channel: 0 port: 0 No Signal$0D$0ABCPU5 Channel] Line 30 (11:12:42):: Command From [5002:3:1]-[: 1 port: 0 No Signal$0D$0ABCPU5 Channel: 2 port: 0 No Signal$0D$0ABCPU5 Channel: 3 port: 0 No Signal$0D$0ABCPU6 Channel: 0 port: 0 No Signal$0D$0ABCPU6 Channel: 1 port: 0 No Signal$0D$0ABCPU6 Channel: 2 port: 0 No Signal$0D$0ABCPU6 Channel: 3 port: 0 No Signal$0D$0A$1B[1GDGX_SHELL>$1B[1] Line 31 (11:12:42):: Command From [5002:3:1]-[1G]""" text = """Line 1 (11:41:43):: Command From [5002:3:1]-[$03$0D$0A$1B[1GDGX_SHELL>$1B[11Gswitch$0D$0A$1B[1;1H$1B[2J$0D$0A O U T P U T S $0D$0A | 1 2 3 4| 5 6 7 8| 9 10 11 12|13 14 15 16|$0D$0A ---+-----------+-----------+-----------+-----------+$0D$0A 1] Line 2 (11:41:43):: Command From [5002:3:1]-[ |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. 
..|$0D$0A 2 |>< >< >< ><|>< >< >< ><|>< >< >< ><|>< >< >< ><|$0D$0A 3 |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0A 4 |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0A ---+-----------+-----------+------] Line 3 (11:41:43):: Command From [5002:3:1]-[-----+-----------+$0D$0A 5 |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0A 6 |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0A 7 |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0A 8 |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0A ---+------] Line 4 (11:41:43):: Command From [5002:3:1]-[-----+-----------+-----------+-----------+$0D$0A 9 |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0A 10 |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0A 11 |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0A 12 |.. .. .. ..|.. .. .. ..|.. .. .. ..|..] Line 5 (11:41:43):: Command From [5002:3:1]-[ .. .. ..|$0D$0A ---+-----------+-----------+-----------+-----------+$0D$0AI 13 |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0A 14 |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0AN 15 |.. .. .. ..|.. .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0A 16 |.. .. .. ..|..] Line 6 (11:41:43):: Command From [5002:3:1]-[ .. .. ..|.. .. .. ..|.. .. .. ..|$0D$0AP ---+-----------+-----------+-----------+-----------+$0D$0A$0D$0A$1B[1GDGX_SHELL>$1B[11G]""" # split into lines lines = text.split('\n') # drop the header #my_line = '' unwanted = [('^13', ''), ('$03', ''), ('1G', ''), ('$1B1;1H$1B2J', ''), ('$0D$0A', '\r'), ('$0D $0A', '\r'), ('$09', '\t'), ('$1BDGX_SHELL>$1B1', 'DGX_SHELL>')] output = '' for line in lines: my_line = line.split('-', 1)[1] my_line = my_line.replace('[', '') my_line = my_line.replace(']', '') output = output + my_line print output for item in unwanted: output = output.replace(item[0], item[1]) print output # remove some unwanted stuff # [ # ] # ^13 # $03 # $1B1;1H$1B2J # replace some stuff # $0D$0A -- \r? # $0D $0A -- \r? # $09 -- tab? 
# $1B1GDGX_SHELL>$1B11G -- DGX_SHELL> #
95.088235
369
0.523044
1,050
6,466
3.134286
0.131429
0.070495
0.04254
0.079003
0.725919
0.692495
0.659982
0.466424
0.385901
0.34883
0
0.226622
0.211104
6,466
67
370
96.507463
0.418545
0.029694
0
0.108108
0
0.459459
0.914468
0.101199
0
0
0.054516
0
0
0
null
null
0
0
null
null
0.054054
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
1
1
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
7df89317e7a2eb8c46e0f10944595108c74c31ec
208
py
Python
src/micromind/stamp.py
MicroMedIAn/MicroMind
93e3e91ce4f210cf0a676a79edcdcf04fa6b4818
[ "MIT" ]
1
2021-01-21T13:56:45.000Z
2021-01-21T13:56:45.000Z
src/micromind/stamp.py
MicroMedIAn/MicroMind
93e3e91ce4f210cf0a676a79edcdcf04fa6b4818
[ "MIT" ]
null
null
null
src/micromind/stamp.py
MicroMedIAn/MicroMind
93e3e91ce4f210cf0a676a79edcdcf04fa6b4818
[ "MIT" ]
null
null
null
from datetime import datetime from uuid import uuid4 def timestamp(): return datetime.now() def uuid(): return str(uuid4()) def eventid(): return f'{timestamp()}-{uuid()}'.replace(' ', '-')
13.866667
54
0.634615
25
208
5.28
0.52
0.121212
0
0
0
0
0
0
0
0
0
0.011905
0.192308
208
14
55
14.857143
0.77381
0
0
0
0
0
0.115385
0.105769
0
0
0
0
0
1
0.375
true
0
0.25
0.375
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
5
b43ff864100ed2affdcda1e57f1a8c0d2e1fe34a
25
py
Python
tests/var/add_self.py
Mieschendahl/assignment-final-stub
19eea657fcc4f8a455c42028f34b918628514cc0
[ "MIT" ]
null
null
null
tests/var/add_self.py
Mieschendahl/assignment-final-stub
19eea657fcc4f8a455c42028f34b918628514cc0
[ "MIT" ]
1
2022-03-20T11:08:45.000Z
2022-03-20T11:08:45.000Z
tests/var/add_self.py
Mieschendahl/assignment-final-stub
19eea657fcc4f8a455c42028f34b918628514cc0
[ "MIT" ]
6
2022-03-13T13:10:25.000Z
2022-03-28T22:18:12.000Z
x = 3 x = x + x print(x)
6.25
9
0.44
7
25
1.571429
0.428571
0.363636
0
0
0
0
0
0
0
0
0
0.0625
0.36
25
3
10
8.333333
0.625
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
1
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
b442c4548755c9f1e23eb51e2779fd26c34bd10c
3,944
py
Python
ddcz/migrations/0014_auto_20180614_0032.py
ItIsI-Orient/graveyard
4b1c1be90b6f1362aa43c65899bb3676ffccbeb9
[ "MIT" ]
null
null
null
ddcz/migrations/0014_auto_20180614_0032.py
ItIsI-Orient/graveyard
4b1c1be90b6f1362aa43c65899bb3676ffccbeb9
[ "MIT" ]
null
null
null
ddcz/migrations/0014_auto_20180614_0032.py
ItIsI-Orient/graveyard
4b1c1be90b6f1362aa43c65899bb3676ffccbeb9
[ "MIT" ]
null
null
null
# Generated by Django 2.0.2 on 2018-06-13 22:32 import ddcz.models.magic from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ddcz', '0013_auto_20180614_0021'), ] operations = [ migrations.AlterField( model_name='creationvotes', name='opraveno', field=ddcz.models.magic.MisencodedCharField(max_length=1), ), migrations.AlterField( model_name='creationvotes', name='rubrika', field=ddcz.models.magic.MisencodedCharField(max_length=20), ), migrations.AlterField( model_name='creativepage', name='name', field=ddcz.models.magic.MisencodedCharField(max_length=30), ), migrations.AlterField( model_name='creativepageconcept', name='text', field=ddcz.models.magic.MisencodedTextField(), ), migrations.AlterField( model_name='creativepagesection', name='name', field=ddcz.models.magic.MisencodedCharField(max_length=30), ), migrations.AlterField( model_name='monster', name='bojovnost', field=ddcz.models.magic.MisencodedCharField(blank=True, max_length=50, null=True), ), migrations.AlterField( model_name='monster', name='datum', field=models.DateTimeField(auto_now_add=True), ), migrations.AlterField( model_name='monster', name='inteligence', field=ddcz.models.magic.MisencodedCharField(blank=True, max_length=50, null=True), ), migrations.AlterField( model_name='monster', name='oc', field=ddcz.models.magic.MisencodedTextField(), ), migrations.AlterField( model_name='monster', name='odl', field=ddcz.models.magic.MisencodedCharField(max_length=3), ), migrations.AlterField( model_name='monster', name='poh', field=ddcz.models.magic.MisencodedTextField(blank=True, null=True), ), migrations.AlterField( model_name='monster', name='pokl', field=ddcz.models.magic.MisencodedTextField(blank=True, null=True), ), migrations.AlterField( model_name='monster', name='popis', field=ddcz.models.magic.MisencodedTextField(), ), migrations.AlterField( model_name='monster', name='pres', field=ddcz.models.magic.MisencodedTextField(blank=True, null=True), ), 
migrations.AlterField( model_name='monster', name='skupina', field=ddcz.models.magic.MisencodedTextField(), ), migrations.AlterField( model_name='monster', name='sm', field=ddcz.models.magic.MisencodedCharField(db_column='SM', max_length=50), ), migrations.AlterField( model_name='monster', name='uc', field=ddcz.models.magic.MisencodedTextField(), ), migrations.AlterField( model_name='monster', name='vel', field=ddcz.models.magic.MisencodedCharField(max_length=20), ), migrations.AlterField( model_name='monster', name='zkus', field=ddcz.models.magic.MisencodedCharField(max_length=50), ), migrations.AlterField( model_name='monster', name='zran', field=ddcz.models.magic.MisencodedTextField(blank=True, null=True), ), migrations.AlterField( model_name='monster', name='zvt', field=ddcz.models.magic.MisencodedTextField(), ), ]
32.866667
94
0.563387
344
3,944
6.351744
0.203488
0.09611
0.144165
0.278719
0.836156
0.800458
0.729977
0.648055
0.648055
0.570252
0
0.01827
0.31998
3,944
119
95
33.142857
0.796421
0.01141
0
0.690265
1
0
0.080831
0.005902
0
0
0
0
0
1
0
false
0
0.017699
0
0.044248
0
0
0
0
null
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
b45eff381f8b9068052052380d35a3ba36759281
318
py
Python
utils/autoreload.py
huangbop/bosk
b3d37ae3bd15e091e1a0764ee8d39f4790628765
[ "MIT" ]
null
null
null
utils/autoreload.py
huangbop/bosk
b3d37ae3bd15e091e1a0764ee8d39f4790628765
[ "MIT" ]
null
null
null
utils/autoreload.py
huangbop/bosk
b3d37ae3bd15e091e1a0764ee8d39f4790628765
[ "MIT" ]
null
null
null
def restart_with_reloader(): while True: def python_reloader(main_func, *args, **kwargs): try: exit_code = restart_with_reloader() except: pass def main(main_func, *args, **kwargs): reloader = python_reloader python_reloader(main_func, *(), **{})
9.352941
48
0.584906
34
318
5.147059
0.470588
0.24
0.217143
0.251429
0
0
0
0
0
0
0
0
0.298742
318
33
49
9.636364
0.784753
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0.1
0
null
null
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
5
b46dde60761010263d79bda17f9c05790d13ea58
143
py
Python
repeater/__init__.py
Lukasa/pyrc
71ecd506e47e28e0f6649ea5993532cf87924d96
[ "MIT" ]
1
2017-12-02T23:14:12.000Z
2017-12-02T23:14:12.000Z
repeater/__init__.py
Lukasa/pyrc
71ecd506e47e28e0f6649ea5993532cf87924d96
[ "MIT" ]
null
null
null
repeater/__init__.py
Lukasa/pyrc
71ecd506e47e28e0f6649ea5993532cf87924d96
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ repeater/ ~~~~~~~~~ This module defines the basic webserver that powers pyrc. """ from .server import application
15.888889
57
0.65035
17
143
5.470588
1
0
0
0
0
0
0
0
0
0
0
0.008333
0.160839
143
8
58
17.875
0.766667
0.706294
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b473fdd7df4655afa3929299183ddcaa18cee090
51
py
Python
ch1/Ex_1.3.4.1/task02.py
lyskevin/cpbook-code
027f77933428d7688f935800ffa9109794e429b1
[ "UPL-1.0" ]
1,441
2018-12-03T23:46:17.000Z
2022-03-29T06:36:43.000Z
ch1/Ex_1.3.4.1/task02.py
lyskevin/cpbook-code
027f77933428d7688f935800ffa9109794e429b1
[ "UPL-1.0" ]
53
2018-12-11T13:50:35.000Z
2022-03-20T04:30:39.000Z
ch1/Ex_1.3.4.1/task02.py
lyskevin/cpbook-code
027f77933428d7688f935800ffa9109794e429b1
[ "UPL-1.0" ]
420
2018-12-04T11:22:08.000Z
2022-03-27T15:25:33.000Z
from math import pi print(round(pi, int(input())))
17
30
0.705882
9
51
4
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.117647
51
2
31
25.5
0.8
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
b47ad9bdf9009dfd71eb1f2fdcab5a6a14db3f18
82
py
Python
geo_agent/infection_functions/infect_init.py
kevjp/openstreetmap-carto
be30cfe8d73f78cb4b5ba9acaaf42a942c70270d
[ "CC0-1.0" ]
null
null
null
geo_agent/infection_functions/infect_init.py
kevjp/openstreetmap-carto
be30cfe8d73f78cb4b5ba9acaaf42a942c70270d
[ "CC0-1.0" ]
null
null
null
geo_agent/infection_functions/infect_init.py
kevjp/openstreetmap-carto
be30cfe8d73f78cb4b5ba9acaaf42a942c70270d
[ "CC0-1.0" ]
null
null
null
class Infectioninit(): def time_dependent_prob(self): return 0.5
9.111111
34
0.634146
10
82
5
1
0
0
0
0
0
0
0
0
0
0
0.033898
0.280488
82
8
35
10.25
0.813559
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
c348901a1d52222579c28581d8a50680b611181d
96
py
Python
inPress/datecheck.py
hadimuhammad/InPress
c572ae8a227aa36160add52c3a7e75d8d6acb2dc
[ "FSFAP" ]
null
null
null
inPress/datecheck.py
hadimuhammad/InPress
c572ae8a227aa36160add52c3a7e75d8d6acb2dc
[ "FSFAP" ]
null
null
null
inPress/datecheck.py
hadimuhammad/InPress
c572ae8a227aa36160add52c3a7e75d8d6acb2dc
[ "FSFAP" ]
null
null
null
from datetime import date print date.today() if ("2013-01-01" < date.today()): print "true"
16
33
0.666667
15
96
4.266667
0.666667
0.28125
0
0
0
0
0
0
0
0
0
0.1
0.166667
96
6
34
16
0.7
0
0
0
0
0
0.14433
0
0
0
0
0
0
0
null
null
0
0.25
null
null
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
5
c35fd43d7cfa01b6bc2880868b2605fe020c173f
171
py
Python
reports/admin.py
j-windsor/cs3240-f15-team21-v2
ec95c6629b84e7e0f52d34990ec8bec863d90a7b
[ "MIT" ]
null
null
null
reports/admin.py
j-windsor/cs3240-f15-team21-v2
ec95c6629b84e7e0f52d34990ec8bec863d90a7b
[ "MIT" ]
null
null
null
reports/admin.py
j-windsor/cs3240-f15-team21-v2
ec95c6629b84e7e0f52d34990ec8bec863d90a7b
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Report, Folder, Contributor admin.site.register(Report) admin.site.register(Folder) admin.site.register(Contributor)
24.428571
47
0.824561
23
171
6.130435
0.478261
0.191489
0.361702
0
0
0
0
0
0
0
0
0
0.081871
171
6
48
28.5
0.898089
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
5ede8bea43c2854fd6dd3de2baba5b17b347a515
71
py
Python
pyforc/core/__init__.py
peytondmurray/PyFORC
743b39ee03d5eca90b326c47fcb1c04a2726cbfa
[ "MIT" ]
2
2018-11-08T16:11:15.000Z
2021-11-21T23:21:34.000Z
pyforc/core/__init__.py
peytondmurray/PyFORC
743b39ee03d5eca90b326c47fcb1c04a2726cbfa
[ "MIT" ]
1
2021-04-03T03:01:37.000Z
2021-04-03T03:01:37.000Z
pyforc/core/__init__.py
peytondmurray/PyFORC
743b39ee03d5eca90b326c47fcb1c04a2726cbfa
[ "MIT" ]
2
2021-04-03T01:29:48.000Z
2022-02-17T20:55:36.000Z
from . import config, coordinates, forc, forcdata, ingester, ops, plot
35.5
70
0.760563
9
71
6
1
0
0
0
0
0
0
0
0
0
0
0
0.140845
71
1
71
71
0.885246
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5efcff3a96fce06b393c3ec4e54eefdb3e383667
288
py
Python
tests/functional/conftest.py
gbvanrenswoude/awsprocesscreds
99c8a20361ebd495d057e7960e5ef4578d1b3780
[ "Apache-2.0" ]
136
2017-11-29T19:46:40.000Z
2022-02-13T08:03:00.000Z
tests/functional/conftest.py
gbvanrenswoude/awsprocesscreds
99c8a20361ebd495d057e7960e5ef4578d1b3780
[ "Apache-2.0" ]
39
2017-11-29T17:32:09.000Z
2021-11-24T05:41:56.000Z
tests/functional/conftest.py
gbvanrenswoude/awsprocesscreds
99c8a20361ebd495d057e7960e5ef4578d1b3780
[ "Apache-2.0" ]
57
2017-11-29T16:43:54.000Z
2022-03-25T15:41:02.000Z
import mock import pytest import requests @pytest.fixture(autouse=True) def mock_requests_session(monkeypatch): session = mock.Mock(spec=requests.Session) session_cls = mock.Mock(return_value=session) monkeypatch.setattr("requests.Session", session_cls) return session
22.153846
56
0.777778
36
288
6.083333
0.416667
0.205479
0.200913
0.228311
0
0
0
0
0
0
0
0
0.131944
288
12
57
24
0.876
0
0
0
0
0
0.055556
0
0
0
0
0
0
1
0.111111
false
0
0.333333
0
0.555556
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
48abc6e45cbd078838cb65fb0454c7bd58261b01
32
py
Python
deep_disfluency/corpus/test.py
treena908/deep_disfluency
4e18bc17e74c356cd3a9c26fc80bf1c4a5487d59
[ "MIT" ]
null
null
null
deep_disfluency/corpus/test.py
treena908/deep_disfluency
4e18bc17e74c356cd3a9c26fc80bf1c4a5487d59
[ "MIT" ]
null
null
null
deep_disfluency/corpus/test.py
treena908/deep_disfluency
4e18bc17e74c356cd3a9c26fc80bf1c4a5487d59
[ "MIT" ]
null
null
null
print('hello world') print('hi')
16
20
0.6875
5
32
4.4
0.8
0
0
0
0
0
0
0
0
0
0
0
0.0625
32
2
21
16
0.733333
0
0
0
0
0
0.393939
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
5b128e4a19d912e40496cb20685172f3583c021b
286
py
Python
desafiosCursoEmVideo/ex049.py
gomesGabriel/Pythonicos
b491cefbb0479dd83fee267304d0fa30b99786a5
[ "MIT" ]
1
2019-09-02T12:14:58.000Z
2019-09-02T12:14:58.000Z
desafiosCursoEmVideo/ex049.py
gomesGabriel/Pythonicos
b491cefbb0479dd83fee267304d0fa30b99786a5
[ "MIT" ]
null
null
null
desafiosCursoEmVideo/ex049.py
gomesGabriel/Pythonicos
b491cefbb0479dd83fee267304d0fa30b99786a5
[ "MIT" ]
null
null
null
print('\033[33m-=-\033[m' * 20) print('\033[33m************* Outra tabuada *************\033[m') print('\033[33m-=-\033[m' * 20) v = int(input('Insira um valor: ')) print('A tabuada do {} é: ' .format(v)) for c in range(0, 10): print('{} * {} = {}' .format(c, v, c*v)) print('Fim')
31.777778
64
0.5
46
286
3.108696
0.521739
0.167832
0.230769
0.195804
0.237762
0.237762
0
0
0
0
0
0.126531
0.143357
286
8
65
35.75
0.457143
0
0
0.25
0
0
0.48951
0.073427
0
0
0
0
0
1
0
false
0
0
0
0
0.75
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
5b22c7207f317267e44074fba8cef32183d1e63b
73
py
Python
__init__.py
mcandemir/fetch-boring-stuff
67cab916794dab2f969244ae2c5552573ea91dc3
[ "MIT" ]
null
null
null
__init__.py
mcandemir/fetch-boring-stuff
67cab916794dab2f969244ae2c5552573ea91dc3
[ "MIT" ]
null
null
null
__init__.py
mcandemir/fetch-boring-stuff
67cab916794dab2f969244ae2c5552573ea91dc3
[ "MIT" ]
1
2021-09-09T09:14:10.000Z
2021-09-09T09:14:10.000Z
from scripts.fetcher import Fetcher fetcher = Fetcher() fetcher.start()
14.6
35
0.780822
9
73
6.333333
0.555556
0.736842
0.736842
0
0
0
0
0
0
0
0
0
0.123288
73
4
36
18.25
0.890625
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
d2c45c86d30dc86862f4e85877bc02cc24e0f6f9
56,976
py
Python
paxes_nova/tests/virt/ibmpowervm/ivm/test_operator_ivm.py
windskyer/k_nova
63579dbfcfcda5def5b588a6728bfff85ad4564e
[ "Apache-2.0" ]
null
null
null
paxes_nova/tests/virt/ibmpowervm/ivm/test_operator_ivm.py
windskyer/k_nova
63579dbfcfcda5def5b588a6728bfff85ad4564e
[ "Apache-2.0" ]
null
null
null
paxes_nova/tests/virt/ibmpowervm/ivm/test_operator_ivm.py
windskyer/k_nova
63579dbfcfcda5def5b588a6728bfff85ad4564e
[ "Apache-2.0" ]
null
null
null
'''
Created on Apr 20, 2015

@author: root

Unit tests for paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator.
Every test stubs the remote command transport (run_command /
run_interactive / run_vios_command) with mock, so no connection to a
real IVM partition is ever opened.
'''
from decimal import Decimal

import mock
import testtools

from paxes_nova.virt.ibmpowervm.ivm import exception
from paxes_nova.virt.ibmpowervm.ivm import operator
from paxes_nova.virt.ibmpowervm.ivm.common import Connection
from paxes_nova.virt.ibmpowervm.ivm.operator import IVMOperator

# Shared prefix for mock.patch targets; evaluates to the same literal
# strings the original decorators used.
OPERATOR = 'paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator'


def _fcs_vpd(dev_name, port_loc, serial, wwpn):
    """Build one `lscfg`-style VPD stanza for a 4Gb FC adapter.

    Only the device name line, serial number, Network Address (WWPN) and
    Hardware Location Code vary per adapter; everything else is the
    boilerplate an IVM partition reports.  Output is byte-identical to
    the hand-written fixture this replaces.
    """
    loc = 'U78AB.001.WZSJH1H-P1-' + port_loc
    return [
        ' %s %s 4Gb FC PCI Express Adapter (df1000fe)' % (dev_name, loc),
        '',
        ' Part Number.................00E0807',
        ' Serial Number...............%s' % serial,
        ' Manufacturer................001A',
        ' EC Level.................... D77162',
        ' Customer Card ID Number.....5774',
        ' Manufacturer................001',
        ' FRU Number.................. 00E0807',
        ' Device Specific.(ZM)........3',
        ' Network Address.............%s' % wwpn,
        ' ROS Level and ID............02E8277F',
        ' Device Specific.(Z0)........2057706D',
        ' Device Specific.(Z1)........00000000',
        ' Device Specific.(Z2)........00000000',
        ' Device Specific.(Z3)........03000909',
        ' Device Specific.(Z4)........FFE01212',
        ' Device Specific.(Z5)........02E8277F',
        ' Device Specific.(Z6)........06E12715',
        ' Device Specific.(Z7)........07E1277F',
        # The node WWNN is the port WWPN with the leading nibble set to 2.
        ' Device Specific.(Z8)........2%s' % wwpn[1:],
        ' Device Specific.(Z9)........ZS2.71X15',
        ' Device Specific.(ZA)........Z1F2.70A5 ',
        ' Device Specific.(ZB)........Z2F2.71X15',
        ' Device Specific.(ZC)........00000000',
        ' Hardware Location Code......%s' % loc,
        '',
    ]


def _platform_specific():
    """Trailing PLATFORM SPECIFIC section of the `lscfg` fixture."""
    out = ['', ' PLATFORM SPECIFIC']
    for node, port in (('0', 'C2-T1'), ('0,1', 'C2-T2'),
                       ('0', 'C3-T1'), ('0,1', 'C3-T2')):
        out += ['',
                ' Name: fibre-channel',
                ' Model: LPe11002',
                ' Node: fibre-channel@%s' % node,
                ' Device Type: fcp',
                ' Physical Location: U78AB.001.WZSJH1H-P1-%s' % port]
    return out


class IVMOperatorTestCase(testtools.TestCase):
    """Tests for IVMOperator with the command layer fully mocked."""

    def setUp(self):
        super(IVMOperatorTestCase, self).setUp()
        # Credentials are dummies: every test patches the command layer,
        # so this Connection is never actually used to reach a host.
        conn = Connection('172.24.23.212', 'root', 'teamsun')
        self.ivm_opt = operator.IVMOperator(conn)
        self.rasis = exception

    def tearDown(self):
        super(IVMOperatorTestCase, self).tearDown()

    @mock.patch('nova.openstack.common.processutils.execute')
    def test_run_interactive(self, mock_execute):
        cmd = ['oem_setup_env', 'lsvg -o', 'exit']
        mock_execute.return_value = ('datavg\nrootvg\n', '')
        self.assertEqual(self.ivm_opt.run_interactive(cmd), [])

    @mock.patch(OPERATOR + '.run_interactive')
    def test_get_wwpns(self, mock_run_interactive):
        mock_run_interactive.return_value = [
            ' Network Address.............10000090FA1B2436',
            ' Network Address.............10000090FA1B2437',
            ' Network Address.............10000090FA1B2874',
            ' Network Address.............10000090FA1B2875',
        ]
        self.assertEqual(self.ivm_opt.get_wwpns(),
                         ['10000090FA1B2436', '10000090FA1B2437',
                          '10000090FA1B2874', '10000090FA1B2875'])

    @mock.patch(OPERATOR + '.run_interactive')
    def test_get_device_name_by_wwpn(self, mock_run_interactive):
        # Four adapters: the C3 pair shares one serial, the C2 pair another.
        mock_run_interactive.return_value = (
            _fcs_vpd('fcs2', 'C3-T1', '1A249002C8', '10000090FA1B2436') +
            _fcs_vpd('fcs3', 'C3-T2', '1A249002C8', '10000090FA1B2437') +
            _fcs_vpd('fcs0', 'C2-T1', '1A2490024E', '10000090FA1B2874') +
            _fcs_vpd('fcs1', 'C2-T2', '1A2490024E', '10000090FA1B2875') +
            _platform_specific())
        wwpn = '10000090FA1B2436'
        self.assertEqual(self.ivm_opt.get_device_name_by_wwpn(wwpn), 'fcs2')

    @mock.patch(OPERATOR + '.run_command')
    def test_get_vopt_size(self, mock_run_command):
        mock_run_command.return_value = '6515'
        self.assertEqual(self.ivm_opt.get_vopt_size(), '6515')

    @mock.patch(OPERATOR + '.run_command')
    def test_get_staging_size(self, mock_run_command):
        mock_run_command.return_value = ['9613688']
        # BUG FIX: the original test called get_vopt_size() here (copy/paste
        # from the test above), so get_staging_size() was never exercised.
        # NOTE(review): expected value assumes get_staging_size() returns the
        # raw run_command output like get_vopt_size() does — confirm.
        self.assertEqual(self.ivm_opt.get_staging_size(), ['9613688'])

    @mock.patch(OPERATOR + '.get_actual_lpar_name')
    @mock.patch(OPERATOR + '.run_vios_command')
    def test_get_lpar_mem(self, mock_run_vios_command,
                          mock_get_actual_lpar_name):
        # 'instane' (sic) — the typo is consistent throughout this fixture,
        # so it is harmless to the assertions.
        lpar_name = 'instane-0000011f'
        mock_get_actual_lpar_name.return_value = 'instane-0000011f'
        mock_run_vios_command.return_value = [
            'lpar_name=instane-0000011f', 'lpar_id=26', 'mem_mode=ded',
            'curr_min_mem=512', 'curr_mem=1024', 'curr_max_mem=4096',
            'pend_min_mem=512', 'pend_mem=1024', 'pend_max_mem=4096',
            'run_min_mem=0', 'run_mem=1024']
        self.assertEqual(self.ivm_opt.get_lpar_mem(lpar_name),
                         {'lpar_name': 'instane-0000011f'})

    @mock.patch(OPERATOR + '.run_vios_command')
    def test_get_lpar_proc(self, mock_run_vios_command):
        lpar_name = 'instane-0000011f'
        mock_run_vios_command.return_value = [
            'lpar_name=instane-0000011f', 'lpar_id=26',
            'curr_shared_proc_pool_id=0', 'curr_proc_mode=shared']
        self.assertEqual(self.ivm_opt.get_lpar_proc(lpar_name),
                         {'lpar_name': 'instane-0000011f'})

    @mock.patch(OPERATOR + '.run_command')
    def test_get_lpar_cpu_util(self, mock_run_command):
        lpar_id = 26
        # Three identical samples -> zero utilisation delta.
        mock_run_command.return_value = ['128,1,1024'] * 3
        self.assertEqual(self.ivm_opt.get_lpar_cpu_util(lpar_id), 0)

    @mock.patch(OPERATOR + '.run_command')
    def test_get_cpu_info_for_lpars(self, mock_run_command):
        mock_run_command.return_value = [
            'instance-00000119,shared,0.50,1',
            'instance-00000121,shared,0.20,1',
            'instance-0000015b,shared,0.50,1',
            'instance-00000001,shared,0.50,1']
        self.assertEqual(self.ivm_opt.get_cpu_info_for_lpars(),
                         {'instance-00000119': Decimal('0.50'),
                          'instance-00000121': Decimal('0.20'),
                          'instance-0000015b': Decimal('0.50'),
                          'instance-00000001': Decimal('0.50')})

    @mock.patch(OPERATOR + '.run_command')
    @mock.patch(OPERATOR + '._calc_cpu_utilization')
    def test_get_lpar_info(self, mock_calc_cpu_utilization,
                           mock_run_command):
        lpar_id = 26
        cycles = ('entitled_cycles=283424875907440,'
                  'capped_cycles=317971615126,'
                  'uncapped_cycles=18638892330')
        mock_run_command.return_value = [cycles] * 3
        mock_calc_cpu_utilization.return_value = 0
        self.assertEqual(self.ivm_opt.get_lpar_info(lpar_id),
                         {'capped_cycles': '317971615126',
                          'curr_cpu_util': '0.0',
                          'entitled_cycles': '283424875907440',
                          'uncapped_cycles': '18638892330'})

    @mock.patch(OPERATOR + '.run_command')
    def test_get_disk_names_for_vhost(self, mock_run_command):
        vhost = 'vhost1'
        mock_run_command.return_value = [
            '0x8200000000000000:/var/vio/VMLibrary/'
            'c53b6b58-4eca-8e90-c3e9e0f0babb:0x8100000000000000:lv16']
        self.assertEqual(
            self.ivm_opt.get_disk_names_for_vhost(vhost, local=True),
            ['lv16'])

    @mock.patch(OPERATOR + '.run_command')
    def test_get_hdisk_reserve_policy(self, mock_run_command):
        diskname = 'hdisk4'
        mock_run_command.return_value = ['value', ' ', 'no_reserve']
        self.assertEqual(self.ivm_opt.get_hdisk_reserve_policy(diskname),
                         'no_reserve')

    @mock.patch(OPERATOR + '.run_command')
    def test_get_management_sys_name(self, mock_run_command):
        mock_run_command.return_value = ['Server-8246-L2D-SN06052EA']
        self.assertEqual(self.ivm_opt.get_management_sys_name(),
                         'Server-8246-L2D-SN06052EA')

    @mock.patch(OPERATOR + '.get_actual_lpar_name')
    @mock.patch(OPERATOR + '.run_command')
    def test_get_refcode(self, mock_run_command,
                         mock_get_actual_lpar_name):
        instance_name = 'instance-0000011f'
        mock_run_command.return_value = ['Linux ppc64,04/09/2015 10:44:57']
        mock_get_actual_lpar_name.return_value = 'instance-0000011f'
        self.assertEqual(self.ivm_opt.get_refcode(instance_name),
                         'Linux ppc64')

    @mock.patch(OPERATOR + '.get_actual_lpar_name')
    @mock.patch(OPERATOR + '.run_command')
    def test_get_live_migration_state(self, mock_run_command,
                                      mock_get_actual_lpar_name):
        inst_name = 'instance-0000011f'
        mock_run_command.return_value = ['Not Migrating']
        mock_get_actual_lpar_name.return_value = 'instance-0000011f'
        self.assertEqual(self.ivm_opt.get_live_migration_state(inst_name),
                         'Not Migrating')

    @mock.patch(OPERATOR + '.run_command')
    def test_get_lpar_proc_compat_modes(self, mock_run_command):
        mock_run_command.return_value = ['"default,POWER6,POWER6+,POWER7"']
        self.assertEqual(self.ivm_opt.get_lpar_proc_compat_modes(),
                         ['default', 'POWER6', 'POWER6+', 'POWER7'])

    @mock.patch(OPERATOR + '.get_actual_lpar_name')
    @mock.patch(OPERATOR + '.run_vios_command')
    def test_get_curr_and_desired_proc_compat_modes(
            self, mock_run_vios_command, mock_get_actual_lpar_name):
        instance_name = 'instance-0000011f'
        mock_run_vios_command.return_value = ['POWER7,default']
        mock_get_actual_lpar_name.return_value = 'instance-0000011f'
        self.assertEqual(
            self.ivm_opt.get_curr_and_desired_proc_compat_modes(
                instance_name),
            ['POWER7', 'default'])

    @mock.patch(OPERATOR + '.get_actual_lpar_name')
    @mock.patch(OPERATOR + '.run_vios_command')
    def test_get_lpar_operating_system(self, mock_run_vios_command,
                                       mock_get_actual_lpar_name):
        instance_name = 'instance-0000011f'
        mock_run_vios_command.return_value = ['0.0.0.0.0.0']
        mock_get_actual_lpar_name.return_value = 'instance-0000011f'
        # BUG FIX: the original test invoked
        # get_curr_and_desired_proc_compat_modes() (copy/paste from the test
        # above), so get_lpar_operating_system() was never exercised.
        # NOTE(review): expected value assumes the method returns the
        # comma-split command output like the compat-modes getter — confirm.
        self.assertEqual(
            self.ivm_opt.get_lpar_operating_system(instance_name),
            ['0.0.0.0.0.0'])

    def test_get_disk_names_for_vhost_frm_dict(self):
        vhost_id = 3
        disk_dict = {1: '15', 2: '80', 3: '123'}
        self.assertEqual(
            self.ivm_opt.get_disk_names_for_vhost_frm_dict(vhost_id,
                                                           disk_dict),
            [])

    @mock.patch(OPERATOR + '.run_command')
    def test_get_disk_uid_by_name(self, mock_run_command):
        disk_name = 'hdisk3'
        mock_run_command.return_value = [
            '332136005076300818001A000000000000D4F04214503IBMfcp']
        self.assertEqual(self.ivm_opt.get_disk_uid_by_name(disk_name),
                         '6005076300818001A000000000000D4F')

    @mock.patch(OPERATOR + '.get_disk_names_for_vhost_frm_dict')
    @mock.patch(OPERATOR + '.get_disk_uid_by_name')
    def test_get_volumes_by_vhost_from_dict(
            self, mock_get_disk_uid_by_name,
            mock_get_disk_names_for_vhost_frm_dict):
        vhost_id = 3
        disk_dict = {1: '15', 2: '80', 3: '123'}
        mock_get_disk_uid_by_name.return_value = \
            '6005076300818001A000000000000D4F'
        # No disks resolved for this vhost -> no volumes expected.
        mock_get_disk_names_for_vhost_frm_dict.return_value = []
        self.assertEqual(
            self.ivm_opt.get_volumes_by_vhost_from_dict(vhost_id, disk_dict),
            [])

    @mock.patch(OPERATOR + '.run_vios_command')
    def test_get_vhost_by_instance_id(self, mock_run_vios_command):
        instance_id = 17
        mock_run_vios_command.return_value = ['vhost15']
        self.assertEqual(self.ivm_opt.get_vhost_by_instance_id(instance_id),
                         'vhost15')

    @mock.patch(OPERATOR + '.run_command')
    @mock.patch(OPERATOR + '._get_all_virt_slots_in_use')
    def test_get_num_reserved_in_use_vios_slots(
            self, mock__get_all_virt_slots_in_use, mock_run_command):
        managed_lpar_names = ['06-052EA', '06-052EB']
        mock_run_command.return_value = [
            '0,serial', '1,serial', '2,scsi', '3,reserved', '32,eth']
        mock__get_all_virt_slots_in_use.return_value = 1
        self.assertEqual(
            self.ivm_opt.get_num_reserved_in_use_vios_slots(
                managed_lpar_names),
            (4, 1))

    def test_get_volume_aix_conn_info(self):
        volume_data = {'target_wwn': ['10000090FA1B2436',
                                      '10000090FA1B2437',
                                      '10000090FA1B2874',
                                      '10000090FA1B2875'],
                       'target_lun': '10'}
        self.assertEqual(self.ivm_opt.get_volume_aix_conn_info(volume_data),
                         ['10000090fa1b2436,a000000000000',
                          '10000090fa1b2437,a000000000000',
                          '10000090fa1b2874,a000000000000',
                          '10000090fa1b2875,a000000000000'])

    @mock.patch(OPERATOR + '.run_command')
    def test_get_devname_by_aix_conn(self, mock_run_command):
        conn_info = ['5005076803080067,2000000000000']
        mock_run_command.return_value = [
            'Enabled:hdisk4:fscsi2:5005076803080067,2000000000000']
        self.assertEqual(self.ivm_opt.get_devname_by_aix_conn(conn_info),
                         {'device_name': 'hdisk4'})

    @mock.patch(OPERATOR + '.run_command')
    def test_get_fcs_parent_devices(self, mock_run_command):
        mock_run_command.return_value = ['pci4:fcs0', 'pci4:fcs1',
                                         'pci5:fcs2', 'pci5:fcs3']
        self.assertEqual(self.ivm_opt.get_fcs_parent_devices(),
                         {'pci4': ['fcs0', 'fcs1'],
                          'pci5': ['fcs2', 'fcs3']})
@mock.patch('paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator.run_interactive') def test_get_fcs_device_names(self, mock_run_interactive): wwpns = ['10000090FA1B2436', '10000090FA1B2437', '10000090FA1B2874', '10000090FA1B2875'] mock_run_interactive.return_value = ([' fcs2 U78AB.001.WZSJH1H-P1-C3-T1 4Gb FC PCI Express Adapter (df1000fe)', '', ' Part Number.................00E0807', ' Serial Number...............1A249002C8', ' Manufacturer................001A', ' EC Level.................... D77162', ' Customer Card ID Number.....5774', ' Manufacturer................001', ' FRU Number.................. 00E0807', ' Device Specific.(ZM)........3', ' Network Address.............10000090FA1B2436', ' ROS Level and ID............02E8277F', ' Device Specific.(Z0)........2057706D', ' Device Specific.(Z1)........00000000', ' Device Specific.(Z2)........00000000', ' Device Specific.(Z3)........03000909', ' Device Specific.(Z4)........FFE01212', ' Device Specific.(Z5)........02E8277F', ' Device Specific.(Z6)........06E12715', ' Device Specific.(Z7)........07E1277F', ' Device Specific.(Z8)........20000090FA1B2436', ' Device Specific.(Z9)........ZS2.71X15', ' Device Specific.(ZA)........Z1F2.70A5 ', ' Device Specific.(ZB)........Z2F2.71X15', ' Device Specific.(ZC)........00000000', ' Hardware Location Code......U78AB.001.WZSJH1H-P1-C3-T1', '', ' fcs3 U78AB.001.WZSJH1H-P1-C3-T2 4Gb FC PCI Express Adapter (df1000fe)', '', ' Part Number.................00E0807', ' Serial Number...............1A249002C8', ' Manufacturer................001A', ' EC Level.................... D77162', ' Customer Card ID Number.....5774', ' Manufacturer................001', ' FRU Number.................. 
00E0807', ' Device Specific.(ZM)........3', ' Network Address.............10000090FA1B2437', ' ROS Level and ID............02E8277F', ' Device Specific.(Z0)........2057706D', ' Device Specific.(Z1)........00000000', ' Device Specific.(Z2)........00000000', ' Device Specific.(Z3)........03000909', ' Device Specific.(Z4)........FFE01212', ' Device Specific.(Z5)........02E8277F', ' Device Specific.(Z6)........06E12715', ' Device Specific.(Z7)........07E1277F', ' Device Specific.(Z8)........20000090FA1B2437', ' Device Specific.(Z9)........ZS2.71X15', ' Device Specific.(ZA)........Z1F2.70A5 ', ' Device Specific.(ZB)........Z2F2.71X15', ' Device Specific.(ZC)........00000000', ' Hardware Location Code......U78AB.001.WZSJH1H-P1-C3-T2', '', ' fcs0 U78AB.001.WZSJH1H-P1-C2-T1 4Gb FC PCI Express Adapter (df1000fe)', '', ' Part Number.................00E0807', ' Serial Number...............1A2490024E', ' Manufacturer................001A', ' EC Level.................... D77162', ' Customer Card ID Number.....5774', ' Manufacturer................001', ' FRU Number.................. 
00E0807', ' Device Specific.(ZM)........3', ' Network Address.............10000090FA1B2874', ' ROS Level and ID............02E8277F', ' Device Specific.(Z0)........2057706D', ' Device Specific.(Z1)........00000000', ' Device Specific.(Z2)........00000000', ' Device Specific.(Z3)........03000909', ' Device Specific.(Z4)........FFE01212', ' Device Specific.(Z5)........02E8277F', ' Device Specific.(Z6)........06E12715', ' Device Specific.(Z7)........07E1277F', ' Device Specific.(Z8)........20000090FA1B2874', ' Device Specific.(Z9)........ZS2.71X15', ' Device Specific.(ZA)........Z1F2.70A5 ', ' Device Specific.(ZB)........Z2F2.71X15', ' Device Specific.(ZC)........00000000', ' Hardware Location Code......U78AB.001.WZSJH1H-P1-C2-T1', '', ' fcs1 U78AB.001.WZSJH1H-P1-C2-T2 4Gb FC PCI Express Adapter (df1000fe)', '', ' Part Number.................00E0807', ' Serial Number...............1A2490024E', ' Manufacturer................001A', ' EC Level.................... D77162', ' Customer Card ID Number.....5774', ' Manufacturer................001', ' FRU Number.................. 
00E0807', ' Device Specific.(ZM)........3', ' Network Address.............10000090FA1B2875', ' ROS Level and ID............02E8277F', ' Device Specific.(Z0)........2057706D', ' Device Specific.(Z1)........00000000', ' Device Specific.(Z2)........00000000', ' Device Specific.(Z3)........03000909', ' Device Specific.(Z4)........FFE01212', ' Device Specific.(Z5)........02E8277F', ' Device Specific.(Z6)........06E12715', ' Device Specific.(Z7)........07E1277F', ' Device Specific.(Z8)........20000090FA1B2875', ' Device Specific.(Z9)........ZS2.71X15', ' Device Specific.(ZA)........Z1F2.70A5 ', ' Device Specific.(ZB)........Z2F2.71X15', ' Device Specific.(ZC)........00000000', ' Hardware Location Code......U78AB.001.WZSJH1H-P1-C2-T2', '', '', ' PLATFORM SPECIFIC', '', ' Name: fibre-channel', ' Model: LPe11002', ' Node: fibre-channel@0', ' Device Type: fcp', ' Physical Location: U78AB.001.WZSJH1H-P1-C2-T1', '', ' Name: fibre-channel', ' Model: LPe11002', ' Node: fibre-channel@0,1', ' Device Type: fcp', ' Physical Location: U78AB.001.WZSJH1H-P1-C2-T2', '', ' Name: fibre-channel', ' Model: LPe11002', ' Node: fibre-channel@0', ' Device Type: fcp', ' Physical Location: U78AB.001.WZSJH1H-P1-C3-T1', '', ' Name: fibre-channel', ' Model: LPe11002', ' Node: fibre-channel@0,1', ' Device Type: fcp', ' Physical Location: U78AB.001.WZSJH1H-P1-C3-T2']) self.assertEqual(self.ivm_opt.get_fcs_device_names(wwpns), ['fcs2', 'fcs3', 'fcs0', 'fcs1']) @mock.patch('paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator.run_command') def test_get_disk_name_by_volume_uid(self, mock_run_command): uid = 'D5304214503' mock_run_command.return_value = (['hdisk4:332136005076300818001A000000000000D5304214503IBMfcp']) self.assertEqual(self.ivm_opt.get_disk_name_by_volume_uid(uid), 'hdisk4') @mock.patch('paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator.run_command') def test_get_lpar_max_virtual_slots(self, mock_run_command): lpar_id = '26' mock_run_command.return_value = ([64]) 
self.assertEqual(self.ivm_opt.get_lpar_max_virtual_slots(lpar_id), 64) @mock.patch('paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator.get_lpar_max_virtual_slots') def test_get_vios_max_virt_slots(self, mock_get_lpar_max_virtual_slots): mock_get_lpar_max_virtual_slots.return_value = (64) self.assertEqual(self.ivm_opt.get_vios_max_virt_slots(), 64) @mock.patch('paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator.run_command') def test_get_hyp_capability(self, mock_run_command): mock_run_command.return_value = (["active_lpar_mobility_capable,inactive_lpar_mobility_capable,cod_proc_capable,vet_activation_capable,shared_proc_capable,active_lpar_share_idle_procs_capable,micro_lpar_capable,dlpar_mem_capable,assign_phys_io_capable,lpar_avail_priority_capable,lpar_proc_compat_mode_capable,virtual_fc_capable,active_mem_sharing_capable"]) self.assertEqual(self.ivm_opt.get_hyp_capability(), {'active_lpar_mobility_capable': True, 'inactive_lpar_mobility_capable': True}) @mock.patch('paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator.run_command') def test_get_migration_stats(self, mock_run_command): mock_run_command.return_value = (['64,8,0,0']) self.assertEqual(self.ivm_opt.get_migration_stats(), {'inactv_migr_sup': 64, 'actv_migr_supp': 8, 'inactv_migr_prg': 0, 'actv_migr_prg': 0}) @mock.patch('paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator.run_command') def test_get_inactv_migration_stats(self, mock_run_command): mock_run_command.return_value = (['64,0']) self.assertEqual(self.ivm_opt.get_inactv_migration_stats(), {'inactv_migr_sup': 64, 'inactv_migr_prg': 0}) @mock.patch('paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator.run_command') def test_get_actv_migration_stats(self, mock_run_command): mock_run_command.return_value = (['8,0']) self.assertEqual(self.ivm_opt.get_actv_migration_stats(), {'actv_migr_supp': 8, 'actv_migr_prg': 0}) @mock.patch('paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator.run_command') def test_check_vopt_exists(self, mock_run_command): 
name = 'RHEL6.5-2013-Server-ppc64-DVD.iso' mock_run_command.return_value = (['10d2f623-4225-45ec-bc46-4b98ab7c65b3', '61702c3b-001b-4311-850e-cab2f016add1', '6c0ec0e7-7655-4932-bcca-cb30a6356fab', '6df387ae-6be8-40b8-a1d2-6d3633a0fc24', '753ce55f-4983-4e7e-99c0-ce7e497f559f', '8354e5a1-91dd-47fd-b3a0-7cf8f057cb43', '8354e5a1-91dd-47fd-b3a0-7cf8f057cb43', 'RHEL-7.0-20140507.0-Server-ppc64-dvd1.iso', 'RHEL6.4-20130130.0-Server-ppc64-DVD1.', 'RHEL6.5-2013-Server-ppc64-DVD.iso', 'RHEL6.5-2013-Server-ppc64-DVD.iso', 'RHEL6.5-2013-Server-ppc64-DVD.iso', 'RHEL6.5-2013-Server-ppc64-DVD.iso', 'RHEL6.5-2013-Server-ppc64-DVD.iso', 'RHEL6.5-2013-Server-ppc64-DVD.iso', 'RHEL6.5-2013-Server-ppc64-DVD.iso', 'RHEL6.5-2013-Server-ppc64-DVD.iso', 'RHEL6.5-2013-Server-ppc64-DVD.iso', 'RHEL6.5-2013-Server-ppc64-DVD.iso', 'RHEL6.5-2013-Server-ppc64-DVD.iso', 'b3ec3547-307d-4a58-9c69-4955f6df2059', 'c53b6b58-e6f3-4eca-8e90-c3e9e0f0babb', 'd0ed1883-2812-4513-b3f5-092ee8adecd3', 'e4efe10b-34e6-4bb2-b139-0d9a54e55456', 'fdf9b77e-e62c-4fc6-a52a-c355217adaea', 'fdf9b77e-e62c-4fc6-a52a-c355217adaea', 'vopt_06a5f3344fd1402d9bf5bc2c2a5bff41', 'vopt_1e4ea2dd2afa46e89fd0a3234336157e', 'vopt_2a37ccafa8f343a2a08ef119d7b7513b', 'vopt_36e1d56b897946d78293a4594362b884', 'vopt_53de1f8e17d44c1395c4c9b3a4d603fe', 'vopt_59e46a072d484fd9a8c5cdded8214aa4', 'vopt_5e6b50fa0c7e4befa2a537706544e07b', 'vopt_60056ead188a447fbbf51f0dc416627f', 'vopt_6490f79f8e2049018e817fdb75a2cc79', 'vopt_6572e1870a9a4b4993f34aae5d351e4d', 'vopt_7c3085768c9e4a0ab796fa7148b58824', 'vopt_83f111feb8464f5aa0e2702d9cad54ae', 'vopt_c1e896b6656f4c94a70036ae4c518795', 'vopt_d0fd2eed31034148a54659b2987e5ded', ]) self.assertEqual(self.ivm_opt.check_vopt_exists(name), True) @mock.patch('paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator.run_vios_command') def test_prepare_vhost_dev_lun_dict(self, mock_run_vios_command): mock_run_vios_command.return_value = 
(['vhost0:0x00000002:0x8200000000000000:/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso:0x8100000000000000:lp2vd2', 'vhost1:0x00000003:0x8200000000000000: :0x8100000000000000:lv02', 'vhost2:0x00000004:0x8200000000000000: :0x8100000000000000:lv00', 'vhost3:0x00000005:0x8200000000000000:/var/vio/VMLibrary/RHEL6.4-20130130.0-Server-ppc64-DVD1.:0x8100000000000000:lv01', 'vhost4:0x00000006:0x8200000000000000:/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso:0x8100000000000000:lv04:0x8300000000000000:lv04_ext', 'vhost5:0x00000007:0x8200000000000000:/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso:0x8100000000000000:lv05', 'vhost6:0x0000000f:0x8200000000000000:/var/vio/VMLibrary/dc5e181d-797d-4e89-8b72-05e6d87519d5:0x8100000000000000:lv20', 'vhost7:0x00000008:0x8200000000000000:/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso:0x8100000000000000:lp8vd1', 'vhost8:0x0000000a:0x8200000000000000:/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso:0x8100000000000000:lp10vd1', 'vhost10:0x0000000b:0x8200000000000000:/var/vio/VMLibrary/vopt_83f111feb8464f5aa0e2702d9cad54ae:0x8100000000000000:lv03', 'vhost11:0x0000000d:0x8200000000000000:/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso:0x8100000000000000:lp13vd1', 'vhost12:0x0000000c:0x8200000000000000:/var/vio/VMLibrary/vopt_d0fd2eed31034148a54659b2987e5ded:0x8100000000000000:lv09', 'vhost13:0x0000000e:0x8200000000000000:/var/vio/VMLibrary/8354e5a1-91dd-47fd-b3a0-7cf8f057cb43:0x8100000000000000:lv14', 'vhost14:0x00000010:0x8200000000000000:/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso:0x8100000000000000:lp16vd1', 'vhost15:0x00000011:0x8200000000000000:/var/vio/VMLibrary/e4efe10b-34e6-4bb2-b139-0d9a54e55456:0x8100000000000000:lv11', 'vhost16:0x00000012:0x8200000000000000:/var/vio/VMLibrary/6c0ec0e7-7655-4932-bcca-cb30a6356fab:0x8100000000000000:lv10', 'vhost17:0x00000013:0x8200000000000000:/var/vio/VMLibrary/8354e5a1-91dd-47fd-b3a0-7cf8f057cb43:0x8100000000000000:lv13', 
'vhost18:0x00000014:0x8200000000000000:/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso:0x8100000000000000:lp20vd1', 'vhost19:0x00000015:0x8200000000000000:/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso:0x8100000000000000:lp21vd1', 'vhost21:0x00000017:0x8200000000000000:/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso:0x8100000000000000:test-paxes', 'vhost22:0x00000018:0x8200000000000000:/var/vio/VMLibrary/b0d6fcc9-85ee-464e-af68-afdb921701af:0x8100000000000000:lv12', 'vhost23:0x00000019:0x8200000000000000:/var/vio/VMLibrary/RHEL-7.0-20140507.0-Server-ppc64-dvd1.iso:0x8100000000000000:lp25vd1', 'vhost24:0x0000001a:0x8200000000000000:/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso:0x8100000000000000:lv06', 'vhost25:0x0000001b:0x8200000000000000:/var/vio/VMLibrary/c53b6b58-e6f3-4eca-8e90-c3e9e0f0babb:0x8100000000000000:lv16', 'vhost26:0x0000001c:0x8100000000000000:/var/vio/VMLibrary/fdf9b77e-e62c-4fc6-a52a-c355217adaea:0x8200000000000000:hdisk3', 'vhost27:0x0000001d:0x8100000000000000:/var/vio/VMLibrary/fdf9b77e-e62c-4fc6-a52a-c355217adaea:0x8200000000000000:hdisk4', ]) self.assertEqual(self.ivm_opt.prepare_vhost_dev_lun_dict(), ({'vhost0': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso', '0x8100000000000000', 'lp2vd2'], 'vhost1': ['0x8200000000000000', ' ', '0x8100000000000000', 'lv02'], 'vhost10': ['0x8200000000000000', '/var/vio/VMLibrary/vopt_83f111feb8464f5aa0e2702d9cad54ae', '0x8100000000000000', 'lv03'], 'vhost11': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso', '0x8100000000000000', 'lp13vd1'], 'vhost12': ['0x8200000000000000', '/var/vio/VMLibrary/vopt_d0fd2eed31034148a54659b2987e5ded', '0x8100000000000000', 'lv09'], 'vhost13': ['0x8200000000000000', '/var/vio/VMLibrary/8354e5a1-91dd-47fd-b3a0-7cf8f057cb43', '0x8100000000000000', 'lv14'], 'vhost14': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso', '0x8100000000000000', 'lp16vd1'], 'vhost15': 
['0x8200000000000000', '/var/vio/VMLibrary/e4efe10b-34e6-4bb2-b139-0d9a54e55456', '0x8100000000000000', 'lv11'], 'vhost16': ['0x8200000000000000', '/var/vio/VMLibrary/6c0ec0e7-7655-4932-bcca-cb30a6356fab', '0x8100000000000000', 'lv10'], 'vhost17': ['0x8200000000000000', '/var/vio/VMLibrary/8354e5a1-91dd-47fd-b3a0-7cf8f057cb43', '0x8100000000000000', 'lv13'], 'vhost18': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso', '0x8100000000000000', 'lp20vd1'], 'vhost19': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso', '0x8100000000000000', 'lp21vd1'], 'vhost2': ['0x8200000000000000', ' ', '0x8100000000000000', 'lv00'], 'vhost21': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso', '0x8100000000000000', 'test-paxes'], 'vhost22': ['0x8200000000000000', '/var/vio/VMLibrary/b0d6fcc9-85ee-464e-af68-afdb921701af', '0x8100000000000000', 'lv12'], 'vhost23': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL-7.0-20140507.0-Server-ppc64-dvd1.iso', '0x8100000000000000', 'lp25vd1'], 'vhost24': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso', '0x8100000000000000', 'lv06'], 'vhost25': ['0x8200000000000000', '/var/vio/VMLibrary/c53b6b58-e6f3-4eca-8e90-c3e9e0f0babb', '0x8100000000000000', 'lv16'], 'vhost26': ['0x8100000000000000', '/var/vio/VMLibrary/fdf9b77e-e62c-4fc6-a52a-c355217adaea', '0x8200000000000000', 'hdisk3'], 'vhost27': ['0x8100000000000000', '/var/vio/VMLibrary/fdf9b77e-e62c-4fc6-a52a-c355217adaea', '0x8200000000000000', 'hdisk4'], 'vhost3': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL6.4-20130130.0-Server-ppc64-DVD1.', '0x8100000000000000', 'lv01'], 'vhost4': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso', '0x8100000000000000', 'lv04', '0x8300000000000000', 'lv04_ext'], 'vhost5': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso', '0x8100000000000000', 'lv05'], 'vhost6': ['0x8200000000000000', 
'/var/vio/VMLibrary/dc5e181d-797d-4e89-8b72-05e6d87519d5', '0x8100000000000000', 'lv20'], 'vhost7': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso', '0x8100000000000000', 'lp8vd1'], 'vhost8': ['0x8200000000000000', '/var/vio/VMLibrary/RHEL6.5-2013-Server-ppc64-DVD.iso', '0x8100000000000000', 'lp10vd1']}, {'0x00000002': 'vhost0', '0x00000003': 'vhost1', '0x00000004': 'vhost2', '0x00000005': 'vhost3', '0x00000006': 'vhost4', '0x00000007': 'vhost5', '0x00000008': 'vhost7', '0x0000000a': 'vhost8', '0x0000000b': 'vhost10', '0x0000000c': 'vhost12', '0x0000000d': 'vhost11', '0x0000000e': 'vhost13', '0x0000000f': 'vhost6', '0x00000010': 'vhost14', '0x00000011': 'vhost15', '0x00000012': 'vhost16', '0x00000013': 'vhost17', '0x00000014': 'vhost18', '0x00000015': 'vhost19', '0x00000017': 'vhost21', '0x00000018': 'vhost22', '0x00000019': 'vhost23', '0x0000001a': 'vhost24', '0x0000001b': 'vhost25', '0x0000001c': 'vhost26', '0x0000001d': 'vhost27'})) @mock.patch('paxes_nova.virt.ibmpowervm.ivm.operator.IVMOperator.run_command') def test_check_dlpar_connectivity(self, mock_run_command): instance_name = 'instance-0000011f' mock_run_command.return_value = (['0,0,none']) self.assertEqual(self.ivm_opt.check_dlpar_connectivity(instance_name), (True, 'none'))
84.035398
366
0.442291
4,261
56,976
5.696315
0.116874
0.064601
0.030282
0.045485
0.801664
0.762566
0.724827
0.697512
0.683092
0.64457
0
0.208106
0.434429
56,976
678
367
84.035398
0.545122
0.019043
0
0.541118
0
0.041118
0.425286
0.285487
0
0
0.0441
0
0.060855
1
0.064145
false
0
0.011513
0
0.077303
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
d2d48afb38c3513564e173a928d15347b6a0ee32
45
py
Python
temphelpers/__init__.py
wtsi-hgi/python-temp-helpers
bce5760df0d0b551d36c8f3cc222ff16ea6e0b8e
[ "MIT" ]
null
null
null
temphelpers/__init__.py
wtsi-hgi/python-temp-helpers
bce5760df0d0b551d36c8f3cc222ff16ea6e0b8e
[ "MIT" ]
null
null
null
temphelpers/__init__.py
wtsi-hgi/python-temp-helpers
bce5760df0d0b551d36c8f3cc222ff16ea6e0b8e
[ "MIT" ]
null
null
null
from temphelpers.managers import TempManager
22.5
44
0.888889
5
45
8
1
0
0
0
0
0
0
0
0
0
0
0
0.088889
45
1
45
45
0.97561
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
961414305224778c497c11c3bc9922b93c81a90c
141
py
Python
wbo-manuscript-figures/kinase_inhibitors_wbos/data_generation/visualize_kinase_inhibitors.py
choderalab/fragmenter_examples
01d63aea340e91f8cbb3a21253a906a0c3c66da3
[ "MIT" ]
null
null
null
wbo-manuscript-figures/kinase_inhibitors_wbos/data_generation/visualize_kinase_inhibitors.py
choderalab/fragmenter_examples
01d63aea340e91f8cbb3a21253a906a0c3c66da3
[ "MIT" ]
null
null
null
wbo-manuscript-figures/kinase_inhibitors_wbos/data_generation/visualize_kinase_inhibitors.py
choderalab/fragmenter_examples
01d63aea340e91f8cbb3a21253a906a0c3c66da3
[ "MIT" ]
null
null
null
import fragmenter oemols = fragmenter.chemi.file_to_oemols('kinase_inhibitors.smi') fragmenter.chemi.to_pdf(oemols, 'kinase_inhibitors.pdf')
35.25
65
0.836879
19
141
5.947368
0.526316
0.265487
0.389381
0
0
0
0
0
0
0
0
0
0.049645
141
4
66
35.25
0.843284
0
0
0
0
0
0.295775
0.295775
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
827bf419a623e5abe9086a5cebc93e284ad78eac
153
py
Python
todo/api/admin.py
abhayhk2001/Django-Todo
327e87ead1f227ee711515bfcbc126bd20e0a9b1
[ "MIT" ]
null
null
null
todo/api/admin.py
abhayhk2001/Django-Todo
327e87ead1f227ee711515bfcbc126bd20e0a9b1
[ "MIT" ]
null
null
null
todo/api/admin.py
abhayhk2001/Django-Todo
327e87ead1f227ee711515bfcbc126bd20e0a9b1
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Task, Context # Register your models here. admin.site.register(Task,) admin.site.register(Context)
21.857143
33
0.797386
22
153
5.545455
0.545455
0.147541
0.278689
0
0
0
0
0
0
0
0
0
0.111111
153
6
34
25.5
0.897059
0.169935
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
82dcb8d049dfd61acd1521318303495ec67c9896
162
py
Python
parts/edag_opto.py
baryluk/edag
675107e2078bcecb30768a5e96c7431104352024
[ "BSL-1.0" ]
null
null
null
parts/edag_opto.py
baryluk/edag
675107e2078bcecb30768a5e96c7431104352024
[ "BSL-1.0" ]
null
null
null
parts/edag_opto.py
baryluk/edag
675107e2078bcecb30768a5e96c7431104352024
[ "BSL-1.0" ]
null
null
null
#!/usr/bin/env python3 # The basic LEDs are in edag_components.py # Here more specialized parts and opto-isolators are available. # opto isolators "AQY212GS"
16.2
63
0.759259
24
162
5.083333
0.875
0.213115
0
0
0
0
0
0
0
0
0
0.029412
0.160494
162
9
64
18
0.867647
0.91358
0
0
0
0
0.444444
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
7d69649fc8f785622daff03a9b8638532222f9ef
361
py
Python
github_revision/settings.py
watchdogpolska/django-github-revision
e0c216f6a16f2336f17306e8d6820eddcecab746
[ "MIT" ]
null
null
null
github_revision/settings.py
watchdogpolska/django-github-revision
e0c216f6a16f2336f17306e8d6820eddcecab746
[ "MIT" ]
null
null
null
github_revision/settings.py
watchdogpolska/django-github-revision
e0c216f6a16f2336f17306e8d6820eddcecab746
[ "MIT" ]
null
null
null
from django.conf import settings REPO_URL = getattr(settings, 'GITHUB_REVISION_REPO_URL') BRANCH = getattr(settings, 'GITHUB_REVISION_BRANCH', 'master') FORMAT_URL = getattr(settings, 'GITHUB_REVISION_FORMAT_URL', "{repo_url}/compare/{revision_id}...{branch}") REVISION_BACKEND = getattr(settings, 'GITHUB_REVISION_BACKEND', 'github_revision.backends.auto')
40.111111
107
0.797784
45
361
6.044444
0.4
0.257353
0.308824
0.426471
0.235294
0
0
0
0
0
0
0
0.072022
361
9
108
40.111111
0.81194
0
0
0
0
0
0.477901
0.461326
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
7d93e98e45090d2257fcf16278a229c3e8167bea
93
py
Python
libsimba/__init__.py
SIMBAChain/libsimba.py-platform
a815105a5ed84564c7eafbe01281473cebfb44e5
[ "MIT" ]
null
null
null
libsimba/__init__.py
SIMBAChain/libsimba.py-platform
a815105a5ed84564c7eafbe01281473cebfb44e5
[ "MIT" ]
2
2022-02-25T05:03:13.000Z
2022-03-09T13:56:56.000Z
libsimba/__init__.py
SIMBAChain/libsimba.py-platform
a815105a5ed84564c7eafbe01281473cebfb44e5
[ "MIT" ]
null
null
null
from .simba import Simba from .simba_async import SimbaAsync from .utils import SearchFilter
23.25
35
0.83871
13
93
5.923077
0.538462
0.233766
0
0
0
0
0
0
0
0
0
0
0.129032
93
3
36
31
0.950617
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
7db61334e349e9eb86b77f6c59bf7e9d2db74c2d
310
py
Python
tests/test_entrypoint.py
Edward-Knight/option6
2948695eb8ff1b22fa2ce9c04f85b736bc98d8c2
[ "MIT" ]
1
2021-02-08T23:20:59.000Z
2021-02-08T23:20:59.000Z
tests/test_entrypoint.py
Edward-Knight/option6
2948695eb8ff1b22fa2ce9c04f85b736bc98d8c2
[ "MIT" ]
1
2021-02-09T10:51:23.000Z
2021-02-09T10:51:23.000Z
tests/test_entrypoint.py
Edward-Knight/option6
2948695eb8ff1b22fa2ce9c04f85b736bc98d8c2
[ "MIT" ]
null
null
null
"""Test the entry point exposed by this module.""" import subprocess def test_help(): subprocess.check_call(["option6", "--help"]) def test_version(): subprocess.check_call(["option6", "--version"]) def test_module_interface(): subprocess.check_call(["python", "-m", "option6", "--version"])
20.666667
67
0.674194
37
310
5.459459
0.513514
0.10396
0.282178
0.257426
0
0
0
0
0
0
0
0.011194
0.135484
310
14
68
22.142857
0.742537
0.141935
0
0
0
0
0.203846
0
0
0
0
0
0
1
0.428571
true
0
0.142857
0
0.571429
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
1
0
0
5
7dc2412d2136b78bea0f4aaefd10b1e5df4cc9eb
58
py
Python
learning/fastbook/src/mnist_model.py
kaipak/ml-experiments
3727a9fc9a03f4db0e886cc317a466709b0a4572
[ "MIT" ]
null
null
null
learning/fastbook/src/mnist_model.py
kaipak/ml-experiments
3727a9fc9a03f4db0e886cc317a466709b0a4572
[ "MIT" ]
null
null
null
learning/fastbook/src/mnist_model.py
kaipak/ml-experiments
3727a9fc9a03f4db0e886cc317a466709b0a4572
[ "MIT" ]
null
null
null
from fastai.vision.all import * from fastbook import *
9.666667
31
0.741379
8
58
5.375
0.75
0
0
0
0
0
0
0
0
0
0
0
0.189655
58
5
32
11.6
0.914894
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7dca075ca3fcb02b9ef8368de36a1aaf12c1b804
65
py
Python
PythonAPI/scripts/draw_graphs.py
MaisJamal/Apollo-BT-GP
4d8d721fa16b67e7ecefdd05d937b1153c000d63
[ "Apache-2.0" ]
null
null
null
PythonAPI/scripts/draw_graphs.py
MaisJamal/Apollo-BT-GP
4d8d721fa16b67e7ecefdd05d937b1153c000d63
[ "Apache-2.0" ]
null
null
null
PythonAPI/scripts/draw_graphs.py
MaisJamal/Apollo-BT-GP
4d8d721fa16b67e7ecefdd05d937b1153c000d63
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 import math import time import os
7.222222
22
0.676923
10
65
4.4
0.8
0
0
0
0
0
0
0
0
0
0
0.020408
0.246154
65
8
23
8.125
0.877551
0.323077
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
814ec7efa2ab7d2ec1524157137418e08d5767cf
60
py
Python
HumeanPoseEstimate/data/coco_data.py
YuHe0108/cvmodule
ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd
[ "Apache-2.0" ]
null
null
null
HumeanPoseEstimate/data/coco_data.py
YuHe0108/cvmodule
ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd
[ "Apache-2.0" ]
null
null
null
HumeanPoseEstimate/data/coco_data.py
YuHe0108/cvmodule
ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd
[ "Apache-2.0" ]
null
null
null
"""用于制作coco数据集,用于目标检测、人体的关键点检测""" import tensorflow as tf
20
34
0.75
7
60
6.428571
1
0
0
0
0
0
0
0
0
0
0
0
0.116667
60
2
35
30
0.849057
0.45
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
815b182d6619644490ca209c7e36baf2f82df1e5
156
py
Python
pyboletox/Contracts/Cnab/remessa.py
lucasbrahm/pyboletox
d7cfff477622a4d5df255045428325f04fbe695a
[ "MIT" ]
1
2022-03-14T12:22:42.000Z
2022-03-14T12:22:42.000Z
pyboletox/Contracts/Cnab/remessa.py
lucasbrahm/pyboletox
d7cfff477622a4d5df255045428325f04fbe695a
[ "MIT" ]
null
null
null
pyboletox/Contracts/Cnab/remessa.py
lucasbrahm/pyboletox
d7cfff477622a4d5df255045428325f04fbe695a
[ "MIT" ]
1
2022-03-14T12:22:27.000Z
2022-03-14T12:22:27.000Z
from abc import abstractmethod from pyboletox.Contracts.Cnab.cnab import Cnab class Remessa(Cnab): @abstractmethod def gerar(self): pass
15.6
46
0.724359
19
156
5.947368
0.684211
0
0
0
0
0
0
0
0
0
0
0
0.211538
156
9
47
17.333333
0.918699
0
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0.166667
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
5
816d8b7d9b3b217832d0981c77e6d3077e353793
14
py
Python
code.py
karanchhatwani/Summer19_karan
2d0a2b5dc07444f8908e8a78e72185fff3b7be37
[ "Apache-2.0" ]
null
null
null
code.py
karanchhatwani/Summer19_karan
2d0a2b5dc07444f8908e8a78e72185fff3b7be37
[ "Apache-2.0" ]
null
null
null
code.py
karanchhatwani/Summer19_karan
2d0a2b5dc07444f8908e8a78e72185fff3b7be37
[ "Apache-2.0" ]
null
null
null
hi hello bye
2.8
5
0.714286
3
14
3.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.285714
14
4
6
3.5
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
8197b5ee4a74f95cf092b31df9682eb5b99464fd
51
py
Python
src/schnetpack/atomistic/__init__.py
giadefa/schnetpack
9dabc3b6e3b28deb2fb3743ea1857c46b055efbf
[ "MIT" ]
450
2018-09-04T08:37:47.000Z
2022-03-30T08:05:37.000Z
src/schnetpack/atomistic/__init__.py
giadefa/schnetpack
9dabc3b6e3b28deb2fb3743ea1857c46b055efbf
[ "MIT" ]
239
2018-09-11T21:09:08.000Z
2022-03-18T09:25:11.000Z
src/schnetpack/atomistic/__init__.py
giadefa/schnetpack
9dabc3b6e3b28deb2fb3743ea1857c46b055efbf
[ "MIT" ]
166
2018-09-13T13:01:06.000Z
2022-03-31T12:59:12.000Z
from .model import * from .output_modules import *
17
29
0.764706
7
51
5.428571
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.156863
51
2
30
25.5
0.883721
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
81b45f19f458bc7f79f9a3a1a17dcd61664fac15
309
py
Python
photutils/utils/__init__.py
Onoddil/photutils
433f3e54d3f53282ae04eadde9e1ddf657944590
[ "BSD-3-Clause" ]
null
null
null
photutils/utils/__init__.py
Onoddil/photutils
433f3e54d3f53282ae04eadde9e1ddf657944590
[ "BSD-3-Clause" ]
null
null
null
photutils/utils/__init__.py
Onoddil/photutils
433f3e54d3f53282ae04eadde9e1ddf657944590
[ "BSD-3-Clause" ]
null
null
null
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This subpackage provides general-purpose utility functions. """ from .check_random_state import * # noqa from .colormaps import * # noqa from .errors import * # noqa from .exceptions import * # noqa from .interpolation import * # noqa
28.090909
63
0.734628
40
309
5.625
0.675
0.222222
0.248889
0
0
0
0
0
0
0
0
0.003937
0.177994
309
10
64
30.9
0.88189
0.475728
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
81e28bee0599ae1c6720abb7d8312e82e02349d6
192
py
Python
2 boolean.py
MotiMatika/teaching
71adf8160d2127391d4fce98b75e9269b546fb70
[ "MIT" ]
null
null
null
2 boolean.py
MotiMatika/teaching
71adf8160d2127391d4fce98b75e9269b546fb70
[ "MIT" ]
null
null
null
2 boolean.py
MotiMatika/teaching
71adf8160d2127391d4fce98b75e9269b546fb70
[ "MIT" ]
null
null
null
#x=True #y=0 #x=y<4 #print(x) # # x=True # y=0 # x=y==0 # print(x) # #x=True #y=0 #x=y!=9 #print(x) #3 #x='b' in 'bed' #print(x) # # x='b' in 'po' # print(x) # # help() x='b' in 'bo' print(x)
7.68
15
0.46875
47
192
1.914894
0.297872
0.4
0.2
0.233333
0.433333
0.433333
0.333333
0.333333
0
0
0
0.045161
0.192708
192
25
16
7.68
0.535484
0.651042
0
0
0
0
0.065217
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
0
0
1
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
81e5e2d734a88d1b4c2669be472bb81fd89da9f6
153
py
Python
Aula2/parametros.py
gabriel-correia0408/Sala_Green_GabrielCorreia
1d22f466d372786c5f8c8eaba7202844b5f03445
[ "Apache-2.0" ]
null
null
null
Aula2/parametros.py
gabriel-correia0408/Sala_Green_GabrielCorreia
1d22f466d372786c5f8c8eaba7202844b5f03445
[ "Apache-2.0" ]
null
null
null
Aula2/parametros.py
gabriel-correia0408/Sala_Green_GabrielCorreia
1d22f466d372786c5f8c8eaba7202844b5f03445
[ "Apache-2.0" ]
null
null
null
#qyalquer melhor método de pasar parametros def parametros(nome,idade,tamanho,endereco): print(f'Nome: {nome}\nIdade: {idade}\nTamanho: {tamanho}\n')
51
64
0.751634
21
153
5.47619
0.761905
0
0
0
0
0
0
0
0
0
0
0
0.098039
153
3
64
51
0.833333
0.27451
0
0
0
0
0.45045
0
0
0
0
0.333333
0
1
0.5
false
0
0
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
0
0
1
0
5
c4bda69f53d82831f77456e732652d77eb54c983
592
py
Python
venv/lib/python3.8/site-packages/azureml/_tracing/_status.py
amcclead7336/Enterprise_Data_Science_Final
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
[ "Unlicense", "MIT" ]
null
null
null
venv/lib/python3.8/site-packages/azureml/_tracing/_status.py
amcclead7336/Enterprise_Data_Science_Final
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
[ "Unlicense", "MIT" ]
null
null
null
venv/lib/python3.8/site-packages/azureml/_tracing/_status.py
amcclead7336/Enterprise_Data_Science_Final
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
[ "Unlicense", "MIT" ]
2
2021-05-23T16:46:31.000Z
2021-05-26T23:51:09.000Z
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import enum class StatusCode(enum.Enum): OK = 0 """Internal errors.""" INTERNAL = 13 class Status: def __init__(self, canonical_code=StatusCode.OK): self._canonical_code = canonical_code @property def canonical_code(self): return self._canonical_code @property def is_ok(self): return self._canonical_code == StatusCode.OK
22.769231
60
0.511824
53
592
5.45283
0.471698
0.269896
0.235294
0.186851
0.32872
0
0
0
0
0
0
0.006536
0.224662
592
25
61
23.68
0.623094
0.29223
0
0.153846
0
0
0
0
0
0
0
0
0
1
0.230769
false
0
0.076923
0.153846
0.769231
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5
c4c107746a2ab5c12d09a1147cf3edf0ccea1d8e
3,908
py
Python
fhirclient/models/episodeofcare_tests.py
NematiLab/Streaming-Sepsis-Prediction-System-for-Intensive-Care-Units
fb5ad260fb8d264d85aea9e6c895d1700eea4d11
[ "Apache-2.0" ]
2
2019-05-16T16:41:22.000Z
2021-04-22T22:06:49.000Z
fhirclient/models/episodeofcare_tests.py
NematiLab/Streaming-Sepsis-Prediction-System-for-Intensive-Care-Units
fb5ad260fb8d264d85aea9e6c895d1700eea4d11
[ "Apache-2.0" ]
null
null
null
fhirclient/models/episodeofcare_tests.py
NematiLab/Streaming-Sepsis-Prediction-System-for-Intensive-Care-Units
fb5ad260fb8d264d85aea9e6c895d1700eea4d11
[ "Apache-2.0" ]
3
2019-03-26T01:39:18.000Z
2020-02-02T19:06:33.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Generated from FHIR 1.0.2.7202 on 2016-06-23. # 2016, SMART Health IT. import os import io import unittest import json from . import episodeofcare from .fhirdate import FHIRDate class EpisodeOfCareTests(unittest.TestCase): def instantiate_from(self, filename): datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or '' with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle: js = json.load(handle) self.assertEqual("EpisodeOfCare", js["resourceType"]) return episodeofcare.EpisodeOfCare(js) def testEpisodeOfCare1(self): inst = self.instantiate_from("episodeofcare-example.json") self.assertIsNotNone(inst, "Must have instantiated a EpisodeOfCare instance") self.implEpisodeOfCare1(inst) js = inst.as_json() self.assertEqual("EpisodeOfCare", js["resourceType"]) inst2 = episodeofcare.EpisodeOfCare(js) self.implEpisodeOfCare1(inst2) def implEpisodeOfCare1(self, inst): self.assertEqual(inst.careTeam[0].period.end.date, FHIRDate("2014-09-16").date) self.assertEqual(inst.careTeam[0].period.end.as_json(), "2014-09-16") self.assertEqual(inst.careTeam[0].period.start.date, FHIRDate("2014-09-01").date) self.assertEqual(inst.careTeam[0].period.start.as_json(), "2014-09-01") self.assertEqual(inst.careTeam[0].role[0].coding[0].code, "AO") self.assertEqual(inst.careTeam[0].role[0].coding[0].display, "Assessment Worker") self.assertEqual(inst.careTeam[0].role[0].coding[0].system, "http://example.org/EpisodeOfCare/Role") self.assertEqual(inst.id, "example") self.assertEqual(inst.identifier[0].system, "http://example.org/sampleepisodeofcare-identifier") self.assertEqual(inst.identifier[0].value, "123") self.assertEqual(inst.period.start.date, FHIRDate("2014-09-01").date) self.assertEqual(inst.period.start.as_json(), "2014-09-01") self.assertEqual(inst.status, "active") self.assertEqual(inst.statusHistory[0].period.end.date, FHIRDate("2014-09-14").date) self.assertEqual(inst.statusHistory[0].period.end.as_json(), 
"2014-09-14") self.assertEqual(inst.statusHistory[0].period.start.date, FHIRDate("2014-09-01").date) self.assertEqual(inst.statusHistory[0].period.start.as_json(), "2014-09-01") self.assertEqual(inst.statusHistory[0].status, "planned") self.assertEqual(inst.statusHistory[1].period.end.date, FHIRDate("2014-09-21").date) self.assertEqual(inst.statusHistory[1].period.end.as_json(), "2014-09-21") self.assertEqual(inst.statusHistory[1].period.start.date, FHIRDate("2014-09-15").date) self.assertEqual(inst.statusHistory[1].period.start.as_json(), "2014-09-15") self.assertEqual(inst.statusHistory[1].status, "active") self.assertEqual(inst.statusHistory[2].period.end.date, FHIRDate("2014-09-24").date) self.assertEqual(inst.statusHistory[2].period.end.as_json(), "2014-09-24") self.assertEqual(inst.statusHistory[2].period.start.date, FHIRDate("2014-09-22").date) self.assertEqual(inst.statusHistory[2].period.start.as_json(), "2014-09-22") self.assertEqual(inst.statusHistory[2].status, "onhold") self.assertEqual(inst.statusHistory[3].period.start.date, FHIRDate("2014-09-25").date) self.assertEqual(inst.statusHistory[3].period.start.as_json(), "2014-09-25") self.assertEqual(inst.statusHistory[3].status, "active") self.assertEqual(inst.text.status, "generated") self.assertEqual(inst.type[0].coding[0].code, "HACC") self.assertEqual(inst.type[0].coding[0].display, "Home and Community Care Package") self.assertEqual(inst.type[0].coding[0].system, "http://example.org/EpisodeOfCare/Type")
55.042254
108
0.688843
504
3,908
5.311508
0.210317
0.207322
0.248412
0.215166
0.678745
0.555099
0.490101
0.17669
0.149047
0.111319
0
0.068851
0.148925
3,908
70
109
55.828571
0.736019
0.028915
0
0.035088
1
0
0.152283
0.012404
0
0
0
0
0.666667
1
0.052632
false
0
0.105263
0
0.192982
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
5
c4d439ce842f743229d4f965e2d2641e4b3c5b49
174
py
Python
stack.py
Schmidt1519/sweepstakes
4febc284b4918b28c6dcd32ac300a24922c152b5
[ "MIT" ]
null
null
null
stack.py
Schmidt1519/sweepstakes
4febc284b4918b28c6dcd32ac300a24922c152b5
[ "MIT" ]
null
null
null
stack.py
Schmidt1519/sweepstakes
4febc284b4918b28c6dcd32ac300a24922c152b5
[ "MIT" ]
null
null
null
class Stack: def __init__(self): self.stack = [] def push(self, item): self.stack.append(item) def pop(self): return self.stack.pop(-1)
17.4
33
0.557471
23
174
4.043478
0.478261
0.290323
0
0
0
0
0
0
0
0
0
0.008264
0.304598
174
10
33
17.4
0.760331
0
0
0
0
0
0
0
0
0
0
0
0
1
0.428571
false
0
0
0.142857
0.714286
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
f228539ac57963ce16d65dba9ec65f12d1254efa
114
py
Python
src/requests/new_user_request.py
danielvcm/to-do-api
ca560dec3672222c98cd287961ebcd41f2792a7b
[ "MIT" ]
null
null
null
src/requests/new_user_request.py
danielvcm/to-do-api
ca560dec3672222c98cd287961ebcd41f2792a7b
[ "MIT" ]
null
null
null
src/requests/new_user_request.py
danielvcm/to-do-api
ca560dec3672222c98cd287961ebcd41f2792a7b
[ "MIT" ]
null
null
null
from pydantic import BaseModel class NewUserRequest(BaseModel): name: str user_name: str password: str
22.8
32
0.745614
14
114
6
0.714286
0.166667
0
0
0
0
0
0
0
0
0
0
0.201754
114
5
33
22.8
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.2
0.2
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
5
481cccdf2398aa0c596463752540cc86d492d154
60
py
Python
Lib/urllib/__init__.py
Ixyk-Wolf/cpython
0a6136ab724a2bf1e0d99f0d480fad7c1e77dbd4
[ "0BSD" ]
null
null
null
Lib/urllib/__init__.py
Ixyk-Wolf/cpython
0a6136ab724a2bf1e0d99f0d480fad7c1e77dbd4
[ "0BSD" ]
null
null
null
Lib/urllib/__init__.py
Ixyk-Wolf/cpython
0a6136ab724a2bf1e0d99f0d480fad7c1e77dbd4
[ "0BSD" ]
null
null
null
from . import request, response, parse, error, robotparser
30
59
0.766667
7
60
6.571429
1
0
0
0
0
0
0
0
0
0
0
0
0.15
60
1
60
60
0.901961
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
481f2f7e67b0c9b0a66ae4bd279b075b5ec6feaf
15,290
py
Python
surfify/models/vae.py
AGrigis/surfify
180f7967f435d886e28df79be502406ee7946712
[ "CECILL-B" ]
null
null
null
surfify/models/vae.py
AGrigis/surfify
180f7967f435d886e28df79be502406ee7946712
[ "CECILL-B" ]
null
null
null
surfify/models/vae.py
AGrigis/surfify
180f7967f435d886e28df79be502406ee7946712
[ "CECILL-B" ]
null
null
null
# -*- coding: utf-8 -*- ########################################################################## # NSAp - Copyright (C) CEA, 2021 # Distributed under the terms of the CeCILL-B license, as published by # the CEA-CNRS-INRIA. Refer to the LICENSE file or to # http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html # for details. ########################################################################## """ Cortical Spherical Variational Auto-Encoder (GMVAE) models. [1] Representation Learning of Resting State fMRI with Variational Autoencoder: https://github.com/libilab/rsfMRI-VAE """ # Imports import torch import torch.nn as nn from torch.distributions import Normal from ..utils import get_logger, debug_msg from ..nn import IcoUpConv, IcoPool, IcoSpMaConv, IcoSpMaConvTranspose from .base import SphericalBase # Global parameters logger = get_logger() class SphericalVAE(SphericalBase): """ Spherical VAE architecture. Use either RePa - Rectangular Patch convolution method or DiNe - Direct Neighbor convolution method. Notes ----- Debuging messages can be displayed by changing the log level using ``setup_logging(level='debug')``. See Also -------- SphericalGVAE References ---------- Representation Learning of Resting State fMRI with Variational Autoencoder, NeuroImage 2021. """ def __init__(self, input_channels=1, input_order=5, latent_dim=64, conv_flts=[32, 32, 64, 64], conv_mode="DiNe", dine_size=1, repa_size=5, repa_zoom=5, use_freesurfer=True, cachedir=None): """ Init class. Parameters ---------- input_channels: int, default 1 the number of input channels. input_order: int, default 5 the input icosahedron order. latent_dim: int, default 64 the size of the stochastic latent state of the SVAE. conv_flts: list of int the size of convolutional filters. conv_mode: str, default 'DiNe' use either 'RePa' - Rectangular Patch convolution method or 'DiNe' - 1 ring Direct Neighbor convolution method. 
dine_size: int, default 1 the size of the spherical convolution filter, ie. the number of neighbor rings to be considered. repa_size: int, default 5 the size of the rectangular grid in the tangent space. repa_zoom: int, default 5 a multiplicative factor applied to the rectangular grid in the tangent space. use_freesurfer: bool, default True optionaly use surfify tesselation. cachedir: str, default None set this folder to use smart caching speedup. """ logger.debug("SphericalVAE init...") super(SphericalVAE, self).__init__( input_order=input_order, n_layers=len(conv_flts), conv_mode=conv_mode, dine_size=dine_size, repa_size=repa_size, repa_zoom=repa_zoom, use_freesurfer=use_freesurfer, cachedir=cachedir) self.input_channels = input_channels self.latent_dim = latent_dim self.conv_flts = conv_flts self.top_flatten_dim = len( self.ico[self.input_order - self.n_layers + 1].vertices) self.top_final = self.conv_flts[-1] * self.top_flatten_dim # define the encoder self.enc_left_conv = self.sconv( input_channels, int(self.conv_flts[0] / 2), self.ico[self.input_order].conv_neighbor_indices) self.enc_right_conv = self.sconv( input_channels, int(self.conv_flts[0] / 2), self.ico[self.input_order].conv_neighbor_indices) self.enc_w_conv = nn.Sequential() for idx in range(1, self.n_layers): order = self.input_order - idx pooling = IcoPool( down_neigh_indices=self.ico[order + 1].neighbor_indices, down_indices=self.ico[order + 1].down_indices, pooling_type="mean") self.enc_w_conv.add_module("pooling_{0}".format(idx), pooling) conv = self.sconv( self.conv_flts[idx - 1], self.conv_flts[idx], self.ico[order].conv_neighbor_indices) self.enc_w_conv.add_module("down_{0}".format(idx), conv) self.enc_w_dense = nn.Linear(self.top_final, self.latent_dim * 2) # define the decoder self.dec_w_dense = nn.Linear(self.latent_dim, self.top_final) self.dec_w_conv = nn.Sequential() cnt = 1 for idx in range(self.n_layers - 1, 0, -1): tconv = IcoUpConv( in_feats=self.conv_flts[idx], 
out_feats=self.conv_flts[idx - 1], up_neigh_indices=self.ico[order + 1].neighbor_indices, down_indices=self.ico[order + 1].down_indices) self.dec_w_conv.add_module("up_{0}".format(cnt), tconv) order += 1 cnt += 1 self.dec_left_conv = IcoUpConv( in_feats=int(self.conv_flts[0] / 2), out_feats=self.input_channels, up_neigh_indices=self.ico[order].neighbor_indices, down_indices=self.ico[order].down_indices) self.dec_right_conv = IcoUpConv( in_feats=int(self.conv_flts[0] / 2), out_feats=self.input_channels, up_neigh_indices=self.ico[order].neighbor_indices, down_indices=self.ico[order].down_indices) self.relu = nn.ReLU(inplace=True) def encode(self, left_x, right_x): """ The encoder. Parameters ---------- left_x: Tensor (samples, <input_channels>, azimuth, elevation) input left cortical texture. right_x: Tensor (samples, <input_channels>, azimuth, elevation) input right cortical texture. Returns ------- q(z | x): Normal (batch_size, <latent_dim>) a Normal distribution. """ x = torch.cat( (self.enc_left_conv(left_x), self.enc_right_conv(right_x)), dim=1) x = self.relu(x) for layer_idx in range((self.n_layers - 1) * 2): if isinstance(self.enc_w_conv[layer_idx], IcoPool): x = self.enc_w_conv[layer_idx](x)[0] else: x = self.relu(self.enc_w_conv[layer_idx](x)) x = x.reshape(-1, self.top_final) x = self.enc_w_dense(x) z_mu, z_logvar = torch.chunk(x, chunks=2, dim=1) return Normal(loc=z_mu, scale=z_logvar.exp().pow(0.5)) def decode(self, z): """ The decoder. Parameters ---------- z: Tensor (samples, <latent_dim>) the stochastic latent state z. Returns ------- left_recon_x: Tensor (samples, <input_channels>, azimuth, elevation) reconstructed left cortical texture. right_recon_x: Tensor (samples, <input_channels>, azimuth, elevation) reconstructed right cortical texture. 
""" x = self.relu(self.dec_w_dense(z)) x = x.view(-1, self.conv_flts[-1], self.top_flatten_dim) for layer_idx in range(self.n_layers - 1): x = self.relu(self.dec_w_conv[layer_idx](x)) left_recon_x, right_recon_x = torch.chunk(x, chunks=2, dim=1) left_recon_x = self.dec_left_conv(left_recon_x) right_recon_x = self.dec_right_conv(right_recon_x) return left_recon_x, right_recon_x def reparameterize(self, q): """ Implement the reparametrization trick. """ if self.training: z = q.rsample() else: z = q.loc return z def forward(self, left_x, right_x): """ The forward method. Parameters ---------- left_x: Tensor (samples, <input_channels>, azimuth, elevation) input left cortical texture. right_x: Tensor (samples, <input_channels>, azimuth, elevation) input right cortical texture. Returns ------- left_recon_x: Tensor (samples, <input_channels>, azimuth, elevation) reconstructed left cortical texture. right_recon_x: Tensor (samples, <input_channels>, azimuth, elevation) reconstructed right cortical texture. """ logger.debug("SphericalVAE forward pass") logger.debug(debug_msg("left cortical", left_x)) logger.debug(debug_msg("right cortical", right_x)) q = self.encode(left_x, right_x) logger.debug(debug_msg("posterior loc", q.loc)) logger.debug(debug_msg("posterior scale", q.scale)) z = self.reparameterize(q) logger.debug(debug_msg("z", z)) left_recon_x, right_recon_x = self.decode(z) logger.debug(debug_msg("left recon cortical", left_recon_x)) logger.debug(debug_msg("right recon cortical", right_recon_x)) return left_recon_x, right_recon_x, {"q": q, "z": z} class SphericalGVAE(nn.Module): """ Spherical Grided VAE architecture. Use SpMa - Spherical Mapping convolution method. Notes ----- Debuging messages can be displayed by changing the log level using ``setup_logging(level='debug')``. See Also -------- SphericalVAE References ---------- Representation Learning of Resting State fMRI with Variational Autoencoder, NeuroImage 2021. 
""" def __init__(self, input_channels=1, input_dim=192, latent_dim=64, conv_flts=[64, 128, 128, 256, 256]): """ Init class. Parameters ---------- input_channels: int, default 1 the number of input channels. input_dim: int, default 192 the size of the converted 3-D surface to the 2-D grid. latent_dim: int, default 64 the size of the stochastic latent state of the SVAE. conv_flts: list of int the size of convolutional filters. """ logger.debug("SphericalGVAE init...") super(SphericalGVAE, self).__init__() self.input_channels = input_channels self.input_dim = input_dim self.latent_dim = latent_dim self.conv_flts = conv_flts self.n_layers = len(self.conv_flts) self.top_flatten_dim = int(self.input_dim / (2 ** self.n_layers)) self.top_final = self.conv_flts[-1] * self.top_flatten_dim ** 2 # define the encoder self.enc_left_conv = IcoSpMaConv( in_feats=self.input_channels, out_feats=int(self.conv_flts[0] / 2), kernel_size=8, stride=2, pad=3) self.enc_right_conv = IcoSpMaConv( in_feats=self.input_channels, out_feats=int(self.conv_flts[0] / 2), kernel_size=8, stride=2, pad=3) self.enc_w_conv = nn.ModuleList([ IcoSpMaConv(self.conv_flts[i - 1], self.conv_flts[i], kernel_size=4, stride=2, pad=1) for i in range(1, self.n_layers)]) self.enc_w_dense = nn.Linear(self.top_final, self.latent_dim * 2) # define the decoder self.dec_w_dense = nn.Linear(self.latent_dim, self.top_final) self.dec_w_conv = nn.ModuleList([ IcoSpMaConvTranspose( in_feats=self.conv_flts[i], out_feats=self.conv_flts[i - 1], kernel_size=4, stride=2, pad=1, zero_pad=3) for i in range(self.n_layers - 1, 0, -1)]) self.dec_left_conv = IcoSpMaConvTranspose( in_feats=int(self.conv_flts[0] / 2), out_feats=self.input_channels, kernel_size=8, stride=2, pad=3, zero_pad=9) self.dec_right_conv = IcoSpMaConvTranspose( in_feats=int(self.conv_flts[0] / 2), out_feats=self.input_channels, kernel_size=8, stride=2, pad=3, zero_pad=9) self.relu = nn.ReLU(inplace=True) def encode(self, left_x, right_x): """ The encoder. 
Parameters ---------- left_x: Tensor (samples, <input_channels>, azimuth, elevation) input left cortical texture. right_x: Tensor (samples, <input_channels>, azimuth, elevation) input right cortical texture. Returns ------- q(z | x): Normal (batch_size, <latent_dim>) a Normal distribution. """ x = torch.cat( (self.enc_left_conv(left_x), self.enc_right_conv(right_x)), dim=1) x = self.relu(x) for layer_idx in range(self.n_layers - 1): x = self.relu(self.enc_w_conv[layer_idx](x)) x = x.view(-1, self.top_final) x = self.enc_w_dense(x) z_mu, z_logvar = torch.chunk(x, chunks=2, dim=1) return Normal(loc=z_mu, scale=z_logvar.exp().pow(0.5)) def decode(self, z): """ The decoder. Parameters ---------- z: Tensor (samples, <latent_dim>) the stochastic latent state z. Returns ------- left_recon_x: Tensor (samples, <input_channels>, azimuth, elevation) reconstructed left cortical texture. right_recon_x: Tensor (samples, <input_channels>, azimuth, elevation) reconstructed right cortical texture. """ x = self.relu(self.dec_w_dense(z)) x = x.view(-1, self.conv_flts[-1], self.top_flatten_dim, self.top_flatten_dim) for layer_idx in range(self.n_layers - 1): x = self.relu(self.dec_w_conv[layer_idx](x)) left_recon_x, right_recon_x = torch.chunk(x, chunks=2, dim=1) left_recon_x = self.dec_left_conv(left_recon_x) right_recon_x = self.dec_right_conv(right_recon_x) return left_recon_x, right_recon_x def reparameterize(self, q): """ Implement the reparametrization trick. """ if self.training: z = q.rsample() else: z = q.loc return z def forward(self, left_x, right_x): """ The forward method. Parameters ---------- left_x: Tensor (samples, <input_channels>, azimuth, elevation) input left cortical texture. right_x: Tensor (samples, <input_channels>, azimuth, elevation) input right cortical texture. Returns ------- left_recon_x: Tensor (samples, <input_channels>, azimuth, elevation) reconstructed left cortical texture. 
right_recon_x: Tensor (samples, <input_channels>, azimuth, elevation) reconstructed right cortical texture. """ logger.debug("SphericalGVAE forward pass") logger.debug(debug_msg("left cortical", left_x)) logger.debug(debug_msg("right cortical", right_x)) q = self.encode(left_x, right_x) logger.debug(debug_msg("posterior loc", q.loc)) logger.debug(debug_msg("posterior scale", q.scale)) z = self.reparameterize(q) logger.debug(debug_msg("z", z)) left_recon_x, right_recon_x = self.decode(z) logger.debug(debug_msg("left recon cortical", left_recon_x)) logger.debug(debug_msg("right recon cortical", right_recon_x)) return left_recon_x, right_recon_x, {"q": q, "z": z}
38.225
78
0.602158
1,961
15,290
4.473738
0.131056
0.024621
0.03146
0.034652
0.780691
0.756412
0.734526
0.712641
0.70318
0.683803
0
0.014374
0.281099
15,290
399
79
38.320802
0.783752
0.321321
0
0.566845
0
0
0.03481
0
0
0
0
0
0
1
0.053476
false
0.010695
0.032086
0
0.139037
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
48316ba47c2fc99ec6ad0d6e2f8d05a598907431
410
py
Python
tests/stdlib/test_urllib2_localnet.py
CaesarLinsa/Eventlet
43e832b441977f0344a56d22b267dca443333960
[ "MIT" ]
19
2015-05-01T19:59:03.000Z
2021-12-09T08:03:16.000Z
tests/stdlib/test_urllib2_localnet.py
CaesarLinsa/Eventlet
43e832b441977f0344a56d22b267dca443333960
[ "MIT" ]
1
2018-01-03T15:26:49.000Z
2018-01-03T15:26:49.000Z
tests/stdlib/test_urllib2_localnet.py
CaesarLinsa/Eventlet
43e832b441977f0344a56d22b267dca443333960
[ "MIT" ]
30
2015-03-25T19:40:07.000Z
2021-05-28T22:59:26.000Z
from eventlet import patcher from eventlet.green import BaseHTTPServer from eventlet.green import threading from eventlet.green import socket from eventlet.green import urllib2 patcher.inject('test.test_urllib2_localnet', globals(), ('BaseHTTPServer', BaseHTTPServer), ('threading', threading), ('socket', socket), ('urllib2', urllib2)) if __name__ == "__main__": test_main()
25.625
44
0.72439
44
410
6.5
0.363636
0.20979
0.237762
0.321678
0
0
0
0
0
0
0
0.01173
0.168293
410
16
45
25.625
0.826979
0
0
0
0
0
0.170316
0.06326
0
0
0
0
0
1
0
true
0
0.384615
0
0.384615
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
6f8825cd5fc896a74e5eb19c8c314dbd8ca932bb
2,622
py
Python
backendapp/travels/migrations/0039_auto_20200325_1342.py
finebrush/takeatripsFB
85a5be1a2ee68531f04f2601a3f69ddc608d4d27
[ "BSD-3-Clause" ]
null
null
null
backendapp/travels/migrations/0039_auto_20200325_1342.py
finebrush/takeatripsFB
85a5be1a2ee68531f04f2601a3f69ddc608d4d27
[ "BSD-3-Clause" ]
13
2020-02-12T03:05:15.000Z
2022-02-10T14:26:50.000Z
backendapp/travels/migrations/0039_auto_20200325_1342.py
finebrush/takeatripsFB
85a5be1a2ee68531f04f2601a3f69ddc608d4d27
[ "BSD-3-Clause" ]
null
null
null
# Generated by Django 2.2.7 on 2020-03-25 04:42 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('common', '0017_auto_20200325_1342'), ('travels', '0038_auto_20200123_1535'), ] operations = [ migrations.AddField( model_name='buypart', name='pin', field=models.ManyToManyField(to='common.PinBuy', verbose_name='사다 핀'), ), migrations.AddField( model_name='funpart', name='pin', field=models.ManyToManyField(to='common.PinFun', verbose_name='놀다 핀'), ), migrations.CreateModel( name='EatPart', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('biztimeko', models.CharField(max_length=128, verbose_name='영업시간(한국어)')), ('biztimeeng', models.CharField(max_length=128, verbose_name='영업시간(영어)')), ('biztimeven', models.CharField(max_length=128, verbose_name='영업시간(베트남어)')), ('menuko', models.CharField(max_length=128, verbose_name='대표메뉴(한국어)')), ('menueng', models.CharField(max_length=128, verbose_name='대표메뉴(영어)')), ('menuven', models.CharField(max_length=128, verbose_name='대표메뉴(베트남어)')), ('part', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='travels.InfoTravel')), ('pin', models.ManyToManyField(to='common.PinEat', verbose_name='먹다 핀')), ], ), migrations.CreateModel( name='DrinkPart', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('biztimeko', models.CharField(max_length=128, verbose_name='영업시간(한국어)')), ('biztimeeng', models.CharField(max_length=128, verbose_name='영업시간(영어)')), ('biztimeven', models.CharField(max_length=128, verbose_name='영업시간(베트남어)')), ('menuko', models.CharField(max_length=128, verbose_name='대표메뉴(한국어)')), ('menueng', models.CharField(max_length=128, verbose_name='대표메뉴(영어)')), ('menuven', models.CharField(max_length=128, verbose_name='대표메뉴(베트남어)')), ('part', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, 
to='travels.InfoTravel')), ('pin', models.ManyToManyField(to='common.PinDrink', verbose_name='마시다 핀')), ], ), ]
48.555556
117
0.596873
277
2,622
5.490975
0.292419
0.130178
0.142012
0.189349
0.723208
0.723208
0.723208
0.669297
0.669297
0.669297
0
0.042004
0.246377
2,622
53
118
49.471698
0.727733
0.017162
0
0.638298
1
0
0.16699
0.017864
0
0
0
0
0
1
0
false
0
0.042553
0
0.106383
0
0
0
0
null
0
0
1
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6fbedbd183e484726e72d45dba143842450a24a5
199
py
Python
FactorLib/data_source/__init__.py
wanhanwan/Packages
14dfbd70603d45bb6c8c161c56b9ed9cf7c301d3
[ "MIT" ]
5
2018-06-29T16:56:10.000Z
2019-06-20T03:31:44.000Z
FactorLib/data_source/__init__.py
wanhanwan/Packages
14dfbd70603d45bb6c8c161c56b9ed9cf7c301d3
[ "MIT" ]
null
null
null
FactorLib/data_source/__init__.py
wanhanwan/Packages
14dfbd70603d45bb6c8c161c56b9ed9cf7c301d3
[ "MIT" ]
3
2018-06-25T06:37:17.000Z
2018-11-22T08:12:20.000Z
# coding: utf-8 from .converter import IndustryConverter from .trade_calendar import to_offset try: from rqdatac import * import rqdatac as rq except ModuleNotFoundError: pass
19.9
41
0.728643
24
199
5.958333
0.75
0
0
0
0
0
0
0
0
0
0
0.006536
0.231156
199
9
42
22.111111
0.928105
0.065327
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.142857
0.571429
0
0.571429
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
5
6fd41ee838300a7edd4327ea2a8bbe79fc850cd7
57
py
Python
__main__.py
NonSvizzero/whatsapp-assistant-bot
af3c25f727c9a9e06c1cff83047fa3d60ccf8936
[ "MIT" ]
null
null
null
__main__.py
NonSvizzero/whatsapp-assistant-bot
af3c25f727c9a9e06c1cff83047fa3d60ccf8936
[ "MIT" ]
null
null
null
__main__.py
NonSvizzero/whatsapp-assistant-bot
af3c25f727c9a9e06c1cff83047fa3d60ccf8936
[ "MIT" ]
null
null
null
import threading from src import whatsapp_assistant_bot
14.25
38
0.877193
8
57
6
0.875
0
0
0
0
0
0
0
0
0
0
0
0.122807
57
3
39
19
0.96
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6fff4bd000eb522551dfe5b4d6af71b3d1901fc9
86
py
Python
wordclouds/__init__.py
brandons209/FlapJack-Cogs
9a959d27b543b5e4ad321979f1c80fbff7556a02
[ "MIT" ]
null
null
null
wordclouds/__init__.py
brandons209/FlapJack-Cogs
9a959d27b543b5e4ad321979f1c80fbff7556a02
[ "MIT" ]
null
null
null
wordclouds/__init__.py
brandons209/FlapJack-Cogs
9a959d27b543b5e4ad321979f1c80fbff7556a02
[ "MIT" ]
null
null
null
from .wordclouds import WordClouds def setup(bot): bot.add_cog(WordClouds(bot))
14.333333
34
0.744186
12
86
5.25
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.151163
86
5
35
17.2
0.863014
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
5
b50f7bbfd7aba89edee4bcd462ae995aa9233f3b
21,886
py
Python
dendri/tests/test_dates.py
Naissant/dendri
ac6223ccdd96331363d04fef5ddc3ab1a5f8db13
[ "MIT" ]
null
null
null
dendri/tests/test_dates.py
Naissant/dendri
ac6223ccdd96331363d04fef5ddc3ab1a5f8db13
[ "MIT" ]
5
2021-02-05T23:30:16.000Z
2021-03-16T12:30:31.000Z
dendri/tests/test_dates.py
Naissant/dendri
ac6223ccdd96331363d04fef5ddc3ab1a5f8db13
[ "MIT" ]
1
2021-02-26T18:34:06.000Z
2021-02-26T18:34:06.000Z
from datetime import date import pytest import pyspark.sql.functions as F from dendri.dates import ( condense_segments, extend_segments, covered_days, first_event_in_x_days, count_events_by_period, age, ) def d(s): return date.fromisoformat(s) class TestCondenseSegments: input_df = ( [ # Scenario #1: Non-overlap segment and segments 0 days apart ("001", "100", d("2017-01-01"), d("2017-01-02")), ("001", "101", d("2018-01-01"), d("2018-01-15")), ("001", "102", d("2018-01-15"), d("2018-01-20")), # Scenario #2: Segments 1 days apart ("002", "200", d("2018-02-01"), d("2018-02-15")), ("002", "201", d("2018-02-16"), d("2018-02-20")), # Scenario #3: Segments 2 days apart ("003", "300", d("2018-03-01"), d("2018-03-15")), ("003", "301", d("2018-03-17"), d("2018-03-20")), # Scenario #4: Segments 1 day apart with nested segment ("004", "400", d("2019-08-01"), d("2019-08-07")), ("004", "401", d("2019-08-05"), d("2019-08-06")), ("004", "402", d("2019-08-08"), d("2019-08-15")), ], ["entity_id", "iterator", "start_dt", "end_dt"], ) output_df_tolerance_1 = ( [ ( "001", "100", d("2017-01-01"), d("2017-01-02"), d("2017-01-01"), d("2017-01-02"), ), ( "001", "101", d("2018-01-01"), d("2018-01-15"), d("2018-01-01"), d("2018-01-20"), ), ( "001", "102", d("2018-01-15"), d("2018-01-20"), d("2018-01-01"), d("2018-01-20"), ), ( "002", "200", d("2018-02-01"), d("2018-02-15"), d("2018-02-01"), d("2018-02-20"), ), ( "002", "201", d("2018-02-16"), d("2018-02-20"), d("2018-02-01"), d("2018-02-20"), ), ( "003", "300", d("2018-03-01"), d("2018-03-15"), d("2018-03-01"), d("2018-03-15"), ), ( "003", "301", d("2018-03-17"), d("2018-03-20"), d("2018-03-17"), d("2018-03-20"), ), ( "004", "400", d("2019-08-01"), d("2019-08-07"), d("2019-08-01"), d("2019-08-15"), ), ( "004", "401", d("2019-08-05"), d("2019-08-06"), d("2019-08-01"), d("2019-08-15"), ), ( "004", "402", d("2019-08-08"), d("2019-08-15"), d("2019-08-01"), d("2019-08-15"), ), ], [ "entity_id", "iterator", "start_dt", "end_dt", 
"condense_start_dt", "condense_end_dt", ], ) output_df_tolerance_2 = ( [ ( "001", "100", d("2017-01-01"), d("2017-01-02"), d("2017-01-01"), d("2017-01-02"), ), ( "001", "101", d("2018-01-01"), d("2018-01-15"), d("2018-01-01"), d("2018-01-20"), ), ( "001", "102", d("2018-01-15"), d("2018-01-20"), d("2018-01-01"), d("2018-01-20"), ), ( "002", "200", d("2018-02-01"), d("2018-02-15"), d("2018-02-01"), d("2018-02-20"), ), ( "002", "201", d("2018-02-16"), d("2018-02-20"), d("2018-02-01"), d("2018-02-20"), ), ( "003", "300", d("2018-03-01"), d("2018-03-15"), d("2018-03-01"), d("2018-03-20"), ), ( "003", "301", d("2018-03-17"), d("2018-03-20"), d("2018-03-01"), d("2018-03-20"), ), ( "004", "400", d("2019-08-01"), d("2019-08-07"), d("2019-08-01"), d("2019-08-15"), ), ( "004", "401", d("2019-08-05"), d("2019-08-06"), d("2019-08-01"), d("2019-08-15"), ), ( "004", "402", d("2019-08-08"), d("2019-08-15"), d("2019-08-01"), d("2019-08-15"), ), ], [ "entity_id", "iterator", "start_dt", "end_dt", "condense_start_dt", "condense_end_dt", ], ) output_df_tolerance_1_no_retain = ( [ ( "001", d("2017-01-01"), d("2017-01-02"), ), ( "001", d("2018-01-01"), d("2018-01-20"), ), ( "002", d("2018-02-01"), d("2018-02-20"), ), ( "003", d("2018-03-01"), d("2018-03-15"), ), ( "003", d("2018-03-17"), d("2018-03-20"), ), ( "004", d("2019-08-01"), d("2019-08-15"), ), ], [ "entity_id", "start_dt", "end_dt", ], ) output_df_org = ( [ ( "001", "100", d("2017-01-01"), d("2017-01-02"), d("2017-01-01"), d("2017-01-02"), ), ( "001", "101", d("2018-01-01"), d("2018-01-15"), d("2018-01-01"), d("2018-01-15"), ), ( "001", "102", d("2018-01-15"), d("2018-01-20"), d("2018-01-15"), d("2018-01-20"), ), ( "002", "200", d("2018-02-01"), d("2018-02-15"), d("2018-02-01"), d("2018-02-15"), ), ( "002", "201", d("2018-02-16"), d("2018-02-20"), d("2018-02-16"), d("2018-02-20"), ), ( "003", "300", d("2018-03-01"), d("2018-03-15"), d("2018-03-01"), d("2018-03-15"), ), ( "003", "301", d("2018-03-17"), d("2018-03-20"), 
d("2018-03-17"), d("2018-03-20"), ), ( "004", "400", d("2019-08-01"), d("2019-08-07"), d("2019-08-01"), d("2019-08-07"), ), ( "004", "401", d("2019-08-05"), d("2019-08-06"), d("2019-08-05"), d("2019-08-06"), ), ( "004", "402", d("2019-08-08"), d("2019-08-15"), d("2019-08-08"), d("2019-08-15"), ), ], [ "entity_id", "iterator", "start_dt", "end_dt", "condense_start_dt", "condense_end_dt", ], ) @pytest.mark.parametrize( "df,group_col,start_dt_col,end_dt_col,tolerance,retain_shape,expected_output", [ ( input_df, "entity_id", "start_dt", "end_dt", 1, True, output_df_tolerance_1, ), ( input_df, "entity_id", "start_dt", "end_dt", 2, True, output_df_tolerance_2, ), ( input_df, ["entity_id"], "start_dt", "end_dt", 1, True, output_df_tolerance_1, ), ( input_df, ["entity_id", "iterator"], "start_dt", "end_dt", 1, True, output_df_org, ), ( input_df, "entity_id", "start_dt", "end_dt", 1, False, output_df_tolerance_1_no_retain, ), ], ) def test_condense_segments( self, spark_context, df, group_col, start_dt_col, end_dt_col, tolerance, retain_shape, expected_output, ): res = condense_segments( spark_context.createDataFrame(*df), group_col, start_dt_col, end_dt_col, tolerance, retain_shape, ) exp = spark_context.createDataFrame(*expected_output) assert sorted(res.collect()) == sorted(exp.collect()) class TestExtendSegments: input_df = ( [ # Scenario #1: Non-overlapping sement and segments 0 days apart ("001", "100", d("2017-01-01"), d("2017-01-02")), ("001", "101", d("2018-01-01"), d("2018-01-15")), ("001", "102", d("2018-01-15"), d("2018-01-20")), # Scenario #2: Segments 1 day apart ("002", "200", d("2018-02-01"), d("2018-02-15")), ("002", "201", d("2018-02-16"), d("2018-02-20")), # Scenario #3: Segments 2 days apart ("003", "300", d("2018-03-01"), d("2018-03-15")), ("003", "301", d("2018-03-17"), d("2018-03-20")), # Scenario #4: Segments 1 day apart with nested segment ("004", "400", d("2019-08-01"), d("2019-08-07")), ("004", "401", d("2019-08-05"), d("2019-08-06")), ("004", 
"402", d("2019-08-08"), d("2019-08-15")), # Scenario #5: Duplicate segments and segments 2 days apart ("005", "500", d("2019-01-01"), d("2019-01-02")), ("005", "501", d("2019-01-01"), d("2019-01-02")), ("005", "502", d("2019-01-04"), d("2019-01-05")), ], ["entity_id", "iterator", "start_dt", "end_dt"], ) output_df = ( [ ("001", "100", d("2017-01-01"), d("2017-01-02")), ("001", "101", d("2018-01-01"), d("2018-01-21")), ("001", "102", d("2018-01-01"), d("2018-01-21")), ("002", "200", d("2018-02-01"), d("2018-02-20")), ("002", "201", d("2018-02-01"), d("2018-02-20")), ("003", "300", d("2018-03-01"), d("2018-03-15")), ("003", "301", d("2018-03-17"), d("2018-03-20")), ("004", "400", d("2019-08-01"), d("2019-08-17")), ("004", "401", d("2019-08-01"), d("2019-08-17")), ("004", "402", d("2019-08-01"), d("2019-08-17")), ("005", "500", d("2019-01-01"), d("2019-01-06")), ("005", "501", d("2019-01-01"), d("2019-01-06")), ("005", "502", d("2019-01-01"), d("2019-01-06")), ], ["entity_id", "iterator", "start_dt", "end_dt"], ) @pytest.mark.parametrize( "df, group_col, start_dt_col, end_dt_col, tolerance, expected_output", [ (input_df, "entity_id", "start_dt", "end_dt", 1, output_df), (input_df, ["entity_id"], "start_dt", "end_dt", 1, output_df), (input_df, ["entity_id", "iterator"], "start_dt", "end_dt", 1, input_df), ], ) def test_extend_segments( self, spark_context, df, group_col, start_dt_col, end_dt_col, tolerance, expected_output, ): res = extend_segments( spark_context.createDataFrame(*df), group_col, start_dt_col, end_dt_col, tolerance, ) exp = spark_context.createDataFrame(*expected_output) assert sorted(res.collect()) == sorted(exp.collect()) class TestCoveredDays: input_df = ( [ # Straight forward case ( "1", d("2019-01-01"), d("2019-01-01"), d("2019-01-01"), d("2019-01-02"), 1, ), # Overlapping segments ( "2", d("2019-01-01"), d("2019-01-02"), d("2019-01-01"), d("2019-01-03"), 3, ), ( "2", d("2019-01-02"), d("2019-01-03"), d("2019-01-01"), d("2019-01-03"), 3, ), # 1-day 
gap ( "3", d("2019-01-01"), d("2019-01-02"), d("2019-01-01"), d("2019-01-04"), 4, ), ( "3", d("2019-01-03"), d("2019-01-04"), d("2019-01-01"), d("2019-01-04"), 4, ), # 2-day gap ( "4", d("2019-01-01"), d("2019-01-02"), d("2019-01-01"), d("2019-01-05"), 4, ), ( "4", d("2019-01-04"), d("2019-01-05"), d("2019-01-01"), d("2019-01-05"), 4, ), # Nested Segments ( "5", d("2019-01-01"), d("2019-01-04"), d("2019-01-01"), d("2019-01-05"), 5, ), ( "5", d("2019-01-02"), d("2019-01-03"), d("2019-01-01"), d("2019-01-05"), 5, ), ( "5", d("2019-01-04"), d("2019-01-05"), d("2019-01-01"), d("2019-01-05"), 5, ), # Segment entirely before window ( "6", d("2019-01-01"), d("2019-01-01"), d("2019-01-02"), d("2019-01-02"), 0, ), # Segment entirely after window ( "7", d("2019-01-04"), d("2019-01-05"), d("2019-01-01"), d("2019-01-03"), 0, ), # Segment wider than window ( "8", d("2019-01-01"), d("2019-01-05"), d("2019-01-02"), d("2019-01-03"), 2, ), # Two segments overlapping edge of window ( "9", d("2019-01-01"), d("2019-01-02"), d("2019-01-02"), d("2019-01-04"), 2, ), ( "9", d("2019-01-04"), d("2019-01-05"), d("2019-01-02"), d("2019-01-04"), 2, ), ], [ "entity_id", "segment_start_dt", "segment_end_dt", "window_start_dt", "window_end_dt", "covered_days", ], ) def test_covered_days_window_columns(self, spark_context): input_df = spark_context.createDataFrame(*self.input_df) res = covered_days( df=input_df.drop("covered_days"), group_col="entity_id", segment_start_dt_col="segment_start_dt", segment_end_dt_col="segment_end_dt", window_start_dt=F.col("window_start_dt"), window_end_dt=F.col("window_end_dt"), ) assert sorted(res.collect()) == sorted(input_df.collect()) def test_covered_days_window_dates(self, spark_context): input_df = spark_context.createDataFrame(*self.input_df).filter( F.col("entity_id") == "5" ) res = covered_days( df=input_df.drop("covered_days"), group_col="entity_id", segment_start_dt_col="segment_start_dt", segment_end_dt_col="segment_end_dt", 
window_start_dt=d("2019-01-01"), window_end_dt=d("2019-01-05"), ) assert sorted(res.collect()) == sorted(input_df.collect()) def test_first_event_in_x_days(spark_context): input_df = spark_context.createDataFrame( [ ("001", d("2019-01-01"), True), ("001", d("2019-01-15"), False), ("001", d("2019-01-22"), True), ("002", d("2019-01-25"), True), ("003", d("2019-03-01"), True), ("003", d("2019-04-01"), True), ("003", d("2019-05-01"), True), ("004", d("2019-06-01"), True), ("004", d("2019-06-01"), False), ("004", d("2019-06-22"), True), ("005", d("2019-01-01"), True), ("005", d("2019-01-22"), True), ("005", d("2019-01-25"), False), ], ["entity_id", "start_dt", "valid_event"], ) res = first_event_in_x_days( df=input_df.drop("valid_event"), group_col="entity_id", start_dt_col="start_dt", days=20, ) assert sorted(res.collect()) == sorted(input_df.collect()) def test_count_events_by_period( spark_context, ): input_df = spark_context.createDataFrame( [ ("001", "A", d("2018-01-01")), ("002", "B", d("2018-01-01")), ("002", "B", d("2018-05-01")), ("002", "C", d("2018-01-01")), ("003", "D", d("2019-01-01")), ("004", "E", d("2018-01-01")), ("004", "E", d("2018-03-01")), ("004", "E", d("2019-05-01")), ("004", "F", d("2018-01-01")), ], ["entity_id", "event_id", "start_dt"], ) res = count_events_by_period( df=input_df, group_col="entity_id", count_col="event_id", dt_col="start_dt", max_dt="2018-03-31", period="M", num_periods=3, count_unique=False, ) exp = spark_context.createDataFrame( [("001", 0, 0, 1), ("002", 0, 0, 2), ("003", 0, 0, 0), ("004", 1, 0, 2)], ["entity_id", "event_cnt_prd0", "event_cnt_prd1", "event_cnt_prd2"], ) assert sorted(res.collect()) == sorted(exp.collect()) class TestAge: input_df_static = ( [("001", d("2004-02-28")), ("002", d("2004-02-29")), ("003", d("2004-03-01"))], ["entity_id", "birth_dt"], ) output_df_feb28 = ([("001", 5), ("002", 5), ("003", 4)], ["entity_id", "age"]) output_df_mar01 = ([("001", 5), ("002", 5), ("003", 5)], ["entity_id", "age"]) 
output_df_feb28_leap = ([("001", 8), ("002", 7), ("003", 7)], ["entity_id", "age"]) output_df_mar01_leap = ([("001", 8), ("002", 8), ("003", 7)], ["entity_id", "age"]) @pytest.mark.parametrize( "df, age_dt, floor_sw, expected_output", [ (input_df_static, "2009-02-28", True, output_df_feb28), (input_df_static, "2009-03-01", True, output_df_mar01), (input_df_static, "2012-02-28", True, output_df_feb28_leap), (input_df_static, "2012-02-29", True, output_df_mar01_leap), ], ) def test_age(self, spark_context, df, age_dt, floor_sw, expected_output): res = spark_context.createDataFrame(*df).select( "entity_id", age("birth_dt", age_dt, floor_sw).alias("age") ) exp = spark_context.createDataFrame(*expected_output) assert sorted(res.collect()) == sorted(exp.collect())
27.951469
87
0.346569
2,298
21,886
3.153612
0.073977
0.100041
0.079205
0.037257
0.79495
0.748724
0.709121
0.692286
0.65089
0.603836
0
0.262257
0.474367
21,886
782
88
27.987212
0.367698
0.02842
0
0.692201
0
0
0.219768
0.003532
0
0
0
0
0.009749
1
0.011142
false
0
0.005571
0.001393
0.041783
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
82fd753adbf7cb682e8810accafb0e23855e6e62
93
py
Python
enthought/mayavi/components/common.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
3
2016-12-09T06:05:18.000Z
2018-03-01T13:00:29.000Z
enthought/mayavi/components/common.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
1
2020-12-02T00:51:32.000Z
2020-12-02T08:48:55.000Z
enthought/mayavi/components/common.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
null
null
null
# proxy module from __future__ import absolute_import from mayavi.components.common import *
23.25
38
0.83871
12
93
6.083333
0.75
0
0
0
0
0
0
0
0
0
0
0
0.11828
93
3
39
31
0.890244
0.129032
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d21a20d580c36681c6c180a18f324b5b6c7f84bc
25
py
Python
Day 30 Conclusions/conclusion.py
Sumanth-Talluri/30-Days-Of-Python
5acdca11126fa45b3d412f9a8555a592f3ec7374
[ "MIT" ]
3
2020-08-29T17:04:19.000Z
2020-09-12T13:28:29.000Z
Day 30 Conclusions/conclusion.py
Sumanth-Talluri/30Days-Of-Python
5acdca11126fa45b3d412f9a8555a592f3ec7374
[ "MIT" ]
null
null
null
Day 30 Conclusions/conclusion.py
Sumanth-Talluri/30Days-Of-Python
5acdca11126fa45b3d412f9a8555a592f3ec7374
[ "MIT" ]
1
2020-11-15T09:07:48.000Z
2020-11-15T09:07:48.000Z
# No exercises on day 30
12.5
24
0.72
5
25
3.6
1
0
0
0
0
0
0
0
0
0
0
0.105263
0.24
25
1
25
25
0.842105
0.88
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
d227244dc312a5de71fd68e289881c4b33202be4
96
py
Python
src/game/__init__.py
berkerdemoglu/Chess
5d0d03112502596fbf3d0c1688350b9abc5bc788
[ "CC0-1.0" ]
1
2021-06-27T18:16:53.000Z
2021-06-27T18:16:53.000Z
src/game/__init__.py
berkerdemoglu/Chess
5d0d03112502596fbf3d0c1688350b9abc5bc788
[ "CC0-1.0" ]
null
null
null
src/game/__init__.py
berkerdemoglu/Chess
5d0d03112502596fbf3d0c1688350b9abc5bc788
[ "CC0-1.0" ]
null
null
null
from .game import ChessGame from .launcher import Launcher, LAUNCHER_FEN_KEY, LAUNCHER_SETTINGS
32
67
0.854167
13
96
6.076923
0.615385
0
0
0
0
0
0
0
0
0
0
0
0.104167
96
2
68
48
0.918605
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5