hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
15ccf0ad411c06a2a10f53568a644a0ac7444a57
| 175
|
py
|
Python
|
lickport_array_interface/__init__.py
|
peterpolidoro/lickport_array_python
|
380eafdbe010f5f94230cac5332a3094d833f94f
|
[
"BSD-3-Clause"
] | null | null | null |
lickport_array_interface/__init__.py
|
peterpolidoro/lickport_array_python
|
380eafdbe010f5f94230cac5332a3094d833f94f
|
[
"BSD-3-Clause"
] | null | null | null |
lickport_array_interface/__init__.py
|
peterpolidoro/lickport_array_python
|
380eafdbe010f5f94230cac5332a3094d833f94f
|
[
"BSD-3-Clause"
] | 1
|
2021-10-01T18:51:17.000Z
|
2021-10-01T18:51:17.000Z
|
'''
This Python package (lickport_array_interface) creates a class named LickportArrayInterface.
'''
from .lickport_array_interface import LickportArrayInterface, __version__
| 35
| 92
| 0.845714
| 18
| 175
| 7.777778
| 0.777778
| 0.185714
| 0.314286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091429
| 175
| 4
| 93
| 43.75
| 0.880503
| 0.525714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c62d617024679f3b3185fa6d6637c879d377a960
| 3,119
|
py
|
Python
|
tests/interpreter/test_gradcam.py
|
Christophe-Jia/InterpretDL
|
5736cb880d3c9bd79241d2ea6cb0490d9e8b089d
|
[
"Apache-2.0"
] | 107
|
2020-07-02T14:25:01.000Z
|
2022-03-31T18:49:01.000Z
|
tests/interpreter/test_gradcam.py
|
Christophe-Jia/InterpretDL
|
5736cb880d3c9bd79241d2ea6cb0490d9e8b089d
|
[
"Apache-2.0"
] | 22
|
2020-07-28T01:57:21.000Z
|
2022-03-31T07:51:36.000Z
|
tests/interpreter/test_gradcam.py
|
Christophe-Jia/InterpretDL
|
5736cb880d3c9bd79241d2ea6cb0490d9e8b089d
|
[
"Apache-2.0"
] | 23
|
2020-07-10T05:08:39.000Z
|
2022-03-31T10:00:04.000Z
|
import unittest
from paddle.vision.models import mobilenet_v2
import numpy as np
from paddle.vision.models.resnet import resnet50
import interpretdl as it
from tests.utils import assert_arrays_almost_equal
class TestGradCAM(unittest.TestCase):
def test_cv(self):
paddle_model = mobilenet_v2(pretrained=True)
img_path = 'imgs/catdog.jpg'
algo = it.GradCAMInterpreter(paddle_model, use_cuda=False)
exp = algo.interpret(img_path, 'features.18.2', visual=False)
result = np.array([exp.mean(), exp.std(), exp.min(), exp.max(), *exp.shape])
desired = np.array([7.08578909e-06, 9.28105146e-06, 0.00000000e+00, 3.74892770e-05,
1.00000000e+00, 7.00000000e+00, 7.00000000e+00])
assert_arrays_almost_equal(self, result, desired)
def test_cv_class(self):
paddle_model = mobilenet_v2(pretrained=True)
img_path = 'imgs/catdog.jpg'
algo = it.GradCAMInterpreter(paddle_model, use_cuda=False)
exp = algo.interpret(img_path, 'features.18.2', label=282, visual=False)
result = np.array([exp.mean(), exp.std(), exp.min(), exp.max(), *exp.shape])
desired = np.array([5.12873930e-06, 7.74075761e-06, 0.00000000e+00, 2.88265182e-05,
1.00000000e+00, 7.00000000e+00, 7.00000000e+00])
assert_arrays_almost_equal(self, result, desired)
def test_cv_layer(self):
paddle_model = mobilenet_v2(pretrained=True)
img_path = 'imgs/catdog.jpg'
algo = it.GradCAMInterpreter(paddle_model, use_cuda=False)
exp = algo.interpret(img_path, 'features.16.conv.3', visual=False)
result = np.array([exp.mean(), exp.std(), exp.min(), exp.max(), *exp.shape])
desired = np.array([2.97199367e-05, 3.79896701e-05, 0.00000000e+00, 1.25247447e-04,
1.00000000e+00, 7.00000000e+00, 7.00000000e+00])
assert_arrays_almost_equal(self, result, desired)
def test_cv_layer_2(self):
paddle_model = mobilenet_v2(pretrained=True)
img_path = 'imgs/catdog.jpg'
algo = it.GradCAMInterpreter(paddle_model, use_cuda=False)
exp = algo.interpret(img_path, 'features.8.conv.3', visual=False)
result = np.array([exp.mean(), exp.std(), exp.min(), exp.max(), *exp.shape])
desired = np.array([1.13254619e-05, 1.62324668e-05, 0.00000000e+00, 6.76311683e-05,
1.00000000e+00, 1.40000000e+01, 1.40000000e+01])
assert_arrays_almost_equal(self, result, desired, 2e-3)
def test_cv_multiple_inputs(self):
paddle_model = mobilenet_v2(pretrained=True)
img_path = ['imgs/catdog.jpg', 'imgs/catdog.jpg']
algo = it.GradCAMInterpreter(paddle_model, use_cuda=False)
exp = algo.interpret(img_path, 'features.18.2', visual=False)
result = np.array([exp.mean(), exp.std(), exp.min(), exp.max(), *exp.shape])
desired = np.array([7.08578864e-06, 9.28105146e-06, 0.00000000e+00, 3.74892770e-05,
2.00000000e+00, 7.00000000e+00, 7.00000000e+00])
assert_arrays_almost_equal(self, result, desired)
if __name__ == '__main__':
unittest.main()
| 42.148649
| 91
| 0.667522
| 441
| 3,119
| 4.566893
| 0.213152
| 0.098312
| 0.047666
| 0.083416
| 0.763158
| 0.763158
| 0.763158
| 0.743297
| 0.743297
| 0.743297
| 0
| 0.16337
| 0.189484
| 3,119
| 74
| 92
| 42.148649
| 0.633307
| 0
| 0
| 0.518519
| 0
| 0
| 0.055128
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.092593
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d69df20462c18e565f5277ccad20232dfcef037d
| 81
|
py
|
Python
|
bindings/pydairlib/multibody/__init__.py
|
DavidDePauw1/dairlib
|
3c75c8f587927b12a58f2e88dda61cc0e7dc82a3
|
[
"BSD-3-Clause"
] | 32
|
2019-04-15T03:10:26.000Z
|
2022-03-28T17:27:03.000Z
|
bindings/pydairlib/multibody/__init__.py
|
DavidDePauw1/dairlib
|
3c75c8f587927b12a58f2e88dda61cc0e7dc82a3
|
[
"BSD-3-Clause"
] | 157
|
2019-02-21T03:13:57.000Z
|
2022-03-09T19:13:59.000Z
|
bindings/pydairlib/multibody/__init__.py
|
DavidDePauw1/dairlib
|
3c75c8f587927b12a58f2e88dda61cc0e7dc82a3
|
[
"BSD-3-Clause"
] | 22
|
2019-03-02T22:31:42.000Z
|
2022-03-10T21:28:50.000Z
|
# Importing everything in this directory to this package
from .multibody import *
| 40.5
| 56
| 0.814815
| 11
| 81
| 6
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 81
| 2
| 57
| 40.5
| 0.956522
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d6cf665c210e285ad471f8209e2220b1dc0d3b18
| 231
|
py
|
Python
|
8kyu/Sum of positive.py
|
walkgo/codewars_tasks
|
4c0ab6f0e1d2181318fc15b12dd55ef565ecd223
|
[
"MIT"
] | null | null | null |
8kyu/Sum of positive.py
|
walkgo/codewars_tasks
|
4c0ab6f0e1d2181318fc15b12dd55ef565ecd223
|
[
"MIT"
] | null | null | null |
8kyu/Sum of positive.py
|
walkgo/codewars_tasks
|
4c0ab6f0e1d2181318fc15b12dd55ef565ecd223
|
[
"MIT"
] | null | null | null |
def positive_sum(arr):
positive_list = []
for i in arr:
if i > 0:
positive_list.append(i)
return(sum(positive_list))
# Best Practices
def positive_sum(arr):
return sum(x for x in arr if x > 0)
| 19.25
| 39
| 0.606061
| 37
| 231
| 3.648649
| 0.405405
| 0.266667
| 0.207407
| 0.251852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01227
| 0.294372
| 231
| 11
| 40
| 21
| 0.815951
| 0.060606
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.125
| 0.375
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
d6eba3ce3823719c04ea0546f80127e90d56a123
| 135
|
py
|
Python
|
doudian/__init__.py
|
minibear2021/doudian
|
c770299dd2bd92814851de6e0f73b2c18c71d130
|
[
"MIT"
] | 5
|
2021-12-01T16:05:16.000Z
|
2022-03-11T10:19:10.000Z
|
doudian/__init__.py
|
minibear2021/doudian
|
c770299dd2bd92814851de6e0f73b2c18c71d130
|
[
"MIT"
] | null | null | null |
doudian/__init__.py
|
minibear2021/doudian
|
c770299dd2bd92814851de6e0f73b2c18c71d130
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .core import DouDian
from .exception import CodeError, ShopIdError, TokenError
from .type import AppType
| 22.5
| 57
| 0.748148
| 17
| 135
| 5.941176
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008696
| 0.148148
| 135
| 5
| 58
| 27
| 0.869565
| 0.155556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ba379b5d1c3ea64a4881ae33d241982bcb79c7c7
| 257
|
py
|
Python
|
lambdata/__init__.py
|
PatrickRaborn/Lambdata
|
4799075fda0db5e7c25e9ed6d92a353063ea14d8
|
[
"MIT"
] | null | null | null |
lambdata/__init__.py
|
PatrickRaborn/Lambdata
|
4799075fda0db5e7c25e9ed6d92a353063ea14d8
|
[
"MIT"
] | null | null | null |
lambdata/__init__.py
|
PatrickRaborn/Lambdata
|
4799075fda0db5e7c25e9ed6d92a353063ea14d8
|
[
"MIT"
] | null | null | null |
"""lambdata - A collections of DS helper functions"""
import pandas as pd
import numpy as np
def increment(x):
return x + 1
COLORS =['Blue', 'Mauve', 'Cyan', 'Teal']
def df_cleaner(df):
'''Cleans pd.DataFrame'''
# TODO - Implement
pass
| 16.0625
| 53
| 0.634241
| 36
| 257
| 4.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005
| 0.22179
| 257
| 15
| 54
| 17.133333
| 0.805
| 0.330739
| 0
| 0
| 0
| 0
| 0.10625
| 0
| 0
| 0
| 0
| 0.066667
| 0
| 1
| 0.285714
| false
| 0.142857
| 0.285714
| 0.142857
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
ba3d99d3abec6f230fa43db72127273ec053d09d
| 79
|
py
|
Python
|
step 319.py
|
blulady/python
|
65d8e99f6411cf79be0353abc99a2677dfeebe11
|
[
"bzip2-1.0.6"
] | null | null | null |
step 319.py
|
blulady/python
|
65d8e99f6411cf79be0353abc99a2677dfeebe11
|
[
"bzip2-1.0.6"
] | null | null | null |
step 319.py
|
blulady/python
|
65d8e99f6411cf79be0353abc99a2677dfeebe11
|
[
"bzip2-1.0.6"
] | 1
|
2020-09-11T16:05:46.000Z
|
2020-09-11T16:05:46.000Z
|
import math
print (math.sqrt(64))
import random
print (random.randint(0,100))
| 13.166667
| 29
| 0.746835
| 13
| 79
| 4.538462
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 0.113924
| 79
| 5
| 30
| 15.8
| 0.757143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
ba5671c4a8ed2b52625bf2d81b76a47b10813232
| 170
|
py
|
Python
|
notes/components/__init__.py
|
diefenbach/cba-notes
|
48adfa6fa98246212fcfe350f3b9392ec44ad3ef
|
[
"BSD-3-Clause"
] | null | null | null |
notes/components/__init__.py
|
diefenbach/cba-notes
|
48adfa6fa98246212fcfe350f3b9392ec44ad3ef
|
[
"BSD-3-Clause"
] | null | null | null |
notes/components/__init__.py
|
diefenbach/cba-notes
|
48adfa6fa98246212fcfe350f3b9392ec44ad3ef
|
[
"BSD-3-Clause"
] | null | null | null |
from . login import Login
from . main_menu import MainMenu
from . note_edit import NoteEdit
from . note_display import NoteDisplay
from . tag_explorer import TagExplorer
| 28.333333
| 38
| 0.823529
| 24
| 170
| 5.666667
| 0.583333
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 170
| 5
| 39
| 34
| 0.937931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ba5b7d53934f5169cd6a8e0af077287643506f14
| 68
|
py
|
Python
|
AntPool/__init__.py
|
cclauss/AntPool
|
a28e8f5e93801166ab7402793d49292effdb0dc3
|
[
"MIT"
] | 3
|
2022-03-20T02:15:50.000Z
|
2022-03-22T22:39:17.000Z
|
AntPool/__init__.py
|
cclauss/AntPool
|
a28e8f5e93801166ab7402793d49292effdb0dc3
|
[
"MIT"
] | 1
|
2022-03-23T09:43:31.000Z
|
2022-03-23T09:43:31.000Z
|
AntPool/__init__.py
|
cclauss/AntPool
|
a28e8f5e93801166ab7402793d49292effdb0dc3
|
[
"MIT"
] | 1
|
2022-03-20T06:31:40.000Z
|
2022-03-20T06:31:40.000Z
|
from AntPool.AntPool import AntPoolExecutor, __version__, __author__
| 68
| 68
| 0.882353
| 7
| 68
| 7.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 68
| 1
| 68
| 68
| 0.825397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ba6ed4d11cec8302e387de32994ab02c2e93cf08
| 5,935
|
py
|
Python
|
huya.py
|
Ltre/SomeUseful
|
a03009ebd8c8080abdab5d639dede4c638e32f62
|
[
"MIT"
] | 1
|
2021-01-03T07:45:25.000Z
|
2021-01-03T07:45:25.000Z
|
huya.py
|
Ltre/SomeUseful
|
a03009ebd8c8080abdab5d639dede4c638e32f62
|
[
"MIT"
] | null | null | null |
huya.py
|
Ltre/SomeUseful
|
a03009ebd8c8080abdab5d639dede4c638e32f62
|
[
"MIT"
] | null | null | null |
import requests,os,time
import toml
def get_headers(header_raw):
return dict(line.split(": ", 1) for line in header_raw.split("\n") if line != '')
def get_cookies(cookie_raw):
return dict(line.split("=", 1) for line in cookie_raw.split("; "))
'''hcookies = {"Cookie":
"SoundValue=0.50; alphaValue=0.80; __yamid_tt1=0.5630173980060627; __yamid_new=C8736F6698800001A3314BF01CD08350; udb_guiddata=4d0af64ce63b43f29a7a5975d914b205; first_username_flag=35184377273454hy_first_1; udb_accdata=undefined; Hm_lvt_51700b6c722f5bb4cf39906a596ea41f=1576679026,1576732023,1577338814,1577958774; guid=0ad6867c39df195e6201245925596308; udb_passdata=3; __yasmid=0.5630173980060627; isInLiveRoom=true; web_qrlogin_confirm_id=7c2b76b3-3478-4027-8623-8a781b8bdb42; udb_other=%7B%22lt%22%3A%221583155678597%22%2C%22isRem%22%3A%221%22%7D; udb_uid=1199513272235; yyuid=1199513272235; udb_passport=35184377273454hy; username=35184377273454hy; udb_version=1.0; udb_origin=0; udb_status=1; rep_cnt=17; h_unt=1583155740; __yaoldyyuid=1199513272235; _yasids=__rootsid%3DC8D04226B2600001E867E8201711C0A0; huya_flash_rep_cnt=16; udb_biztoken=AQCPgSb_RAp2Lkq_LRPzj-_3SooD7ucFltQSNoc09Z6JU3sujkVXK9djBMBhMuMScB6e27Y9xm4GX-U-j5aUATaeg26L4-ghXpi0qjWVMLkx0oC7WNpy2LIrs6RC9e6Z4UM3b0EkQEooqZDHMPRs6eiVfRvCtOuYqQjLoCUCxLtrSKPwC2Fsso4qAZFonDKQijGLUuD8WsAmj8kYe4T3XQ77DF15J0UEJPTi8iLWHmqCjLq3Sn4ewLBV8rInE6gyW7KVR398oLwQHTJJIqaPNQnG3eBTdFxqe3Pk2hcWfrjeBtlek34BpsyOap59iH6fn6rFgQTTW0ZnTt4-_FohOZAb; PHPSESSID=8q4kdj213rm41gjatak7lhg383; undefined=undefined; huya_web_rep_cnt=16"
}'''
hcookies_raw = toml.load("/root/u/huya.conf")['hcookies_raw']
hcookies=get_cookies(hcookies_raw)
headers={"Accept":"application/json, text/javascript, */*; q=0.01","User-Agent":"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.0 Safari/534.20 QBWebViewUA/2 QBWebViewType/1 WKType/1","Referer":"http//i.huya.com/","Accept-Language":"zh-cn"}
url = 'https://fw.huya.com/dispatch?do=subscribeList&uid=1199513272235&page=1&pageSize=1000'
if not os.path.exists('huser.txt'):
os.makedirs('huser.txt')
namelist = open('huser.txt').read().splitlines()
if namelist:
print(len(namelist))
upurl='https://udblgn.huya.com/web/cookieExchange'
upheaders_raw='''user-agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36
content-type: application/json
accept: */*
origin: https://udblgn.huya.com
sec-fetch-site: same-origin
sec-fetch-mode: cors
referer: https://udblgn.huya.com/proxy.html'''
upheaders=get_headers(upheaders_raw)
d={"uri":20009,"version":"1.0","context":"1","appId":"5002","lcid":"2052","data":{"info":"w5/Ch8Obw5rDjcObwqvDksORw5vDosKRwqrCk8K/w6LDrsOew6LDo8OZwqjCr8KpwqzCncKmw5bDqcOvw6bDssO7w7jCpsOVw5zCqcK7wrvCusK9w4nCr8Onw6DDqcOJw4jCvsK2w5jEiMSJxIbEgMOzxILEgMOqxInElcORw5jDl8Ocw5TDmsOew4nDksO2w7TEgcO7w7vDnMORxJ7EnMSfxJrDlsO+xJ3EnMSlxKrDpcOdxIHEp8SyxLDEr8Sow7PDvcO2w7XClMKTwpnCoMKgwqDCmMKcwqDCpsKOw4LDkcOXw5PDpcOdwqTCq8Kqwq/Cp8KtwrHCnsKpwqDDrsO1w7XDp8O1w4zDqsOvw67DsMO9wqzDhcK9wr3DhsK/wrzCs8SBxIjEiMO6xIjDrsSBw73EjsSDwr7Dl8OPw5jDksORw47DhcSXxIjEmMSMxI3El8O2xJDEksShw5DDqcOgw53DlMSmxJfEp8SbxJzEpsSNxKnEq8Oew7fDrsOrw6LEqcSnxKzEq8StxLrDqcKewpbClsKfwpjClcKMw6LDlcORw6LDl8KSwqvCo8KswqbCpcKiwpnDm8Oow6bDqsOuw4HDo8Ovw7TDqcKkwr3CtsK5wrLCqcO4w7LEgsOww7jDkcOzw7/EhMO5wrTDjcOGw4nDgsK5xIjEhcSPxILEhcSLw4DDmcSbw4PDpcSLxJbElMSTxIzDiMO5w67DscOMw73EmsSkxJfEmsSgw5XDrsSww5jEm8SdxKzDnMO1w57EjcStxLHEtMSixKTEr8Spw6XEisS2w4fDmsOTw4zDlsOdworCscObw5/Dm8OQw6TCk8KewpXDmsOew6LDnMOmw5rDp8Ogwp7Ct8Kgw6jDrsO1w6fDtcOyw6bDssK0w7jDrcOwwrjEgsO2w7PEhsO1xIPCtMSQw4DCt8OZw7/EisSIxIfEgMK8w63DosOlw4DDt8SLxIjEm8SKxJjDicOixKTDjMSPxJHEoMOQw6nDksOTw57DlcSaxJ7EosScxKbEmsSnxKDDnsO3w6DErMSoxKvEqMSlxLHEqcStxKrDisOPw4jDicOYw4rDj8Oaw5bDnMOUw57DmMOgw5fDmcObw57Dm8Ofw6LDmsOjwp3DucKpwqDDjcOhw7XDq8O5w6nCpcOJw7PDscOuw7jDv8Kuw4fEicKxw7TDtsSFwrXDjsK3wrjDg8K6w7/Eg8SHxIHEi8O/xIzEhcODw5zDhcSNxJPEmsSMxJrEl8SLxJfDmcSbxI/EksScw57EosSfxKnEnMSfxKXDmsS2xLfDp8OexKnEn8StxKfEtsSjxKrEqcS4w6jEgcKGw5/DjsKUwqvCt8KWw6XDlMKZw5jDkMKSwp3ClMOWw6PDpMOhw6DDncK+w6jDnMOew6nDo8OjwqLCu8Kkw7zDqcO4wqjCs8Kqw6rDusO7w4/DvMOyw7TDnsOyw7/DuMK2w4/CuMOkxIfEk8SDxIfEiMO+w4DDi8OCxILEksSTw7LEhsSTxIzDisOjw4zDucSRxKHEocSSxJHEocSXw5XDoMOXxKbEo8SZxK3EoMSqxK7EqsOgw7nDosSYxKvEscO3w7fDqMOzwobDiMOHw5XDnsOKw53CscOcwo/CqMKRwqDCqsKiw5XCrcKqwqjCrsKrwqvCsMKswrHCtMK3wrPCtcOnwrTCtcOlwrjCvsK3wrjDgMOww4DDgsK9w4fCv8O2w4XDgsODw7nDu8O3w4nCusOFwrzEjsSAxIbEgsOBw5rDg8OEw4/DhsSNxIfEmsO0xJjEjcSMxJjEgMSixJ7EosSSxJnEmMOWw6/EqsSpxK3EnsOmw53EpMSexLHEksSlxLTEtcSsxLPEs8SZxLvDk8OXw4fDjsONwovCpMOfw57DosOTwpvCksOZw5PDps
K9w6PDmsOcw7DCvcOcwp3CtsOxw7DDtMOlwq3CpMOsw7fDiMOnw7XDvsOqw73DnsSBw73DvsO+xILEhcO3w7fCtsOPxIrEicSNw77DhsK9w77Ej8SNxJbEhcSTw4TDncOGxIjEjsSZxJfElsSPw43DmMOPxJfEosO/xKHEl8Shw7fEpMSkxKrEp8SlxJ/DncO2xKPEn8SrxLPEpsOuw6XEscSuxKrDqcKewofCqcKfwp/CnMKgwrHCosKjwqfCp8KowqHCosKjwqTCpsK3wqrCq8Kqwq7CvcOCwq3Cr8OCw4TCscK6wrbCucK1wqjEhA=="}}
s=requests.session()
s.cookies.update(hcookies)
while 1:
try:
r = s.get(url,timeout=10)
data = r.json()['result']
dlist = data['list']
liveCount = data['liveCount']
with open('huser.txt','a') as f:
for i in dlist:
name = str(i['profileRoom'])
if name not in namelist:
print(time.strftime('%Y_%m_%d-%H:%M:%S'),name)
f.write(name)
f.write('\n')
if not name in namelist:
namelist.append(name)
except Exception as e:
if not 'time' in str(e):
print(e,r.json())
while 1:
try:
r = s.post(upurl,headers=upheaders,data=d,allow_redirects=False,timeout=10)
print(r.status_code,r.headers)
break
except:
print('登录失败')
finally:
r.close()
time.sleep(5)
| 98.916667
| 2,325
| 0.810447
| 485
| 5,935
| 9.791753
| 0.527835
| 0.00737
| 0.009476
| 0.011371
| 0.028638
| 0.024005
| 0.013477
| 0.013477
| 0.013477
| 0
| 0
| 0.150898
| 0.098905
| 5,935
| 59
| 2,326
| 100.59322
| 0.737098
| 0
| 0
| 0.072727
| 0
| 0.072727
| 0.668326
| 0.483221
| 0
| 1
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.036364
| 0.036364
| 0.109091
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ba7926b5ec5d11b5c5fe529b4b531ee228243847
| 39
|
py
|
Python
|
custom_components/classificationbox/__init__.py
|
LucaKaufmann/HomeAssistant-Config
|
3be0ab0a91a2ff188abf1e0a9d0dd4dea7d30d45
|
[
"MIT"
] | 19
|
2018-05-30T08:07:26.000Z
|
2020-11-29T13:31:20.000Z
|
custom_components/classificationbox/__init__.py
|
LucaKaufmann/Home-AssistantConfig
|
3be0ab0a91a2ff188abf1e0a9d0dd4dea7d30d45
|
[
"MIT"
] | 6
|
2018-05-30T17:56:20.000Z
|
2022-03-14T12:07:42.000Z
|
custom_components/classificationbox/__init__.py
|
LucaKaufmann/Home-AssistantConfig
|
3be0ab0a91a2ff188abf1e0a9d0dd4dea7d30d45
|
[
"MIT"
] | 7
|
2018-07-25T09:56:54.000Z
|
2022-03-14T11:59:37.000Z
|
"""The classificationbox component."""
| 19.5
| 38
| 0.74359
| 3
| 39
| 9.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.805556
| 0.820513
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ba7a028a544be2bb9a1f6b38fd1f8856465ebe85
| 430
|
py
|
Python
|
repositorybots/events/IssueEvent.py
|
conda/conda-bots
|
a68cff7b0318093328e355e18871518c050f5493
|
[
"BSD-3-Clause"
] | 2
|
2021-09-27T02:29:26.000Z
|
2021-10-20T19:10:39.000Z
|
repositorybots/events/IssueEvent.py
|
conda/conda-bots
|
a68cff7b0318093328e355e18871518c050f5493
|
[
"BSD-3-Clause"
] | 14
|
2021-09-09T21:16:05.000Z
|
2022-03-28T09:31:09.000Z
|
repositorybots/events/IssueEvent.py
|
conda/conda-bots
|
a68cff7b0318093328e355e18871518c050f5493
|
[
"BSD-3-Clause"
] | 2
|
2021-09-09T12:11:48.000Z
|
2022-01-28T20:25:26.000Z
|
from abc import ABC, abstractmethod
class IssueEvent(ABC):
@property
@abstractmethod
def github_conn(self):
pass
@property
@abstractmethod
def event_body(self):
pass
@abstractmethod
def get_pull_request_author(self):
pass
@abstractmethod
def add_comment(self, comment_body):
pass
@abstractmethod
def add_label(self, label_name):
pass
| 15.357143
| 40
| 0.639535
| 46
| 430
| 5.782609
| 0.478261
| 0.319549
| 0.236842
| 0.18797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.295349
| 430
| 27
| 41
| 15.925926
| 0.877888
| 0
| 0
| 0.631579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.263158
| false
| 0.263158
| 0.052632
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
ba85cc834390d71bcb89c34cf27ebb83c9e68da1
| 152
|
py
|
Python
|
sampleapi/books/admin.py
|
zachtib/SampleApi
|
becdae90501af62d655ffb6fe66719d519f37ccb
|
[
"Apache-2.0"
] | 1
|
2016-10-05T19:13:05.000Z
|
2016-10-05T19:13:05.000Z
|
sampleapi/books/admin.py
|
zachtib/SampleApi
|
becdae90501af62d655ffb6fe66719d519f37ccb
|
[
"Apache-2.0"
] | null | null | null |
sampleapi/books/admin.py
|
zachtib/SampleApi
|
becdae90501af62d655ffb6fe66719d519f37ccb
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import Author, Book
admin.site.register(Author)
admin.site.register(Book)
| 16.888889
| 32
| 0.789474
| 22
| 152
| 5.454545
| 0.545455
| 0.15
| 0.283333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 152
| 8
| 33
| 19
| 0.902256
| 0.171053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bae5caf131b8f7e02ea76b01a1df77510179fefc
| 69
|
py
|
Python
|
UVRatio/ui/__init__.py
|
chrisdevito/UVRatio
|
10411e07d2de47ee760996db484a8185323b63cc
|
[
"MIT"
] | null | null | null |
UVRatio/ui/__init__.py
|
chrisdevito/UVRatio
|
10411e07d2de47ee760996db484a8185323b63cc
|
[
"MIT"
] | null | null | null |
UVRatio/ui/__init__.py
|
chrisdevito/UVRatio
|
10411e07d2de47ee760996db484a8185323b63cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from UVRatio.ui import ui
| 13.8
| 25
| 0.623188
| 11
| 69
| 3.909091
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017241
| 0.15942
| 69
| 4
| 26
| 17.25
| 0.724138
| 0.550725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
baf4e55bff83bb830427d583fc4923d31c074397
| 135
|
py
|
Python
|
tests/inner_tests/fixture_hook/test_fixture_hook_examples.py
|
j19sch/pytest-instrument
|
53e26a2c507456327887e007fd2609e71ec52999
|
[
"MIT"
] | null | null | null |
tests/inner_tests/fixture_hook/test_fixture_hook_examples.py
|
j19sch/pytest-instrument
|
53e26a2c507456327887e007fd2609e71ec52999
|
[
"MIT"
] | null | null | null |
tests/inner_tests/fixture_hook/test_fixture_hook_examples.py
|
j19sch/pytest-instrument
|
53e26a2c507456327887e007fd2609e71ec52999
|
[
"MIT"
] | null | null | null |
import pytest
@pytest.fixture
def fixture_to_filter_out():
pass
def test_using_fixture(fixture_to_filter_out):
assert True
| 12.272727
| 46
| 0.777778
| 20
| 135
| 4.85
| 0.6
| 0.185567
| 0.309278
| 0.371134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162963
| 135
| 10
| 47
| 13.5
| 0.858407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.333333
| false
| 0.166667
| 0.166667
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
24078583aeebc925b2bf44e90ccd4c75785ac7ca
| 203
|
py
|
Python
|
fastapi_depends/__init__.py
|
troyan-dy/fastapi-depends
|
aa42aec82e36cc7be0ddc5a51a331563ac412708
|
[
"MIT"
] | null | null | null |
fastapi_depends/__init__.py
|
troyan-dy/fastapi-depends
|
aa42aec82e36cc7be0ddc5a51a331563ac412708
|
[
"MIT"
] | null | null | null |
fastapi_depends/__init__.py
|
troyan-dy/fastapi-depends
|
aa42aec82e36cc7be0ddc5a51a331563ac412708
|
[
"MIT"
] | 1
|
2022-03-02T19:38:55.000Z
|
2022-03-02T19:38:55.000Z
|
from fastapi_depends.dep_container import DepContainer
from fastapi_depends.fake_request import FakeRequest
from fastapi_depends.simple import inject
__all__ = ("FakeRequest", "inject", "DepContainer")
| 33.833333
| 54
| 0.842365
| 24
| 203
| 6.75
| 0.541667
| 0.203704
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08867
| 203
| 5
| 55
| 40.6
| 0.875676
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
24253d68faf65d827fff17d1bf9982957787807a
| 1,338
|
py
|
Python
|
scripts/field/angelic_tutoA.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/field/angelic_tutoA.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/field/angelic_tutoA.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# Created by MechAviv
# Map ID :: 940012010
# Hidden Street : Decades Later
sm.curNodeEventEnd(True)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(True, True, False, False)
sm.removeSkill(60011219)
if not "1" in sm.getQRValue(25807):
sm.levelUntil(10)
sm.setJob(6500)
sm.createQuestWithQRValue(25807, "1")
sm.resetStats()
# Unhandled Stat Changed [HP] Packet: 00 00 00 04 00 00 00 00 00 00 C2 00 00 00 FF 00 00 00 00
# Unhandled Stat Changed [MHP] Packet: 00 00 00 08 00 00 00 00 00 00 C2 00 00 00 FF 00 00 00 00
# Unhandled Stat Changed [MMP] Packet: 00 00 00 20 00 00 00 00 00 00 71 00 00 00 FF 00 00 00 00
# Unhandled Stat Changed [MHP] Packet: 00 00 00 08 00 00 00 00 00 00 58 01 00 00 FF 00 00 00 00
# Unhandled Stat Changed [HP] Packet: 00 00 00 04 00 00 00 00 00 00 58 01 00 00 FF 00 00 00 00
sm.addSP(5, True)
# [INVENTORY_GROW] [01 1C ]
# [INVENTORY_GROW] [02 1C ]
# [INVENTORY_GROW] [03 1C ]
# [INVENTORY_GROW] [04 1C ]
sm.giveSkill(60011216, 1, 1)
sm.giveSkill(60011218, 1, 1)
sm.giveSkill(60011220, 1, 1)
sm.giveSkill(60011222, 1, 1)
sm.sendDelay(300)
sm.showFieldEffect("kaiser/text0", 0)
sm.sendDelay(4200)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(False, True, False, False)
# [FORCED_STAT_RESET] []
sm.warp(940011020, 0)
| 33.45
| 99
| 0.682362
| 231
| 1,338
| 3.926407
| 0.298701
| 0.255788
| 0.251378
| 0.176406
| 0.449835
| 0.346196
| 0.332966
| 0.332966
| 0.332966
| 0.332966
| 0
| 0.273333
| 0.215247
| 1,338
| 39
| 100
| 34.307692
| 0.590476
| 0.49701
| 0
| 0.1
| 0
| 0
| 0.021244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
032326801fa121b2d59e5299e76d4c135cb76a9e
| 233
|
py
|
Python
|
zentral/contrib/mdm/views/__init__.py
|
janheise/zentral
|
cd809483573301e7d1aa5d3fc2da2c74a62405ab
|
[
"Apache-2.0"
] | 634
|
2015-10-30T00:55:40.000Z
|
2022-03-31T02:59:00.000Z
|
zentral/contrib/mdm/views/__init__.py
|
janheise/zentral
|
cd809483573301e7d1aa5d3fc2da2c74a62405ab
|
[
"Apache-2.0"
] | 145
|
2015-11-06T00:17:33.000Z
|
2022-03-16T13:30:31.000Z
|
zentral/contrib/mdm/views/__init__.py
|
janheise/zentral
|
cd809483573301e7d1aa5d3fc2da2c74a62405ab
|
[
"Apache-2.0"
] | 103
|
2015-11-07T07:08:49.000Z
|
2022-03-18T17:34:36.000Z
|
from .dep import * # NOQA
from .inventory import * # NOQA
from .management import * # NOQA
from .mdm import * # NOQA
from .ota import * # NOQA
from .scep import * # NOQA
from .setup import * # NOQA
from .user import * # NOQA
| 25.888889
| 33
| 0.656652
| 32
| 233
| 4.78125
| 0.34375
| 0.522876
| 0.640523
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.240343
| 233
| 8
| 34
| 29.125
| 0.864407
| 0.167382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
032416e827d0402ce0a5416a4c729d44d0b88d64
| 165
|
py
|
Python
|
autofront/tests/import_script.py
|
JimmyLamothe/autofront
|
d179e54411f5d53046a5fa52b4430e09b01ebaca
|
[
"BSD-3-Clause"
] | 1
|
2020-11-16T22:18:03.000Z
|
2020-11-16T22:18:03.000Z
|
autofront/tests/import_script.py
|
JimmyLamothe/autofront
|
d179e54411f5d53046a5fa52b4430e09b01ebaca
|
[
"BSD-3-Clause"
] | null | null | null |
autofront/tests/import_script.py
|
JimmyLamothe/autofront
|
d179e54411f5d53046a5fa52b4430e09b01ebaca
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import os
from simple_functions import foo, return_value
print('sys.path: ' + str(sys.path))
print('cwd: ' + os.getcwd())
print('__file__: ' + __file__)
| 20.625
| 46
| 0.709091
| 24
| 165
| 4.458333
| 0.625
| 0.130841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 165
| 7
| 47
| 23.571429
| 0.748252
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
032b1c40e52bd66083e531b48405bb610b740f02
| 125
|
py
|
Python
|
awspice/modules/__init__.py
|
Telefonica/awspice
|
da6f6ee0a8d7a7206c1ea5e7ca8bbc83716b29fb
|
[
"Apache-2.0"
] | 1
|
2020-08-04T18:22:41.000Z
|
2020-08-04T18:22:41.000Z
|
awspice/modules/__init__.py
|
Telefonica/awspice
|
da6f6ee0a8d7a7206c1ea5e7ca8bbc83716b29fb
|
[
"Apache-2.0"
] | null | null | null |
awspice/modules/__init__.py
|
Telefonica/awspice
|
da6f6ee0a8d7a7206c1ea5e7ca8bbc83716b29fb
|
[
"Apache-2.0"
] | 2
|
2019-04-03T16:56:19.000Z
|
2019-05-06T19:41:26.000Z
|
# -*- coding: utf-8 -*-
from .finder import FinderModule
from .security import SecurityModule
from .stats import StatsModule
| 25
| 36
| 0.768
| 15
| 125
| 6.4
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009259
| 0.136
| 125
| 4
| 37
| 31.25
| 0.87963
| 0.168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0349d8001089d30a6847530414266cfab287457c
| 39
|
py
|
Python
|
livereload/management/__init__.py
|
Fantomas42/django-livereload
|
1170b6729667a6164e5e47776781b2a7f6b2c0d3
|
[
"BSD-3-Clause"
] | 63
|
2015-01-02T03:07:50.000Z
|
2022-01-06T13:53:07.000Z
|
livereload/management/__init__.py
|
Fantomas42/django-livereload
|
1170b6729667a6164e5e47776781b2a7f6b2c0d3
|
[
"BSD-3-Clause"
] | 12
|
2015-02-26T20:04:17.000Z
|
2021-08-25T05:24:04.000Z
|
livereload/management/__init__.py
|
Fantomas42/django-livereload
|
1170b6729667a6164e5e47776781b2a7f6b2c0d3
|
[
"BSD-3-Clause"
] | 18
|
2015-02-24T22:23:51.000Z
|
2017-01-22T16:00:25.000Z
|
"""Management for django-livereload"""
| 19.5
| 38
| 0.74359
| 4
| 39
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.805556
| 0.820513
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
037bfd92ee3af53e6f7f943cc52fccf9bc98b1aa
| 209
|
py
|
Python
|
limis/core/environment.py
|
pstreck/limis
|
2316bacc10f0cc7fb17774511ca6f695d2e6c195
|
[
"MIT"
] | null | null | null |
limis/core/environment.py
|
pstreck/limis
|
2316bacc10f0cc7fb17774511ca6f695d2e6c195
|
[
"MIT"
] | null | null | null |
limis/core/environment.py
|
pstreck/limis
|
2316bacc10f0cc7fb17774511ca6f695d2e6c195
|
[
"MIT"
] | null | null | null |
"""
limis core - environment
Environment variables set for a project.
"""
LIMIS_PROJECT_NAME_ENVIRONMENT_VARIABLE = 'LIMIS_PROJECT_NAME'
LIMIS_PROJECT_SETTINGS_ENVIRONMENT_VARIABLE = 'LIMIS_PROJECT_SETTINGS'
| 26.125
| 70
| 0.842105
| 25
| 209
| 6.56
| 0.44
| 0.292683
| 0.195122
| 0.378049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 209
| 7
| 71
| 29.857143
| 0.863158
| 0.315789
| 0
| 0
| 0
| 0
| 0.296296
| 0.162963
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3012262a4b028060d7bdd2e986f7c5314aa0cee6
| 216
|
py
|
Python
|
seventh/flask/formatters.py
|
iamjillsanluis/flasklab
|
d6f71e3e42ab72462b04df62b4f67474c4ee5b6f
|
[
"MIT"
] | null | null | null |
seventh/flask/formatters.py
|
iamjillsanluis/flasklab
|
d6f71e3e42ab72462b04df62b4f67474c4ee5b6f
|
[
"MIT"
] | 6
|
2020-05-03T00:16:26.000Z
|
2020-07-30T01:51:38.000Z
|
seventh/flask/formatters.py
|
iamjillsanluis/flasklab
|
d6f71e3e42ab72462b04df62b4f67474c4ee5b6f
|
[
"MIT"
] | null | null | null |
def response_json(target):
def decorator(*args, **kwargs):
response = target(*args, **kwargs)
# TODO: you can add your error handling in here
return response.json()
return decorator
| 24
| 55
| 0.634259
| 26
| 216
| 5.230769
| 0.653846
| 0.176471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263889
| 216
| 8
| 56
| 27
| 0.855346
| 0.208333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
30204f58f0fa4bbaaf76a5ce66241e7607f1212e
| 79
|
py
|
Python
|
sfybook/mbasic_home_header/__init__.py
|
Scr44gr/sfybook
|
8662aee8b324a9074fdd3313c00c90e189a7c544
|
[
"Apache-2.0"
] | 1
|
2020-09-06T14:58:00.000Z
|
2020-09-06T14:58:00.000Z
|
sfybook/mbasic_home_header/__init__.py
|
Scr44gr/sfybook
|
8662aee8b324a9074fdd3313c00c90e189a7c544
|
[
"Apache-2.0"
] | null | null | null |
sfybook/mbasic_home_header/__init__.py
|
Scr44gr/sfybook
|
8662aee8b324a9074fdd3313c00c90e189a7c544
|
[
"Apache-2.0"
] | null | null | null |
"""
Author: Scr44gr
"""
from sfybook.mbasic_home_header.pages import Pages
| 15.8
| 51
| 0.734177
| 10
| 79
| 5.6
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029851
| 0.151899
| 79
| 4
| 52
| 19.75
| 0.80597
| 0.189873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
302f2a140ca2f8982db3d2ff2b0371362eecaee9
| 169
|
py
|
Python
|
nlp_project/src/crf.py
|
Lord-Cthulhu/NLP_Tokenizer
|
7d766e5bd5a88d1f49636fd19bfb3b6bcbeb6342
|
[
"RSA-MD"
] | null | null | null |
nlp_project/src/crf.py
|
Lord-Cthulhu/NLP_Tokenizer
|
7d766e5bd5a88d1f49636fd19bfb3b6bcbeb6342
|
[
"RSA-MD"
] | null | null | null |
nlp_project/src/crf.py
|
Lord-Cthulhu/NLP_Tokenizer
|
7d766e5bd5a88d1f49636fd19bfb3b6bcbeb6342
|
[
"RSA-MD"
] | null | null | null |
import tensorflow as tf
from keras_crf import CRFModel
from keras.layers import LSTM, Embedding, Dense, Dropout, Bidirectional
from keras_contrib.layers import CRF
| 18.777778
| 71
| 0.816568
| 24
| 169
| 5.666667
| 0.625
| 0.198529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147929
| 169
| 8
| 72
| 21.125
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
305d9c2046a0862470e8df3755376ec710c07dec
| 16
|
py
|
Python
|
Serial.py
|
rohanbaba/RMCS-220X-RPi
|
885fc2b1186682cbe39f02f452eea11ae24ffb0b
|
[
"MIT"
] | null | null | null |
Serial.py
|
rohanbaba/RMCS-220X-RPi
|
885fc2b1186682cbe39f02f452eea11ae24ffb0b
|
[
"MIT"
] | null | null | null |
Serial.py
|
rohanbaba/RMCS-220X-RPi
|
885fc2b1186682cbe39f02f452eea11ae24ffb0b
|
[
"MIT"
] | null | null | null |
print("Serial")
| 8
| 15
| 0.6875
| 2
| 16
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 16
| 1
| 16
| 16
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
306ac51f7b4b05c17e7481f1a1c1252ac2e373da
| 56
|
py
|
Python
|
Ene-Jun-2021/pena-balderas-bryan/prueba.py
|
bryanbalderas/DAS_Sistemas
|
1e31f088c0de7134471025a5730b0abfc19d936e
|
[
"MIT"
] | 41
|
2017-09-26T09:36:32.000Z
|
2022-03-19T18:05:25.000Z
|
Ene-Jun-2021/pena-balderas-bryan/prueba.py
|
bryanbalderas/DAS_Sistemas
|
1e31f088c0de7134471025a5730b0abfc19d936e
|
[
"MIT"
] | 67
|
2017-09-11T05:06:12.000Z
|
2022-02-14T04:44:04.000Z
|
Ene-Jun-2021/pena-balderas-bryan/prueba.py
|
bryanbalderas/DAS_Sistemas
|
1e31f088c0de7134471025a5730b0abfc19d936e
|
[
"MIT"
] | 210
|
2017-09-01T00:10:08.000Z
|
2022-03-19T18:05:12.000Z
|
print('prueba para clase de DAS,cambio para nuevo pull')
| 56
| 56
| 0.785714
| 10
| 56
| 4.4
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 56
| 1
| 56
| 56
| 0.897959
| 0
| 0
| 0
| 0
| 0
| 0.824561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
306d21d685f6390715be2f03976b50fd3da5049d
| 105
|
py
|
Python
|
core/backend/user/admin.py
|
Djacket/djacket
|
8f5258ae34ab2fb2849324145681e6d4932a22ba
|
[
"MIT"
] | 85
|
2016-02-19T06:46:29.000Z
|
2022-03-25T20:20:47.000Z
|
core/backend/user/admin.py
|
Djacket/djacket
|
8f5258ae34ab2fb2849324145681e6d4932a22ba
|
[
"MIT"
] | 15
|
2016-04-08T02:46:11.000Z
|
2022-01-29T08:20:45.000Z
|
core/backend/user/admin.py
|
Djacket/djacket
|
8f5258ae34ab2fb2849324145681e6d4932a22ba
|
[
"MIT"
] | 20
|
2016-04-08T02:39:08.000Z
|
2021-12-16T14:05:28.000Z
|
from django.contrib import admin
from user.models import UserProfile
admin.site.register(UserProfile)
| 15
| 35
| 0.828571
| 14
| 105
| 6.214286
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 105
| 6
| 36
| 17.5
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3080b8a3c10dc880fcbc82fe92a34df9b8dd2b6c
| 21
|
py
|
Python
|
python/testData/copyPaste/Indent7709.dst.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/testData/copyPaste/Indent7709.dst.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/copyPaste/Indent7709.dst.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
a = 1
<caret>
b = 2
| 4.2
| 7
| 0.428571
| 5
| 21
| 1.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 0.380952
| 21
| 4
| 8
| 5.25
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
061e768ebbd1f3a14a7b9d3348eeff85f5bb0a7c
| 8,176
|
py
|
Python
|
tests/unit/test_download_hashes.py
|
jamezpolley/pip
|
0b9beab59c1bd5b634e198e919b9173690fe1d65
|
[
"MIT"
] | 1
|
2019-06-27T11:57:35.000Z
|
2019-06-27T11:57:35.000Z
|
tests/unit/test_download_hashes.py
|
jamezpolley/pip
|
0b9beab59c1bd5b634e198e919b9173690fe1d65
|
[
"MIT"
] | 1
|
2021-08-07T12:15:25.000Z
|
2021-08-07T12:15:25.000Z
|
tests/unit/test_download_hashes.py
|
jamezpolley/pip
|
0b9beab59c1bd5b634e198e919b9173690fe1d65
|
[
"MIT"
] | 1
|
2020-01-06T15:39:00.000Z
|
2020-01-06T15:39:00.000Z
|
import pytest
from pip.download import _get_hash_from_file, _check_hash
from pip.exceptions import InstallationError
from pip.index import Link
def test_get_hash_from_file_md5(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#md5=d41d8cd98f00b204e9800998ecf8427e"
)
download_hash = _get_hash_from_file(file_path, file_link)
assert download_hash.digest_size == 16
assert download_hash.hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"
def test_get_hash_from_file_sha1(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#sha1=da39a3ee5e6b4b0d3255bfef95601890afd80709"
)
download_hash = _get_hash_from_file(file_path, file_link)
assert download_hash.digest_size == 20
assert download_hash.hexdigest() == (
"da39a3ee5e6b4b0d3255bfef95601890afd80709"
)
def test_get_hash_from_file_sha224(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#sha224=d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f"
)
download_hash = _get_hash_from_file(file_path, file_link)
assert download_hash.digest_size == 28
assert download_hash.hexdigest() == (
"d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f"
)
def test_get_hash_from_file_sha384(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#sha384=38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e"
"1da274edebfe76f65fbd51ad2f14898b95b"
)
download_hash = _get_hash_from_file(file_path, file_link)
assert download_hash.digest_size == 48
assert download_hash.hexdigest() == (
"38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274e"
"debfe76f65fbd51ad2f14898b95b"
)
def test_get_hash_from_file_sha256(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852"
"b855"
)
download_hash = _get_hash_from_file(file_path, file_link)
assert download_hash.digest_size == 32
assert download_hash.hexdigest() == (
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
def test_get_hash_from_file_sha512(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#sha512=cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36"
"ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
)
download_hash = _get_hash_from_file(file_path, file_link)
assert download_hash.digest_size == 64
assert download_hash.hexdigest() == (
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0"
"d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
)
def test_get_hash_from_file_unknown(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#unknown_hash=d41d8cd98f00b204e9800998ecf8427e"
)
download_hash = _get_hash_from_file(file_path, file_link)
assert download_hash is None
def test_check_hash_md5_valid(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#md5=d41d8cd98f00b204e9800998ecf8427e"
)
download_hash = _get_hash_from_file(file_path, file_link)
_check_hash(download_hash, file_link)
def test_check_hash_md5_invalid(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link("http://testserver/gmpy-1.15.tar.gz#md5=deadbeef")
download_hash = _get_hash_from_file(file_path, file_link)
with pytest.raises(InstallationError):
_check_hash(download_hash, file_link)
def test_check_hash_sha1_valid(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#sha1=da39a3ee5e6b4b0d3255bfef95601890afd80709"
)
download_hash = _get_hash_from_file(file_path, file_link)
_check_hash(download_hash, file_link)
def test_check_hash_sha1_invalid(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha1=deadbeef")
download_hash = _get_hash_from_file(file_path, file_link)
with pytest.raises(InstallationError):
_check_hash(download_hash, file_link)
def test_check_hash_sha224_valid(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#sha224=d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f'"
)
download_hash = _get_hash_from_file(file_path, file_link)
_check_hash(download_hash, file_link)
def test_check_hash_sha224_invalid(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha224=deadbeef")
download_hash = _get_hash_from_file(file_path, file_link)
with pytest.raises(InstallationError):
_check_hash(download_hash, file_link)
def test_check_hash_sha384_valid(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#sha384=38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6"
"e1da274edebfe76f65fbd51ad2f14898b95b"
)
download_hash = _get_hash_from_file(file_path, file_link)
_check_hash(download_hash, file_link)
def test_check_hash_sha384_invalid(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha384=deadbeef")
download_hash = _get_hash_from_file(file_path, file_link)
with pytest.raises(InstallationError):
_check_hash(download_hash, file_link)
def test_check_hash_sha256_valid(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b785"
"2b855"
)
download_hash = _get_hash_from_file(file_path, file_link)
_check_hash(download_hash, file_link)
def test_check_hash_sha256_invalid(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha256=deadbeef")
download_hash = _get_hash_from_file(file_path, file_link)
with pytest.raises(InstallationError):
_check_hash(download_hash, file_link)
def test_check_hash_sha512_valid(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#sha512=cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36c"
"e9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
)
download_hash = _get_hash_from_file(file_path, file_link)
_check_hash(download_hash, file_link)
def test_check_hash_sha512_invalid(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link("http://testserver/gmpy-1.15.tar.gz#sha512=deadbeef")
download_hash = _get_hash_from_file(file_path, file_link)
with pytest.raises(InstallationError):
_check_hash(download_hash, file_link)
def test_check_hasher_mismsatch(data):
file_path = data.packages.join("gmpy-1.15.tar.gz")
file_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#md5=d41d8cd98f00b204e9800998ecf8427e"
)
other_link = Link(
"http://testserver/gmpy-1.15.tar.gz"
"#sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b785"
"2b855"
)
download_hash = _get_hash_from_file(file_path, file_link)
with pytest.raises(InstallationError):
_check_hash(download_hash, other_link)
| 30.969697
| 79
| 0.731409
| 1,007
| 8,176
| 5.595829
| 0.072493
| 0.073824
| 0.050932
| 0.07276
| 0.768412
| 0.764508
| 0.737178
| 0.737178
| 0.737178
| 0.737178
| 0
| 0.159386
| 0.162794
| 8,176
| 263
| 80
| 31.087452
| 0.663842
| 0
| 0
| 0.56044
| 0
| 0
| 0.319839
| 0.180528
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.10989
| false
| 0
| 0.021978
| 0
| 0.131868
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
063364c6842c15d8b14becc36b89cf99f7946a78
| 61
|
py
|
Python
|
dynd/benchmarks/__init__.py
|
mwiebe/dynd-python
|
45ffecaf7887761a5634140f0ed120b33ace58a3
|
[
"BSD-2-Clause"
] | 93
|
2015-01-29T14:00:57.000Z
|
2021-11-23T14:37:27.000Z
|
dynd/benchmarks/__init__.py
|
ContinuumIO/dynd-python
|
bae7afb8eb604b0bce09befc9e896c8ec8357aaa
|
[
"BSD-2-Clause"
] | 143
|
2015-01-04T12:30:24.000Z
|
2016-09-29T18:36:22.000Z
|
dynd/benchmarks/__init__.py
|
ContinuumIO/dynd-python
|
bae7afb8eb604b0bce09befc9e896c8ec8357aaa
|
[
"BSD-2-Clause"
] | 20
|
2015-06-08T11:54:46.000Z
|
2021-03-09T07:57:25.000Z
|
try:
from pycuda import autoinit
except ImportError:
pass
| 15.25
| 29
| 0.786885
| 8
| 61
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180328
| 61
| 4
| 30
| 15.25
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
063b61ac2b90fe757f49547346fd44173e48ddd3
| 37
|
py
|
Python
|
tests/__init__.py
|
smitchandarana/FredGdp
|
fe836d7949e265666d4acc2dbb712864d0cfd083
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
smitchandarana/FredGdp
|
fe836d7949e265666d4acc2dbb712864d0cfd083
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
smitchandarana/FredGdp
|
fe836d7949e265666d4acc2dbb712864d0cfd083
|
[
"MIT"
] | null | null | null |
"""Unit test package for fredgdp."""
| 18.5
| 36
| 0.675676
| 5
| 37
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 37
| 1
| 37
| 37
| 0.78125
| 0.810811
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
06ad6373313bb361891ab628ca6299807dd5fef0
| 63
|
py
|
Python
|
src/hub/dataload/sources/umls/__init__.py
|
mlebeur/mygene.info
|
e71ca89c2b1c546c260101286ad5419503fd6653
|
[
"Apache-2.0"
] | 78
|
2017-05-26T08:38:25.000Z
|
2022-02-25T08:55:31.000Z
|
src/hub/dataload/sources/umls/__init__.py
|
mlebeur/mygene.info
|
e71ca89c2b1c546c260101286ad5419503fd6653
|
[
"Apache-2.0"
] | 105
|
2017-05-18T21:57:13.000Z
|
2022-03-18T21:41:47.000Z
|
src/hub/dataload/sources/umls/__init__.py
|
mlebeur/mygene.info
|
e71ca89c2b1c546c260101286ad5419503fd6653
|
[
"Apache-2.0"
] | 19
|
2017-06-12T18:31:54.000Z
|
2021-11-10T00:04:43.000Z
|
from .upload import UMLSUploader
from .dump import UMLSDumper
| 15.75
| 32
| 0.825397
| 8
| 63
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 63
| 3
| 33
| 21
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
06b201d6220d6dc039a1006bf5ad0cd54def299c
| 24
|
py
|
Python
|
lib/model/siamese_net/proposal_target_layer.py
|
YeLyuUT/FastVOD
|
707dcf0d88a901d2db0b7cf24096801fbdd8735c
|
[
"MIT"
] | 1
|
2020-05-12T14:07:00.000Z
|
2020-05-12T14:07:00.000Z
|
lib/model/siamese_net/proposal_target_layer.py
|
YeLyuUT/FastVOD
|
707dcf0d88a901d2db0b7cf24096801fbdd8735c
|
[
"MIT"
] | null | null | null |
lib/model/siamese_net/proposal_target_layer.py
|
YeLyuUT/FastVOD
|
707dcf0d88a901d2db0b7cf24096801fbdd8735c
|
[
"MIT"
] | 1
|
2019-12-18T09:43:48.000Z
|
2019-12-18T09:43:48.000Z
|
# sample training pairs.
| 24
| 24
| 0.791667
| 3
| 24
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.904762
| 0.916667
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ebf7acb38f0bc056d0475f87693461b7eb5ed277
| 5,319
|
py
|
Python
|
pendulopodes/dynamics.py
|
nwunderly/pendulopodes
|
e9daa8c76625b806fe81d2c623a34493390a62d0
|
[
"MIT"
] | 1
|
2021-05-06T00:19:52.000Z
|
2021-05-06T00:19:52.000Z
|
pendulopodes/dynamics.py
|
nwunderly/pendulopodes
|
e9daa8c76625b806fe81d2c623a34493390a62d0
|
[
"MIT"
] | null | null | null |
pendulopodes/dynamics.py
|
nwunderly/pendulopodes
|
e9daa8c76625b806fe81d2c623a34493390a62d0
|
[
"MIT"
] | null | null | null |
import numpy as np
from .constants import g
"""
Assumptions:
(constant) unit length
unit mass
rotate about origin
2-dimensional (for now)
theta = 0 at (x, y) = (1, 0)
Coordinate system:
|
|
------------|------------> Y
|
|
|
|
|
V
X
Angular kinematics: (RADIANS)
theta: angle
omega: angular velocity
alpha: angular acceleration
"""
def inertial_to_polar(x, y):
r = np.sqrt(x**2 + y**2)
theta = np.arctan(y/x)
return r, theta
def polar_to_inertial(r, theta):
x = r * np.cos(theta)
y = r * np.sin(theta)
return x, y
# def eqm_single_simple_pendulum(theta, omega):
# """Single-element simple pendulum equation of motion.
# (Shoutout to Derek Paley)
#
# tension = m*g*cos(theta)+m*l*theta
# alpha = -g/l*sin(theta
# """
# alpha = -g*np.sin(theta)
#
# return omega, alpha
#
#
# def system_single_simple_pendulum(t, y):
# """System of differential equations for a single-element simple pendulum.
#
# y = [theta, omega]
# y_dot = [omega, alpha]
# """
# theta, omega = y
#
# omega, alpha = eqm_single_simple_pendulum(theta, omega)
# y_dot = [omega, alpha]
#
# return y_dot
#
#
# def eqm_double_simple_pendulum(theta1, omega1, theta2, omega2):
# """Two-element simple pendulum equation of motion.
#
# Equations found at https://www.myphysicslab.com/pendulum/double-pendulum-en.html
# """
# alpha1_n = -g*(2*m1 + m2)*np.sin(theta1) - m2*g*np.sin(theta1 - 2*theta2) - 2*np.sin(theta1 - theta2)*m2*(omega2**2 * l2 + omega1**2 * l1 * np.cos(theta1 - theta2))
# alpha1_d = l1 * (2*m1 + m2 - m2*np.cos(2*theta1 - 2*theta2))
# alpha1 = alpha1_n / alpha1_d
#
# alpha2_n = 2*np.sin(theta1 - theta2) * (omega1**2 * l1 * (m1 + m2) + g*(m1 + m2) * np.cos(theta1) + omega2**2 * l2 * m2 * np.cos(theta1 - theta2))
# alpha2_d = l2 * (2*m1 + m2 - m2*np.cos(2*theta1 - 2*theta2))
# alpha2 = alpha2_n / alpha2_d
#
# return omega1, alpha1, omega2, alpha2
#
#
# def system_double_simple_pendlum(t, y):
# """System of differential equations for a two-element simple pendulum.
#
# y = [theta1, omega1, theta2, omega2]
# y_dot = [omega1, alpha1, omega2, alpha2]
# """
# theta1, omega1, theta2, omega2 = y
#
# omega1, alpha1, omega2, alpha2 = eqm_double_simple_pendulum(theta1, omega1, theta2, omega2)
# y_dot = [omega1, alpha1, omega2, alpha2]
#
# return y_dot
class NElementPendulum:
    """Simple pendulum with one or more rigid elements.

    Per-element parameters (length, mass, theta0, omega0) are tuples; a
    shorter tuple is expanded by tuple repetition to cover all elements,
    so its length must evenly divide ``element_count``.

    NOTE(review): ``g`` used in the equations of motion is assumed to be a
    module-level gravitational constant defined elsewhere in this file —
    confirm.
    """
    def __init__(self, element_count, *, length=(1,), mass=(1,), theta0=(np.pi/2,), omega0=(0,)):
        # Each parameter tuple must tile evenly across the elements.
        # NOTE(review): assert is stripped under ``python -O`` — validation
        # only happens in debug runs.
        if element_count > 1:
            assert element_count % len(length) == 0
            assert element_count % len(mass) == 0
            assert element_count % len(theta0) == 0
            assert element_count % len(omega0) == 0
        self.element_count = element_count
        # Tuple repetition expands each parameter to one entry per element.
        self.length = length*(element_count//len(length))
        self.mass = mass*(element_count//len(mass))
        self.theta0 = theta0*(element_count//len(theta0))
        self.omega0 = omega0*(element_count//len(omega0))
    def eqm_single_simple_pendulum(self, theta, omega):
        """Single-element simple pendulum equation of motion.
        (Shoutout to Derek Paley)
        tension = m*g*cos(theta) + m*l*theta
        alpha = -g/l*sin(theta)

        :param theta: pendulum angle (radians)
        :param omega: angular velocity
        :return: (omega, alpha) — angular velocity and angular acceleration
        """
        alpha = -g / self.length[0] * np.sin(theta)
        return omega, alpha
    def system_single_simple_pendulum(self, t, y):
        """System of differential equations for a single-element simple pendulum.
        Shaped for ODE solvers (``t`` is unused but required by the
        conventional ``f(t, y)`` solver signature).
        y = [theta, omega]
        y_dot = [omega, alpha]
        """
        theta, omega = y
        omega, alpha = self.eqm_single_simple_pendulum(theta, omega)
        y_dot = [omega, alpha]
        return y_dot
    def eqm_double_simple_pendulum(self, theta1, omega1, theta2, omega2):
        """Two-element simple pendulum equation of motion.
        Equations found at https://www.myphysicslab.com/pendulum/double-pendulum-en.html

        :return: (omega1, alpha1, omega2, alpha2)
        """
        m1 = self.mass[0]
        m2 = self.mass[1]
        l1 = self.length[0]
        l2 = self.length[1]
        # Numerator / denominator of the first bob's angular acceleration.
        alpha1_n = -g * (2 * m1 + m2) * np.sin(theta1) - m2 * g * np.sin(theta1 - 2 * theta2) - 2 * np.sin(theta1 - theta2) * m2 * (
                omega2 ** 2 * l2 + omega1 ** 2 * l1 * np.cos(theta1 - theta2))
        alpha1_d = l1 * (2 * m1 + m2 - m2 * np.cos(2 * theta1 - 2 * theta2))
        alpha1 = alpha1_n / alpha1_d
        # Numerator / denominator of the second bob's angular acceleration.
        alpha2_n = 2 * np.sin(theta1 - theta2) * (
                omega1 ** 2 * l1 * (m1 + m2) + g * (m1 + m2) * np.cos(theta1) + omega2 ** 2 * l2 * m2 * np.cos(theta1 - theta2))
        alpha2_d = l2 * (2 * m1 + m2 - m2 * np.cos(2 * theta1 - 2 * theta2))
        alpha2 = alpha2_n / alpha2_d
        return omega1, alpha1, omega2, alpha2
    def system_double_simple_pendlum(self, t, y):
        """System of differential equations for a two-element simple pendulum.
        (Method name keeps the original "pendlum" spelling so existing
        callers keep working.)
        y = [theta1, omega1, theta2, omega2]
        y_dot = [omega1, alpha1, omega2, alpha2]
        """
        theta1, omega1, theta2, omega2 = y
        omega1, alpha1, omega2, alpha2 = self.eqm_double_simple_pendulum(theta1, omega1, theta2, omega2)
        y_dot = [omega1, alpha1, omega2, alpha2]
        return y_dot
| 30.568966
| 170
| 0.582816
| 711
| 5,319
| 4.241913
| 0.137834
| 0.083554
| 0.055703
| 0.06366
| 0.765252
| 0.733753
| 0.726127
| 0.726127
| 0.726127
| 0.723475
| 0
| 0.059221
| 0.27618
| 5,319
| 173
| 171
| 30.745665
| 0.724156
| 0.42602
| 0
| 0.040816
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 1
| 0.142857
| false
| 0
| 0.040816
| 0
| 0.326531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2371bd1c2612bfb16e91537d3cbfc4b5744328a2
| 203
|
py
|
Python
|
utils/lock.py
|
DrugowitschLab/motion-structure-identification
|
908f084b36c7387daf0cbfe75f16bab70cf96db9
|
[
"MIT"
] | null | null | null |
utils/lock.py
|
DrugowitschLab/motion-structure-identification
|
908f084b36c7387daf0cbfe75f16bab70cf96db9
|
[
"MIT"
] | null | null | null |
utils/lock.py
|
DrugowitschLab/motion-structure-identification
|
908f084b36c7387daf0cbfe75f16bab70cf96db9
|
[
"MIT"
] | null | null | null |
class Lock:
    """Minimal non-blocking lock flag.

    ``lock`` fails with ``AssertionError`` (carrying *msg*) when the lock is
    already held; ``unlock`` always releases.
    """
    def __init__(self):
        # Starts released.
        self.locked = False
    def lock(self, msg):
        """Acquire the lock; raise AssertionError(msg) if already held."""
        assert not self.locked, msg
        self.locked = True
    def unlock(self):
        """Release the lock unconditionally."""
        self.locked = False
| 20.3
| 35
| 0.576355
| 26
| 203
| 4.346154
| 0.461538
| 0.353982
| 0.247788
| 0.336283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.330049
| 203
| 10
| 36
| 20.3
| 0.830882
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.375
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
23764f172090631ac31ec7f17f09516d1e0246f6
| 124
|
py
|
Python
|
tests/test_scripts/__init__.py
|
morganwl/turnovertools
|
ea911853033ed5087b40852b5adc3b8f5d0a903d
|
[
"MIT"
] | null | null | null |
tests/test_scripts/__init__.py
|
morganwl/turnovertools
|
ea911853033ed5087b40852b5adc3b8f5d0a903d
|
[
"MIT"
] | 3
|
2021-03-22T00:44:24.000Z
|
2021-06-26T19:32:31.000Z
|
tests/test_scripts/__init__.py
|
morganwl/turnovertools
|
ea911853033ed5087b40852b5adc3b8f5d0a903d
|
[
"MIT"
] | null | null | null |
"""Test suites for the various scripts that interface with the
turnovertools libraries."""
from .test_insert_umid import *
| 24.8
| 62
| 0.790323
| 17
| 124
| 5.647059
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137097
| 124
| 4
| 63
| 31
| 0.897196
| 0.677419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
88dfd5ee854bcd48d4fa1dc40aaa126ba1e06da6
| 193
|
py
|
Python
|
matchzoo/models/__init__.py
|
JacobPolloreno/MatchZoo
|
e49d619a52b2e96b6f0e8e76164d76f623210198
|
[
"Apache-2.0"
] | null | null | null |
matchzoo/models/__init__.py
|
JacobPolloreno/MatchZoo
|
e49d619a52b2e96b6f0e8e76164d76f623210198
|
[
"Apache-2.0"
] | null | null | null |
matchzoo/models/__init__.py
|
JacobPolloreno/MatchZoo
|
e49d619a52b2e96b6f0e8e76164d76f623210198
|
[
"Apache-2.0"
] | null | null | null |
from .naive_model import NaiveModel
from .dssm_model import DSSMModel
from .cdssm_model import CDSSMModel
from .dense_baseline_model import DenseBaselineModel
from .arci_model import ArcIModel
| 32.166667
| 52
| 0.870466
| 26
| 193
| 6.230769
| 0.538462
| 0.339506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103627
| 193
| 5
| 53
| 38.6
| 0.936416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
88fd9e5a7fa3b960439e0b6e392fa703ec132322
| 1,111
|
py
|
Python
|
aitoolbox/experiment/core_metrics/regression.py
|
mv1388/AIToolbox
|
c64ac4810a02d230ce471d86b758e82ea232a7e7
|
[
"MIT"
] | 3
|
2019-10-12T12:24:09.000Z
|
2020-08-02T02:42:43.000Z
|
aitoolbox/experiment/core_metrics/regression.py
|
mv1388/aitoolbox
|
1060435e6cbdfd19abcb726c4080b663536b7467
|
[
"MIT"
] | 3
|
2020-04-10T14:07:07.000Z
|
2020-04-22T19:04:38.000Z
|
aitoolbox/experiment/core_metrics/regression.py
|
mv1388/aitoolbox
|
1060435e6cbdfd19abcb726c4080b663536b7467
|
[
"MIT"
] | null | null | null |
from aitoolbox.experiment.core_metrics.abstract_metric import AbstractBaseMetric
from sklearn.metrics import mean_squared_error, mean_absolute_error
class MeanSquaredErrorMetric(AbstractBaseMetric):
    """Mean squared error (MSE) metric over model predictions."""
    def __init__(self, y_true, y_predicted):
        """Build the MSE metric.

        Args:
            y_true (numpy.array or list): ground truth targets
            y_predicted (numpy.array or list): predicted targets
        """
        super().__init__(y_true, y_predicted, metric_name='Mean_squared_error')
    def calculate_metric(self):
        """Compute MSE from the stored targets and predictions."""
        return mean_squared_error(self.y_true, self.y_predicted)
class MeanAbsoluteErrorMetric(AbstractBaseMetric):
    """Mean absolute error (MAE) metric over model predictions."""
    def __init__(self, y_true, y_predicted):
        """Build the MAE metric.

        Args:
            y_true (numpy.array or list): ground truth targets
            y_predicted (numpy.array or list): predicted targets
        """
        super().__init__(y_true, y_predicted, metric_name='Mean_absolute_error')
    def calculate_metric(self):
        """Compute MAE from the stored targets and predictions."""
        return mean_absolute_error(self.y_true, self.y_predicted)
| 34.71875
| 97
| 0.718272
| 133
| 1,111
| 5.62406
| 0.285714
| 0.053476
| 0.072193
| 0.069519
| 0.705882
| 0.705882
| 0.705882
| 0.532086
| 0.532086
| 0.532086
| 0
| 0
| 0.205221
| 1,111
| 31
| 98
| 35.83871
| 0.847112
| 0.251125
| 0
| 0.333333
| 0
| 0
| 0.049007
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
0004a1432c50a486fee18e368a1eb6ed79f33d00
| 39
|
py
|
Python
|
modules/2.79/bpy/types/TextureNodeTexBlend.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/TextureNodeTexBlend.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/TextureNodeTexBlend.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
class TextureNodeTexBlend:
    """Stub type for autocomplete/typing purposes only — no behavior.

    NOTE(review): part of a fake ``bpy`` module (judging by the file path);
    the real implementation lives inside Blender.
    """
    pass
| 6.5
| 26
| 0.717949
| 3
| 39
| 9.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25641
| 39
| 5
| 27
| 7.8
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
000a6432772d70f9ddc37eab2166d959e80a9b9e
| 32
|
py
|
Python
|
test/login.py
|
jinhongyi/test007
|
b48750ed30a690ceb1ec739b9d181b6ecb82b0a0
|
[
"MIT"
] | null | null | null |
test/login.py
|
jinhongyi/test007
|
b48750ed30a690ceb1ec739b9d181b6ecb82b0a0
|
[
"MIT"
] | null | null | null |
test/login.py
|
jinhongyi/test007
|
b48750ed30a690ceb1ec739b9d181b6ecb82b0a0
|
[
"MIT"
] | null | null | null |
# Module-level numeric constants (values preserved exactly).
num1, num2, num3, num4 = 10, 20, 30, 40
| 6.4
| 7
| 0.75
| 8
| 32
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 0.125
| 32
| 4
| 8
| 8
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
cc52c5c7709da52f4cca0eba70ca5b79485d6260
| 87
|
py
|
Python
|
grove/helper/__init__.py
|
chousemath/grove.py
|
ebab518ace0c8efe34a56078c9a876368d80781f
|
[
"MIT"
] | null | null | null |
grove/helper/__init__.py
|
chousemath/grove.py
|
ebab518ace0c8efe34a56078c9a876368d80781f
|
[
"MIT"
] | null | null | null |
grove/helper/__init__.py
|
chousemath/grove.py
|
ebab518ace0c8efe34a56078c9a876368d80781f
|
[
"MIT"
] | null | null | null |
from .helper import SlotHelper
from .os_sched import *
# __all__ = [ 'SlotHelper' ]
| 12.428571
| 30
| 0.701149
| 10
| 87
| 5.6
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195402
| 87
| 6
| 31
| 14.5
| 0.8
| 0.298851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cc52f4467a8fefa5e93405ceaba9750cb564e846
| 693
|
py
|
Python
|
db/queries.py
|
hamedsh/healthCheck
|
8f6b8ffffc1f1d8849a58b4966e54d30ead9556b
|
[
"Apache-2.0"
] | null | null | null |
db/queries.py
|
hamedsh/healthCheck
|
8f6b8ffffc1f1d8849a58b4966e54d30ead9556b
|
[
"Apache-2.0"
] | null | null | null |
db/queries.py
|
hamedsh/healthCheck
|
8f6b8ffffc1f1d8849a58b4966e54d30ead9556b
|
[
"Apache-2.0"
] | null | null | null |
# SQL templates for the services table; placeholders are filled via str.format.
# NOTE(review): 'add_service' builds SQL by string formatting, which is
# vulnerable to SQL injection if any field comes from untrusted input —
# consider parameterized queries instead. Flagged only; behavior unchanged.
QUERIES = {
    'add_service':
        'insert into services(name, type, repeat_period, metadata, status) values("{name}", {type}, {repeat_period}, "{metadata}", 1)',
    'services_last_row_id': 'select max(id) from services',
    # Joins service_types to resolve the human-readable type name.
    'get_active_services': 'select services.id, services.name, services.type, service_types.Type, services.repeat_period, services.metadata from services INNER join service_types on services.type = service_types.id',
    # Same as above, filtered by a service type id substituted via .format.
    'get_active_services_type': 'select services.id, services.name, services.type, service_types.Type, services.repeat_period, services.metadata from services INNER join service_types on services.type = service_types.id where services.type = {}'
}
| 77
| 245
| 0.756133
| 92
| 693
| 5.48913
| 0.293478
| 0.142574
| 0.150495
| 0.190099
| 0.716832
| 0.605941
| 0.605941
| 0.605941
| 0.605941
| 0.605941
| 0
| 0.001645
| 0.122655
| 693
| 8
| 246
| 86.625
| 0.828947
| 0
| 0
| 0
| 0
| 0.428571
| 0.89899
| 0.10101
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
cc81aa64d2274c4ce850357fafef27da6f801644
| 127
|
py
|
Python
|
SampleInvoiceCRUDUsingDict/apps.py
|
juned8236/quickbook
|
32757c911d176131d71ccd532c07378950962053
|
[
"Apache-2.0"
] | null | null | null |
SampleInvoiceCRUDUsingDict/apps.py
|
juned8236/quickbook
|
32757c911d176131d71ccd532c07378950962053
|
[
"Apache-2.0"
] | 2
|
2020-06-06T00:52:36.000Z
|
2021-06-10T22:40:04.000Z
|
SampleInvoiceCRUDUsingDict/apps.py
|
juned8236/quickbook
|
32757c911d176131d71ccd532c07378950962053
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class SampleinvoicecrudusingdictConfig(AppConfig):
    """Django application configuration for the SampleInvoiceCRUDUsingDict app."""
    name = 'SampleInvoiceCRUDUsingDict'
| 21.166667
| 50
| 0.826772
| 10
| 127
| 10.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11811
| 127
| 5
| 51
| 25.4
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0.204724
| 0.204724
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cca1569749bc6de7062709323854b9efde4deb62
| 1,032
|
py
|
Python
|
tests/test_face_recognition.py
|
bagashvilit/facial_recognition_bias
|
abe87302b1d452cbd9100773a16c127f4d2ab546
|
[
"MIT"
] | null | null | null |
tests/test_face_recognition.py
|
bagashvilit/facial_recognition_bias
|
abe87302b1d452cbd9100773a16c127f4d2ab546
|
[
"MIT"
] | 1
|
2021-11-15T04:09:22.000Z
|
2021-11-15T04:09:22.000Z
|
tests/test_face_recognition.py
|
bagashvilit/facial_recognition_bias
|
abe87302b1d452cbd9100773a16c127f4d2ab546
|
[
"MIT"
] | 1
|
2021-11-17T05:10:08.000Z
|
2021-11-17T05:10:08.000Z
|
import pickle
import cv2
import joblib
import pytest
import sklearn
from pyimagesearch.rgbhistogram import RGBHistogram
@pytest.mark.parametrize(
    "input_image,expected_gender",
    [("tests/images/17_1_0.jpg", 1), ("tests/images/23_1_2.jpg", 1)],
)
def test_SVM(input_image, expected_gender):
    """Check the pickled SVM model predicts the expected gender label.

    NOTE(review): label 1 presumably encodes one gender class — confirm
    against the training code.
    """
    # 8x8x8-bin RGB histogram used as the feature descriptor.
    desc = RGBHistogram([8, 8, 8])
    model = pickle.load(open("src/SVM/SVM_model.pkl", "rb"))
    image = cv2.imread(input_image)
    features = desc.describe(image)
    # predict() returns an array; take the single prediction.
    gender = (model.predict([features.flatten()]))[0]
    assert gender == expected_gender
@pytest.mark.parametrize(
    "input_image,expected_gender",
    [("tests/images/17_1_0.jpg", 1), ("tests/images/23_1_2.jpg", 1)],
)
def test_Random_Forest(input_image, expected_gender):
    """Check the pickled random-forest model predicts the expected gender label.

    Mirrors test_SVM but loads the RandomForest pickle instead.
    """
    # 8x8x8-bin RGB histogram used as the feature descriptor.
    desc = RGBHistogram([8, 8, 8])
    model = pickle.load(open("src/RandomForest/RandomForest_model.pkl", "rb"))
    image = cv2.imread(input_image)
    features = desc.describe(image)
    # predict() returns an array; take the single prediction.
    gender = (model.predict([features.flatten()]))[0]
    assert gender == expected_gender
| 25.170732
| 78
| 0.700581
| 140
| 1,032
| 4.985714
| 0.307143
| 0.08596
| 0.103152
| 0.137536
| 0.7851
| 0.7851
| 0.7851
| 0.7851
| 0.7851
| 0.7851
| 0
| 0.035308
| 0.149225
| 1,032
| 40
| 79
| 25.8
| 0.759681
| 0
| 0
| 0.571429
| 0
| 0
| 0.203488
| 0.199612
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ccbf427b7c08bffd5472d1329684dbdf347f1cf4
| 128
|
py
|
Python
|
test/smoke_test_server.py
|
carbonblack/cb-lastline-connector
|
0129c4c8737248b83bdae817eafd9873fb8cae65
|
[
"MIT"
] | 13
|
2016-04-01T02:00:29.000Z
|
2021-06-10T07:12:12.000Z
|
test/smoke_test_server.py
|
carbonblack/cb-lastline-connector
|
0129c4c8737248b83bdae817eafd9873fb8cae65
|
[
"MIT"
] | 5
|
2015-12-14T19:24:23.000Z
|
2021-07-29T14:15:28.000Z
|
test/smoke_test_server.py
|
carbonblack/cb-lastline-connector
|
0129c4c8737248b83bdae817eafd9873fb8cae65
|
[
"MIT"
] | 12
|
2016-02-02T06:25:12.000Z
|
2021-06-10T07:12:26.000Z
|
from flask import Flask
from utils.mock_server import get_mocked_server
# Module-level Flask app with the mocked server routes attached at import time.
app = Flask(__name__)
server = get_mocked_server(app)
| 18.285714
| 47
| 0.820313
| 20
| 128
| 4.8
| 0.5
| 0.1875
| 0.3125
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 128
| 6
| 48
| 21.333333
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
aeed49e8fabd2ce9076d8c02009a3bcf54efd7ed
| 173
|
py
|
Python
|
control_citas/apps/doctor/admin.py
|
mariomtzjr/agenda-medica
|
a36eaf79507d63e35f8f14796c916f0f5aaa36d4
|
[
"MIT"
] | null | null | null |
control_citas/apps/doctor/admin.py
|
mariomtzjr/agenda-medica
|
a36eaf79507d63e35f8f14796c916f0f5aaa36d4
|
[
"MIT"
] | null | null | null |
control_citas/apps/doctor/admin.py
|
mariomtzjr/agenda-medica
|
a36eaf79507d63e35f8f14796c916f0f5aaa36d4
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from apps.doctor.models import Doctor
# Register your models here.
@admin.register(Doctor)
class DoctorAdmin(admin.ModelAdmin):
    """Default Django admin interface for the Doctor model (no customization)."""
    pass
| 17.3
| 37
| 0.786127
| 23
| 173
| 5.913043
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138728
| 173
| 9
| 38
| 19.222222
| 0.912752
| 0.150289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
4e3d90d85e0e65b12dcc087a2dc8241b6026b8de
| 38
|
py
|
Python
|
python/testData/psi/ResetAfterSemicolon.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/psi/ResetAfterSemicolon.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/psi/ResetAfterSemicolon.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
if True:
import tmp2; import tmp1
| 12.666667
| 28
| 0.684211
| 6
| 38
| 4.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.263158
| 38
| 2
| 29
| 19
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9d99b6e17bd9c32f9f0117b93b7211d398781a03
| 119
|
py
|
Python
|
d6/d6.py
|
mwm021/Advent-of-Code-2021
|
9908b95ea6503c1b31fa26845e8ee5d0ad474718
|
[
"MIT"
] | null | null | null |
d6/d6.py
|
mwm021/Advent-of-Code-2021
|
9908b95ea6503c1b31fa26845e8ee5d0ad474718
|
[
"MIT"
] | null | null | null |
d6/d6.py
|
mwm021/Advent-of-Code-2021
|
9908b95ea6503c1b31fa26845e8ee5d0ad474718
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
import csv
import numpy as np
def d6_1():
    """Day 6 part 1 — not implemented yet."""
    # TODO: implement the part 1 solution.
    pass
def d6_2():
    """Day 6 part 2 — not implemented yet."""
    # TODO: implement the part 2 solution.
    pass
# Run both parts when the script executes (currently no-ops).
d6_1()
d6_2()
| 8.5
| 19
| 0.655462
| 24
| 119
| 3.083333
| 0.541667
| 0.135135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.260504
| 119
| 13
| 20
| 9.153846
| 0.75
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
9dd169aac1b5bf47d2d437164010983d95f3fb44
| 103
|
py
|
Python
|
artbot/proxy/__init__.py
|
skielred/ArtCompanion
|
6b3d41dcdbd1151778324cf8068e0ce74cfab09c
|
[
"MIT"
] | 1
|
2021-05-04T09:18:17.000Z
|
2021-05-04T09:18:17.000Z
|
artbot/proxy/__init__.py
|
skielred/ArtCompanion
|
6b3d41dcdbd1151778324cf8068e0ce74cfab09c
|
[
"MIT"
] | 6
|
2021-04-18T01:03:50.000Z
|
2021-08-30T14:18:30.000Z
|
artbot/proxy/__init__.py
|
skielred/ArtCompanion
|
6b3d41dcdbd1151778324cf8068e0ce74cfab09c
|
[
"MIT"
] | null | null | null |
from .cog import ProxyCog
def init(bot):
    """Attach the ProxyCog to *bot*."""
    bot.add_cog(ProxyCog(bot))
# NOTE(review): import deliberately placed after init() — presumably to avoid
# a circular import with the submodules; confirm before moving to the top.
from . import pixiv, twitter
| 14.714286
| 30
| 0.718447
| 16
| 103
| 4.5625
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174757
| 103
| 6
| 31
| 17.166667
| 0.858824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9dd9b88bd13b0fb7392b2a258acf22d9a4ae3cf5
| 174
|
py
|
Python
|
ticketer/processor.py
|
gavinB-orange/ticketer
|
4e49d928dd6a4d22134dcbf989e84fd335f45307
|
[
"Apache-2.0"
] | null | null | null |
ticketer/processor.py
|
gavinB-orange/ticketer
|
4e49d928dd6a4d22134dcbf989e84fd335f45307
|
[
"Apache-2.0"
] | null | null | null |
ticketer/processor.py
|
gavinB-orange/ticketer
|
4e49d928dd6a4d22134dcbf989e84fd335f45307
|
[
"Apache-2.0"
] | null | null | null |
# contains the processing code for the ticketer.
class Processor(object):
    """Message processor for the ticketer."""

    def __init__(self):
        # No state required.
        pass

    def say(self, message):
        """Return *message* with a trailing newline appended."""
        # Keep plain concatenation: it raises TypeError for non-str input,
        # matching the original contract.
        suffix = "\n"
        return message + suffix
| 14.5
| 48
| 0.626437
| 21
| 174
| 5
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.281609
| 174
| 11
| 49
| 15.818182
| 0.84
| 0.264368
| 0
| 0
| 0
| 0
| 0.016
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.2
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
9ddda570c754203d55e826630562afac6c1ee49d
| 251
|
py
|
Python
|
muddery/events/base_event_action.py
|
noahzaozao/muddery
|
294da6fb73cb04c62e5ba6eefe49b595ca76832a
|
[
"BSD-3-Clause"
] | null | null | null |
muddery/events/base_event_action.py
|
noahzaozao/muddery
|
294da6fb73cb04c62e5ba6eefe49b595ca76832a
|
[
"BSD-3-Clause"
] | null | null | null |
muddery/events/base_event_action.py
|
noahzaozao/muddery
|
294da6fb73cb04c62e5ba6eefe49b595ca76832a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Event action's base class.
"""
class BaseEventAction(object):
"""
Event action's base class.
"""
key = ""
name = ""
def func(self, event, character):
"""
Event action's function.
"""
pass
| 13.944444
| 37
| 0.494024
| 25
| 251
| 4.96
| 0.6
| 0.266129
| 0.290323
| 0.258065
| 0.33871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.354582
| 251
| 17
| 38
| 14.764706
| 0.765432
| 0.310757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
9de4a6fd7fb969be970653874bef2adabed0bf35
| 1,639
|
py
|
Python
|
fastconv/conv2d/conv2d.py
|
wueric/fastconv
|
7b34f09eb83439241737e764b93e584d582ca917
|
[
"MIT"
] | null | null | null |
fastconv/conv2d/conv2d.py
|
wueric/fastconv
|
7b34f09eb83439241737e764b93e584d582ca917
|
[
"MIT"
] | null | null | null |
fastconv/conv2d/conv2d.py
|
wueric/fastconv
|
7b34f09eb83439241737e764b93e584d582ca917
|
[
"MIT"
] | null | null | null |
import numpy as np
from . import imageconv_cpp
def batch_parallel_2Dconv_same(batched_images: np.ndarray,
                               filter_coeffs: np.ndarray,
                               pad_values: float) -> np.ndarray:
    '''
    Performs batched 2D "same" convolution of images in parallel

    :param batched_images: np.ndarray shape (batch, height, width), dtype either np.float32 or np.float64
    :param filter_coeffs: np.ndarray of shape (kern_height, kern_width), dtype either np.float32 or np.float64
    :param pad_values: float, value to pad the border by to produce a "same" convolution
    :return: np.ndarray, the convolved batch (presumably same spatial shape
        as the input, given the "same" semantics — the C++ extension is not
        visible here to confirm)
    '''
    # Validate before handing off to the C++ extension.
    if batched_images.ndim != 3:
        raise ValueError("batched_images must have ndim 3")
    if filter_coeffs.ndim != 2:
        raise ValueError("filter_coeffs must have ndim 2")
    return imageconv_cpp.batch_smallfilter_2dconv_same(batched_images, filter_coeffs, pad_values)
def batch_parallel_2Dconv_valid(batched_images : np.ndarray,
                                filter_coeffs: np.ndarray) -> np.ndarray:
    '''
    Performs batched 2D "valid" convolution of images in parallel

    (Docstring fix: the original said "same", copy-pasted from the function
    above; this is the no-padding variant — the underlying call is the
    "shrink" kernel.)

    :param batched_images: np.ndarray shape (batch, height, width), dtype either np.float32 or np.float64
    :param filter_coeffs: np.ndarray of shape (kern_height, kern_width), dtype either np.float32 or np.float64
    :return: np.ndarray, the "valid" convolution output (spatial dimensions
        presumably shrink by kernel size minus one — confirm against the
        C++ extension)
    '''
    # Validate before handing off to the C++ extension.
    if batched_images.ndim != 3:
        raise ValueError("batched_images must have ndim 3")
    if filter_coeffs.ndim != 2:
        raise ValueError("filter_coeffs must have ndim 2")
    return imageconv_cpp.batch_smallfilter_2dconv_shrink(batched_images, filter_coeffs)
| 39.97561
| 110
| 0.695546
| 220
| 1,639
| 4.990909
| 0.245455
| 0.118397
| 0.054645
| 0.080146
| 0.787796
| 0.787796
| 0.787796
| 0.787796
| 0.709472
| 0.704918
| 0
| 0.023641
| 0.225747
| 1,639
| 40
| 111
| 40.975
| 0.841608
| 0.392923
| 0
| 0.470588
| 0
| 0
| 0.130342
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.117647
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d194f64a2b9a375402e064dbd7dcb5d7db4e0e27
| 99
|
py
|
Python
|
VacationPy/api_keys.py
|
scarlett014j/python-api-challenge
|
be3fbe4dbc274caccbafd548d222526609e66420
|
[
"ADSL"
] | null | null | null |
VacationPy/api_keys.py
|
scarlett014j/python-api-challenge
|
be3fbe4dbc274caccbafd548d222526609e66420
|
[
"ADSL"
] | null | null | null |
VacationPy/api_keys.py
|
scarlett014j/python-api-challenge
|
be3fbe4dbc274caccbafd548d222526609e66420
|
[
"ADSL"
] | null | null | null |
# OpenWeatherMap API Key
# NOTE(review): placeholder value — replace locally with a real key and
# avoid committing real credentials to version control.
weather_api_key = "Put Key Here"
# Google API Key (same caveat as above).
g_key = "Put key here"
| 16.5
| 32
| 0.727273
| 17
| 99
| 4.058824
| 0.470588
| 0.26087
| 0.26087
| 0.376812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191919
| 99
| 5
| 33
| 19.8
| 0.8625
| 0.373737
| 0
| 0
| 0
| 0
| 0.40678
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d1d1daaef4159ea769be23a4d98c1743c882c3a1
| 31
|
py
|
Python
|
Warmup/Q08 - Mini-Max Sum/python.py
|
utkarshg6/SMVDU-HackerRank
|
1f8764be28cd8170841b134bcf9c68e349ba79bc
|
[
"MIT"
] | 2
|
2018-07-01T21:12:59.000Z
|
2018-09-05T16:05:24.000Z
|
Warmup/Q08 - Mini-Max Sum/python.py
|
utkarshg6/SMVDU-HackerRank
|
1f8764be28cd8170841b134bcf9c68e349ba79bc
|
[
"MIT"
] | 4
|
2018-02-20T06:45:49.000Z
|
2018-03-29T20:55:53.000Z
|
Warmup/Q08 - Mini-Max Sum/python.py
|
utkarshg6/SMVDU-HackerRank
|
1f8764be28cd8170841b134bcf9c68e349ba79bc
|
[
"MIT"
] | 3
|
2018-02-19T11:35:30.000Z
|
2018-03-27T15:23:18.000Z
|
#Q08 - Mini-Max Sum || Warmup
| 31
| 31
| 0.612903
| 5
| 31
| 3.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.225806
| 31
| 1
| 31
| 31
| 0.708333
| 0.967742
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
060b0413bfc93fa8d01d87750fba3b06589bbcac
| 422
|
py
|
Python
|
General/Modules/Macros/FZJveinThickness/least_sqr/LA.py
|
IBG-2/phenoVein
|
534330747c54a35966b68951526fa2e381fb924d
|
[
"BSD-3-Clause"
] | 1
|
2020-08-18T02:18:19.000Z
|
2020-08-18T02:18:19.000Z
|
General/Modules/Macros/FZJveinThickness/least_sqr/LA.py
|
IBG-2/phenoVein
|
534330747c54a35966b68951526fa2e381fb924d
|
[
"BSD-3-Clause"
] | null | null | null |
General/Modules/Macros/FZJveinThickness/least_sqr/LA.py
|
IBG-2/phenoVein
|
534330747c54a35966b68951526fa2e381fb924d
|
[
"BSD-3-Clause"
] | null | null | null |
#import Scientific_numerics_package_id
#package = Scientific_numerics_package_id.getNumericsPackageName()
#del Scientific_numerics_package_id
#if package == "Numeric":
# from LinearAlgebra import *
#elif package == "NumPy":
from numpy.oldnumeric.linear_algebra import *
#elif package == "Numarray":
# from numarray.linear_algebra import *
#else:
# raise ImportError("Unknown numerics package " + package)
| 21.1
| 66
| 0.763033
| 46
| 422
| 6.76087
| 0.456522
| 0.192926
| 0.241158
| 0.26045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13981
| 422
| 19
| 67
| 22.210526
| 0.856749
| 0.824645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ae13549bd1d53ac93ac7e0223628341a17825e06
| 120
|
py
|
Python
|
dynamo_pandas/serde/__init__.py
|
divideby0/dynamo-pandas
|
3a28921bb2ecaecab20ee8cd919f4c686a81e2b5
|
[
"MIT"
] | 10
|
2021-04-25T17:13:36.000Z
|
2022-03-28T21:03:10.000Z
|
dynamo_pandas/serde/__init__.py
|
divideby0/dynamo-pandas
|
3a28921bb2ecaecab20ee8cd919f4c686a81e2b5
|
[
"MIT"
] | 30
|
2021-03-07T23:03:41.000Z
|
2021-12-23T14:41:49.000Z
|
dynamo_pandas/serde/__init__.py
|
divideby0/dynamo-pandas
|
3a28921bb2ecaecab20ee8cd919f4c686a81e2b5
|
[
"MIT"
] | 3
|
2021-04-15T21:21:22.000Z
|
2022-03-04T23:32:13.000Z
|
from .serde import TypeDeserializer
from .serde import TypeSerializer
__all__ = ["TypeDeserializer", "TypeSerializer"]
| 24
| 48
| 0.808333
| 11
| 120
| 8.454545
| 0.545455
| 0.193548
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108333
| 120
| 4
| 49
| 30
| 0.869159
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ae4cce99bc5d43d017c1635ff12184238f8ce688
| 40,873
|
py
|
Python
|
test/TestHyGraph.py
|
shoaibkamil/OLD-kdt-specializer
|
85074ec1990df980d25096ea8c55dd81350e531e
|
[
"BSD-3-Clause"
] | 1
|
2021-11-15T02:11:33.000Z
|
2021-11-15T02:11:33.000Z
|
test/TestHyGraph.py
|
shoaibkamil/OLD-kdt-specializer
|
85074ec1990df980d25096ea8c55dd81350e531e
|
[
"BSD-3-Clause"
] | null | null | null |
test/TestHyGraph.py
|
shoaibkamil/OLD-kdt-specializer
|
85074ec1990df980d25096ea8c55dd81350e531e
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from kdt import *
from kdt import pyCombBLAS as pcb
class HyGraphTests(unittest.TestCase):
    """Shared base for HyGraph test cases: provides a graph-building helper."""
    def initializeGraph(self, nvert, nedge, i, j, v=1):
        """
        Initialize a graph with edge weights equal to one or the input value.

        :param nvert: number of vertices
        :param nedge: number of edges
        :param i: per-edge source vertex indices
        :param j: per-edge destination vertex indices
        :param v: scalar weight applied to all edges, or a per-edge sequence
        :return: HyGraph built from the given edges
        """
        iInd = ParVec(nedge)
        jInd = ParVec(nedge)
        # Scalar weight: fill every entry at ParVec construction time.
        if type(v) == int or type(v) == float:
            vInd = ParVec(nedge, v)
        else:
            vInd = ParVec(nedge)
        for ind in range(nedge):
            iInd[ind] = i[ind]
            jInd[ind] = j[ind]
            # Sequence weight: copy per-edge values.
            if type(v) != int and type(v) != float:
                vInd[ind] = v[ind]
        return HyGraph(iInd, jInd, vInd, nvert)
class ConstructorTests(HyGraphTests):
    """Tests for HyGraph construction/conversion."""
    def test_toDiGraph(self):
        """Convert a small hypergraph to a DiGraph and verify its edge lists."""
        nvert = 7
        nSEdge = 9 # #SimpleEdge
        origI = [0, 0, 1, 1, 2, 1, 2, 2, 3]
        origJ = [1, 2, 2, 3, 3, 4, 4, 5, 6]
        origV = 1
        G = self.initializeGraph(nvert, nSEdge, origI, origJ, origV)
        diG = G.toDiGraph()
        [di, dj, dv] = diG.toParVec()
        # Expected (i, j, weight) triples of the converted DiGraph.
        diExpected = [1, 2, 1, 2, 3, 4, 2, 3, 4, 5, 2, 3, 4, 5, 3, 4, 5, 6]
        djExpected = [1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6]
        dvExpected = [1, 1, 1, 2, 1, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 1]
        self.assertEqual(7, diG.nvert())
        self.assertEqual(18, diG.nedge())
        self.assertEqual(18, len(diExpected))
        # NOTE(review): only the first nvert (7) of the 18 expected entries
        # are checked here — possibly intended to loop over 18; confirm.
        for ind in range(nvert):
            self.assertEqual(diExpected[ind], di[ind])
            self.assertEqual(djExpected[ind], dj[ind])
            self.assertEqual(dvExpected[ind], dv[ind])
class PageRankTests(HyGraphTests):
    """PageRank tests checked against known reference values."""
    def test_connected(self):
        # Fully connected graph: every vertex should get uniform rank 1/10.
        G = DiGraph.fullyConnected(10)
        pr = G.pageRank()
        for prv in pr:
            self.assertAlmostEqual(0.1, prv, 7)
    def test_simple(self):
        # This test is drawn from the PageRank example at
        # http://en.wikipedia.org/wiki/File:PageRanks-Example.svg.
        nvert = 11
        nedge = 17
        i = [1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10]
        j = [2, 1, 0, 1, 1, 3, 5, 1, 4, 1, 4, 1, 4, 1, 4, 4, 4]
        self.assertEqual(len(i), nedge)
        self.assertEqual(len(j), nedge)
        G = self.initializeGraph(nvert, nedge, i, j)
        pr = G.pageRank(0.0001)
        # Reference ranks from the Wikipedia example, to 4 decimal places.
        expected = [0.032814, 0.38440, 0.34291, 0.03909, 0.08089, 0.03909, \
                0.01617, 0.01617, 0.01617, 0.01617, 0.01617]
        for ind in range(nvert):
            self.assertAlmostEqual(pr[ind], expected[ind], 4)
    def test_simple_loops(self):
        # This test is drawn from the PageRank example at
        # http://en.wikipedia.org/wiki/File:PageRanks-Example.svg.
        #
        # The difference between this and the previous test is that
        # this test includes several self loops to verify they have no
        # effect on the outcome.
        nvert = 11
        nedge = 21
        i = [1, 1, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 10, 10]
        j = [1, 2, 1, 0, 1, 1, 3, 4, 5, 1, 4, 1, 4, 1, 4, 7, 1, 4, 4, 4, 10]
        self.assertEqual(len(i), nedge)
        self.assertEqual(len(j), nedge)
        G = self.initializeGraph(nvert, nedge, i, j)
        pr = G.pageRank(0.0001)
        # Same reference ranks as test_simple: self loops must not change them.
        expected = [0.032814, 0.38440, 0.34291, 0.03909, 0.08089, 0.03909, \
                0.01617, 0.01617, 0.01617, 0.01617, 0.01617]
        for ind in range(nvert):
            self.assertAlmostEqual(pr[ind], expected[ind], 4)
class NormalizeEdgeWeightsTests(HyGraphTests):
    """normalizeEdgeWeights() with the default, Out, and In directions.

    The per-triple comparison loop was previously copy-pasted into all
    three small-graph tests; it now lives in _check_weights.
    """

    def no_edge_graph(self):
        # Helper (not a test): a 4-vertex graph with no edges at all.
        nvert = 4
        nedge = 0
        i = []
        j = []
        self.assertEqual(len(i), nedge)
        self.assertEqual(len(j), nedge)
        return self.initializeGraph(nvert, nedge, i, j)

    def small_test_graph(self):
        # Helper (not a test): the 4x4 adjacency pattern
        # 1 0 1 0
        # 0 0 0 1
        # 0 1 0 1
        # 1 0 0 0
        nvert = 4
        nedge = 6
        i = [0, 3, 2, 0, 1, 2]
        j = [0, 0, 1, 2, 3, 3]
        self.assertEqual(len(i), nedge)
        self.assertEqual(len(j), nedge)
        return [nvert, nedge, i, j, self.initializeGraph(nvert, nedge, i, j)]

    def _check_weights(self, G, i, j, w, nedge):
        # Shared assertion loop: every (i, j, weight) triple of G must
        # match the expected lists exactly.
        [iInd, jInd, eW] = G.toParVec()
        for ind in range(nedge):
            self.assertEqual(i[ind], iInd[ind])
            self.assertEqual(j[ind], jInd[ind])
            self.assertEqual(eW[ind], w[ind])

    def test_no_edges_default(self):
        G = self.no_edge_graph()
        G.normalizeEdgeWeights()
        self.assertEqual(G.nedge(), 0)

    def test_no_edges_out(self):
        G = self.no_edge_graph()
        G.normalizeEdgeWeights(DiGraph.Out)
        self.assertEqual(G.nedge(), 0)

    def test_no_edges_in(self):
        G = self.no_edge_graph()
        G.normalizeEdgeWeights(DiGraph.In)
        self.assertEqual(G.nedge(), 0)

    def test_small_default(self):
        [nvert, nedge, i, j, G] = self.small_test_graph()
        G.normalizeEdgeWeights()
        self._check_weights(G, i, j, [0.5, 1., 0.5, 0.5, 1., 0.5], nedge)

    def test_small_out(self):
        [nvert, nedge, i, j, G] = self.small_test_graph()
        G.normalizeEdgeWeights(DiGraph.Out)
        self._check_weights(G, i, j, [0.5, 1., 0.5, 0.5, 1., 0.5], nedge)

    def test_small_in(self):
        [nvert, nedge, i, j, G] = self.small_test_graph()
        G.normalizeEdgeWeights(DiGraph.In)
        self._check_weights(G, i, j, [0.5, 0.5, 1., 1., 0.5, 0.5], nedge)
class DegreeTests(HyGraphTests):
    """degree(), npin(), rank() and antirank() of a HyGraph.

    The 4-vertex empty fixture and the 11-vertex/17-incidence fixture
    were each duplicated across several tests; they now live in
    _empty_graph and _simple_graph.
    """

    def _empty_graph(self):
        # Shared fixture: 4 vertices, no incidences.
        i = []
        j = []
        self.assertEqual(len(i), 0)
        self.assertEqual(len(j), 0)
        return self.initializeGraph(4, 0, i, j)

    def _simple_graph(self):
        # Shared fixture: 11 vertices, 17 incidences; collapses to 6
        # hyperedges (the *_simple tests assert nedge() == 6).
        i = [0, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 4, 4, 4, 4, 4, 5]
        j = [3, 2, 3, 4, 5, 6, 7, 8, 1, 4, 5, 6, 7, 8, 9, 10, 4]
        self.assertEqual(len(i), 17)
        self.assertEqual(len(j), 17)
        return self.initializeGraph(11, 17, i, j)

    def test_degree_no_edges(self):
        G = self._empty_graph()
        deg = G.degree()
        for ind in range(4):
            self.assertEqual(deg[ind], 0)

    def test_degree_simple(self):
        G = self._simple_graph()
        deg = G.degree()
        degExpected = [0, 1, 1, 2, 3, 2, 2, 2, 2, 1, 1]
        for ind in range(11):
            self.assertEqual(deg[ind], degExpected[ind])

    def test_npin_no_edges(self):
        G = self._empty_graph()
        # With zero hyperedges there is nothing to compare; this only
        # checks that npin() itself does not raise.
        npin = G.npin()

    def test_npin_simple(self):
        G = self._simple_graph()
        self.assertEqual(11, G.nvert())
        nedge = G.nedge()
        self.assertEqual(6, nedge)
        npin = G.npin()
        npinExpected = [1, 7, 1, 1, 6, 1]
        for ind in range(nedge):
            self.assertEqual(npin[ind], npinExpected[ind])

    def test_rank_no_edges(self):
        G = self._empty_graph()
        self.assertEqual(G.rank(), 0)

    def test_rank_simple(self):
        G = self._simple_graph()
        self.assertEqual(11, G.nvert())
        self.assertEqual(6, G.nedge())
        self.assertEqual(G.rank(), 7)

    def test_antirank_no_edges(self):
        G = self._empty_graph()
        self.assertEqual(G.antirank(), 0)

    def test_antirank_simple(self):
        G = self._simple_graph()
        self.assertEqual(11, G.nvert())
        self.assertEqual(6, G.nedge())
        self.assertEqual(G.antirank(), 1)
class CentralityTests(HyGraphTests):
    """Betweenness centrality on a 16x16 2D torus, exact and approximate.

    The torus setup and per-vertex assertion loop were triplicated;
    they now live in _check_torus_bc.
    """

    def _check_torus_bc(self, alg, expected, places, **kwargs):
        # Shared driver: by symmetry every vertex of the torus has the
        # same normalized betweenness-centrality value.
        G = DiGraph.twoDTorus(16)
        bc = G.centrality(alg, normalize=True, **kwargs)
        for ind in range(G.nvert()):
            self.assertAlmostEqual(bc[ind], expected, places)

    def test_exactBC_twoDTorus(self):
        self._check_torus_bc('exactBC', 0.0276826, 6)

    def test_approxBC_twoDTorus(self):
        # sample=1.0 uses every vertex, so the approximation must match
        # the exact value to 6 places.
        self._check_torus_bc('approxBC', 0.0276826, 6, sample=1.0)

    def test_approxBC_twoDTorus_sample(self):
        # 5% sampling is only expected to be accurate to ~2 places.
        self._check_torus_bc('approxBC', 0.0276, 2, sample=0.05)
class BFSTreeTests(HyGraphTests):
    def test_bfsTree(self):
        # BFS from root 1; -1 in the parent vector marks vertices that
        # are not part of the tree.
        src = [0, 0, 1, 1, 2, 1, 2, 2, 3]
        dst = [1, 2, 2, 3, 3, 4, 4, 5, 6]
        G = self.initializeGraph(7, 9, src, dst, 1)
        parents = G.bfsTree(1)
        expected = [-1, 1, 1, 2, 2, 4, -1]
        self.assertEqual(len(expected), len(parents))
        for ind, exp in enumerate(expected):
            self.assertEqual(exp, parents[ind])
class IsBFSTreeTests(HyGraphTests):
    def test_isBfsTree(self):
        # Compute a BFS tree from vertex 1, then verify that isBfsTree
        # accepts it and reports the expected per-vertex levels
        # (-1 marks vertices outside the tree).
        src = [0, 0, 1, 1, 2, 1, 2, 2, 3]
        dst = [1, 2, 2, 3, 3, 4, 4, 5, 6]
        G = self.initializeGraph(7, 9, src, dst, 1)
        root = 1
        parents = G.bfsTree(root)
        ret = G.isBfsTree(root, parents)
        self.assertEqual(type(ret), tuple)
        valid, levels = ret
        self.assertTrue(valid)
        levelsExpected = [-1, 0, 1, 2, 2, 3, -1]
        self.assertEqual(len(levelsExpected), len(levels))
        for ind, exp in enumerate(levelsExpected):
            self.assertEqual(exp, levels[ind])
class NeighborsTests(HyGraphTests):
    def test_neighbors(self):
        # One-hop neighborhood of vertex 4, returned as a 0/1 indicator
        # over all vertices.
        src = [1, 1, 2, 2, 3, 4, 4, 4, 5, 6, 7, 7, 7]
        dst = [2, 4, 5, 7, 6, 1, 3, 7, 6, 3, 3, 4, 5]
        self.assertEqual(len(src), 13)
        self.assertEqual(len(dst), 13)
        expected = [0, 1, 0, 1, 0, 0, 0, 1]
        G = self.initializeGraph(8, 13, src, dst)
        hood = G.neighbors(4)
        for ind, exp in enumerate(expected):
            self.assertEqual(hood[ind], exp)

    def test_neighbors_2hop(self):
        # Two-hop neighborhood of vertex 4 via the nhop keyword.
        src = [1, 1, 2, 2, 4, 4, 4, 5, 6, 7, 7, 7]
        dst = [2, 4, 5, 7, 1, 3, 7, 6, 3, 3, 4, 5]
        self.assertEqual(len(src), 12)
        self.assertEqual(len(dst), 12)
        expected = [0, 1, 1, 1, 1, 1, 0, 1]
        G = self.initializeGraph(8, 12, src, dst)
        hood = G.neighbors(4, nhop=2)
        for ind, exp in enumerate(expected):
            self.assertEqual(hood[ind], exp)
class PathsHopTests(HyGraphTests):
    def test_pathsHop(self):
        # pathsHop from the start set {2, 4}; -1 marks vertices not
        # reached in one hop, otherwise the value is the source vertex.
        src = [1, 1, 2, 2, 3, 4, 4, 4, 5, 6, 7, 7, 7]
        dst = [2, 4, 5, 7, 6, 1, 3, 7, 6, 3, 3, 4, 5]
        self.assertEqual(len(src), 13)
        self.assertEqual(len(dst), 13)
        expected = [-1, 4, -1, 4, -1, 2, -1, 4]
        G = self.initializeGraph(8, 13, src, dst)
        idx = ParVec.range(8)
        starts = (idx == 2) | (idx == 4)
        hops = G.pathsHop(starts)
        for ind, exp in enumerate(expected):
            self.assertEqual(hops[ind], exp)
class LoadTests(HyGraphTests):
    """Loading hypergraphs from MatrixMarket files on disk."""

    def test_load(self):
        # Load a co-authorship matrix and spot-check the parsed triples.
        G = HyGraph.load('testfiles/UFlorida_Pajek_Sandi_sandi.mtx')
        G._T(); # swap so edges are papers, not authors
        self.assertEqual(G.nvert(),314)
        self.assertEqual(G.nedge(),360)
        [i, j, v] = G.toParVec()
        # 613 nonzero incidences expected in the transposed matrix
        self.assertEqual(len(i),613)
        self.assertEqual(len(j),613)
        self.assertEqual(len(v),613)
        # Spot-check every 100th incidence rather than all 613.
        expectedNdx = [100, 200, 300, 400, 500, 600]
        expectedI = [ 23, 13, 222, 244, 310, 352]
        expectedJ = [ 63, 121, 155, 204, 252, 309]
        expectedV = 1
        for ind in range(len(expectedNdx)):
            self.assertEqual(i[expectedNdx[ind]], expectedI[ind])
            self.assertEqual(j[expectedNdx[ind]], expectedJ[ind])
            self.assertEqual(v[expectedNdx[ind]], expectedV)

    def test_load_bad_file(self):
        # A nonexistent path must surface as IOError from load().
        self.assertRaises(IOError, DiGraph.load, 'not_a_real_file.mtx')

    # Disabled: requires network access to the UF sparse-matrix collection.
    # def test_UFget_simple_unsym(self):
    # G = UFget('Pajek/CSphd')
    # self.assertEqual(G.nvert(), 1882)
    # self.assertEqual(G.nedge(), 1740)
    #
    # def test_UFget_simple_sym(self):
    # G = UFget('Pajek/dictionary28')
    # self.assertEqual(G.nvert(), 52652)
    # self.assertEqual(G.nedge(), 178076)
class MaxTests(HyGraphTests):
    def test_max_out(self):
        """max() along both Out and In on a 9-vertex weighted graph."""
        nvert = 9
        nedge = 19
        i = [0, 1, 1, 2, 1, 3, 2, 3, 3, 4, 6, 8, 7, 8, 1, 1, 1, 1, 1]
        j = [1, 0, 2, 1, 3, 1, 3, 2, 4, 3, 8, 6, 8, 7, 4, 5, 6, 7, 8]
        # FIX: the first weight was the octal literal 01 (value 1), which
        # is Python-2-only syntax; plain 1 has the identical value.
        v = [1, 10, 12, 21, 13, 31, 23, 32, 34, 43, 68, 1.6e10, 78, 87, 14,
             15, 16, 17, 18]
        G = self.initializeGraph(nvert, nedge, i, j, v)
        self.assertEqual(G.nvert(), nvert)
        self.assertEqual(G.nedge(), nedge)
        outmax = G.max(dir=DiGraph.Out)
        inmax = G.max(dir=DiGraph.In)
        # Vertex 5 never appears in i (no outgoing edge), hence the 0.
        outmaxExpected = [1, 18, 23, 34, 43, 0, 68, 78, 1.6e10]
        inmaxExpected = [10, 31, 32, 43, 34, 15, 1.6e+10, 87, 78]
        self.assertEqual(len(outmax), len(outmaxExpected))
        self.assertEqual(len(inmax), len(inmaxExpected))
        for ind in range(len(outmax)):
            self.assertEqual(outmax[ind], outmaxExpected[ind])
            self.assertEqual(inmax[ind], inmaxExpected[ind])
class MinTests(HyGraphTests):
    def test_min_out(self):
        """min() along both Out and In; mirror of MaxTests with negated weights."""
        nvert = 9
        nedge = 19
        i = [0, 1, 1, 2, 1, 3, 2, 3, 3, 4, 6, 8, 7, 8, 1, 1, 1, 1, 1]
        j = [1, 0, 2, 1, 3, 1, 3, 2, 4, 3, 8, 6, 8, 7, 4, 5, 6, 7, 8]
        # FIX: the first weight was the octal literal -01 (value -1), which
        # is Python-2-only syntax; plain -1 has the identical value.
        v = [-1, -10, -12, -21, -13, -31, -23, -32, -34, -43, -68, -1.6e10,
             -78, -87, -14, -15, -16, -17, -18]
        G = self.initializeGraph(nvert, nedge, i, j, v)
        self.assertEqual(G.nvert(), nvert)
        self.assertEqual(G.nedge(), nedge)
        outmin = G.min(dir=DiGraph.Out)
        inmin = G.min(dir=DiGraph.In)
        # Vertex 5 never appears in i (no outgoing edge), hence the 0.
        outminExpected = [-1, -18, -23, -34, -43, 0, -68, -78, -1.6e10]
        inminExpected = [-10, -31, -32, -43, -34, -15, -1.6e+10, -87, -78]
        self.assertEqual(len(outmin), len(outminExpected))
        self.assertEqual(len(inmin), len(inminExpected))
        for ind in range(len(outmin)):
            self.assertEqual(outmin[ind], outminExpected[ind])
            self.assertEqual(inmin[ind], inminExpected[ind])
class BuiltInMethodTests(HyGraphTests):
    def test_HyGraph_simple(self):
        # A simple HyGraph constructor call must reproduce the vertex
        # count, hyperedge count, and every (i, j, v) triple it was given.
        srcV = [0, 0, 1, 1, 2, 1, 2, 2, 3]
        dstV = [1, 2, 2, 3, 3, 4, 4, 5, 6]
        wts = [1, 2, 12, 13, 23, 14, 24, 25, 46]
        G = self.initializeGraph(7, 9, srcV, dstV, wts)
        self.assertEqual(7, G.nvert())
        self.assertEqual(4, G.nedge())
        [actualI, actualJ, actualV] = G.toParVec()
        self.assertEqual(len(srcV), len(actualI))
        self.assertEqual(len(dstV), len(actualJ))
        self.assertEqual(len(wts), len(actualV))
        for ind in range(len(srcV)):
            self.assertEqual(srcV[ind], actualI[ind])
            self.assertEqual(dstV[ind], actualJ[ind])
            self.assertEqual(wts[ind], actualV[ind])

    def test_HyGraph_no_verts(self):
        # Constructing with zero vertices but nonzero edges must raise.
        src = [0, 3, 2, 2, 1, 3]
        dst = [0, 0, 1, 2, 3, 3]
        self.assertRaises(KeyError, self.initializeGraph, 0, 6, src, dst)
# def test_indexing_simple_scalar_scalar(self):
# # ensure that a simple DiGraph constructor creates the number, source/
# # destination, and value pairs expected.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 2, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# G = self.initializeGraph(nvert, nedge, origI, origJ, origV)
# ndx = 2
# G2 = G[ndx,ndx]
# [actualI, actualJ, actualV] = G2.toParVec()
# expI = [0]
# expJ = [0]
# expV = [21]
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(expI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_indexing_simple_scalar_null(self):
# # ensure that a simple DiGraph constructor creates the number, source/
# # destination, and value pairs expected.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# G = self.initializeGraph(nvert, nedge, origI, origJ, origV)
# ndx = 2
# G2 = G[ndx,ndx]
# [actualI, actualJ, actualV] = G2.toParVec()
# expI = []
# expJ = []
# expV = []
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(expI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_indexing_simple_ParVeclen1_scalar(self):
# # ensure that a simple DiGraph constructor creates the number, source/
# # destination, and value pairs expected.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 2, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# G = self.initializeGraph(nvert, nedge, origI, origJ, origV)
# ndx = ParVec(1)
# ndx[0] = 2
# G2 = G[ndx,ndx]
# [actualI, actualJ, actualV] = G2.toParVec()
# expI = [0]
# expJ = [0]
# expV = [21]
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(expI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_indexing_simple_ParVeclen1_null(self):
# # ensure that a simple DiGraph constructor creates the number, source/
# # destination, and value pairs expected.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# G = self.initializeGraph(nvert, nedge, origI, origJ, origV)
# ndx = ParVec(1)
# ndx[0] = 2
# G2 = G[ndx,ndx]
# [actualI, actualJ, actualV] = G2.toParVec()
# expI = []
# expJ = []
# expV = []
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(expI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_indexing_simple_ParVeclenk(self):
# # ensure that a simple DiGraph constructor creates the number, source/
# # destination, and value pairs expected.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# G = self.initializeGraph(nvert, nedge, origI, origJ, origV)
# ndx = ParVec(3)
# ndx[0] = 2
# ndx[1] = 3
# ndx[2] = 4
# G2 = G[ndx,ndx]
# [actualI, actualJ, actualV] = G2.toParVec()
# expI = [1, 0, 2, 1]
# expJ = [0, 1, 1, 2]
# expV = [32, 23, 43, 34]
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(expI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_subgraph_simple_scalar_scalar(self):
# # ensure that a simple DiGraph constructor creates the number, source/
# # destination, and value pairs expected.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 2, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# G = self.initializeGraph(nvert, nedge, origI, origJ, origV)
# ndx = 2
# G2 = G.subgraph(ndx,ndx)
# [actualI, actualJ, actualV] = G2.toParVec()
# expI = [0]
# expJ = [0]
# expV = [21]
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(expI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_subgraph_simple_scalar_null(self):
# # ensure that a simple DiGraph constructor creates the number, source/
# # destination, and value pairs expected.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# G = self.initializeGraph(nvert, nedge, origI, origJ, origV)
# ndx = 2
# G2 = G.subgraph(ndx,ndx)
# [actualI, actualJ, actualV] = G2.toParVec()
# expI = []
# expJ = []
# expV = []
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(expI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_subgraph_simple_ParVeclen1_scalar(self):
# # ensure that a simple DiGraph constructor creates the number, source/
# # destination, and value pairs expected.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 2, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# G = self.initializeGraph(nvert, nedge, origI, origJ, origV)
# ndx = ParVec(1)
# ndx[0] = 2
# G2 = G.subgraph(ndx,ndx)
# [actualI, actualJ, actualV] = G2.toParVec()
# expI = [0]
# expJ = [0]
# expV = [21]
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(expI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_subgraph_simple_ParVeclen1_null(self):
# # ensure that a simple DiGraph constructor creates the number, source/
# # destination, and value pairs expected.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# G = self.initializeGraph(nvert, nedge, origI, origJ, origV)
# ndx = ParVec(1)
# ndx[0] = 2
# G2 = G.subgraph(ndx,ndx)
# [actualI, actualJ, actualV] = G2.toParVec()
# expI = []
# expJ = []
# expV = []
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(expI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_subgraph_simple_ParVeclenk(self):
# # ensure that a simple DiGraph constructor creates the number, source/
# # destination, and value pairs expected.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# G = self.initializeGraph(nvert, nedge, origI, origJ, origV)
# ndx = ParVec(3)
# ndx[0] = 2
# ndx[1] = 3
# ndx[2] = 4
# G2 = G.subgraph(ndx,ndx)
# [actualI, actualJ, actualV] = G2.toParVec()
# expI = [1, 0, 2, 1]
# expJ = [0, 1, 1, 2]
# expV = [32, 23, 43, 34]
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(expI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_DiGraph_duplicates(self):
# # ensure that a DiGraph constructor creates the number, source/
# # destination, and value pairs expected when 3 input edges have
# # the same source and destination.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 3, 3, 3, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# expI = [1, 0, 2, 3, 1, 3, 3, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# expJ = [0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# expV = [10, 1, 21, 31, 12, 32, 79, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# G = self.initializeGraph(nvert, nedge, origI, origJ, origV)
# [actualI, actualJ, actualV] = G.toParVec()
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(origI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_add_simple(self):
# # ensure that DiGraph addition creates the number, source/
# # destination, and value pairs expected when all edges are
# # in both DiGraphs.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV1 = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# origV2 = [11, 2, 22, 32, 13, 33, 14, 24, 44, 15, 35, 16, 17, (1.6e+10)+1,
# 18, 88, 19, 69, 79]
# expV = [21, 3, 43, 63, 25, 65, 27, 47, 87, 29, 69, 31, 33, (3.2e+10)+1,
# 35, 175, 37, 137, 157]
# G1 = self.initializeGraph(nvert, nedge, origI, origJ, origV1)
# G2 = self.initializeGraph(nvert, nedge, origI, origJ, origV2)
# G3 = G1+G2
# [actualI, actualJ, actualV] = G3.toParVec()
# self.assertEqual(len(origI), len(actualI))
# self.assertEqual(len(origJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(origI)):
# self.assertEqual(origI[ind], actualI[ind])
# self.assertEqual(origJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_add_union(self):
# # ensure that DiGraph addition creates the number, source/
# # destination, and value pairs expected when some edges are not
# # in both DiGraphs.
# nvert1 = 9
# nedge1 = 19
# origI1 = [1, 0, 2, 4, 1, 3, 1, 2, 3, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ1 = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV1 = [10, 1, 21, 41, 12, 32, 13, 23, 33, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# G1 = self.initializeGraph(nvert1, nedge1, origI1, origJ1, origV1)
# nvert2 = 9
# nedge2 = 19
# origI2 = [7, 3, 6, 8, 5, 7, 4, 5, 6, 5, 7, 7, 2, 7, 2, 7, 0, 2, 5]
# origJ2 = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV2 = [70, 31, 61, 81, 52, 72, 43, 53, 63, 54, 74, 75, 26, 1.6e+10,
# 27, 77, 8, 28, 58]
# G2 = self.initializeGraph(nvert2, nedge2, origI2, origJ2, origV2)
# G3 = G1 + G2
# [actualI, actualJ, actualV] = G3.toParVec()
# expNvert = 9
# expNedge = 38
# expI = [1, 7, 0, 2, 3, 4, 6, 8, 1, 3, 5, 7, 1, 2, 3, 4, 5, 6, 1, 3, 5,
# 7, 1, 7, 1, 2, 7, 8, 1, 2, 7, 8, 0, 1, 2, 5, 6, 7]
# expJ = [0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4,
# 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8]
# expV = [10,70, 1,21,31,41,61,81,12,32,52,72,13,23,33,43,53,63,14,34,54,
# 74,15,75,16,26,1.6e+10,1.6e+10,17,27,77,87,8,18,28,58,68,78]
# [actualI, actualJ, actualV] = G3.toParVec()
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(expI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertEqual(expV[ind], actualV[ind])
#
# def test_multiply_simple(self):
# # ensure that DiGraph multiplication creates the number, source/
# # destination, and value pairs expected when all edges are
# # in both DiGraphs.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV1 = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 7.3,
# 17, 87, 18, 68, 78]
# origV2 = [11, 2, 22, 32, 13, 33, 14, 24, 44, 15, 35, 16, 17, 8.3,
# 18, 88, 19, 69, 79]
# expV = [110, 2, 462, 992, 156, 1056, 182, 552, 1892, 210, 1190, 240,
# 272, 60.59, 306, 7656, 342, 4692, 6162]
# G1 = self.initializeGraph(nvert, nedge, origI, origJ, origV1)
# G2 = self.initializeGraph(nvert, nedge, origI, origJ, origV2)
# G3 = G1*G2
# [actualI, actualJ, actualV] = G3.toParVec()
# self.assertEqual(len(origI), len(actualI))
# self.assertEqual(len(origJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(origI)):
# self.assertEqual(origI[ind], actualI[ind])
# self.assertEqual(origJ[ind], actualJ[ind])
# self.assertAlmostEqual(expV[ind], actualV[ind])
#
# def test_multiply_intersection(self):
# # ensure that DiGraph multiplication creates the number, source/
# # destination, and value pairs expected when some edges are not
# # in both DiGraphs.
# nvert1 = 9
# nedge1 = 19
# origI1 = [1, 0, 4, 6, 1, 5, 1, 2, 3, 1, 3, 1, 1, 8, 1, 8, 0, 6, 7]
# origJ1 = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV1 = [10, 1, 41, 61, 12, 52, 13, 23, 33, 14, 34, 15, 16, 7.7,
# 17, 87, 8, 68, 78]
# G1 = self.initializeGraph(nvert1, nedge1, origI1, origJ1, origV1)
# nvert2 = 9
# nedge2 = 19
# origI2 = [7, 3, 4, 8, 5, 7, 3, 5, 6, 3, 7, 7, 2, 8, 2, 7, 0, 2, 5]
# origJ2 = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV2 = [70, 31, 41, 81, 52, 72, 33, 53, 63, 34, 74, 75, 26, 7.7,
# 27, 77, 8, 28, 58]
# G2 = self.initializeGraph(nvert2, nedge2, origI2, origJ2, origV2)
# G3 = G1*G2
# [actualI, actualJ, actualV] = G3.toParVec()
# expNvert = 9
# expNedge = 6
# expI = [4, 5, 3, 3, 8, 0]
# expJ = [1, 2, 3, 4, 6, 8]
# expV = [1681, 2704, 1089, 1156, 59.29, 64]
# self.assertEqual(len(expI), len(actualI))
# self.assertEqual(len(expJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(expI)):
# self.assertEqual(expI[ind], actualI[ind])
# self.assertEqual(expJ[ind], actualJ[ind])
# self.assertAlmostEqual(expV[ind], actualV[ind])
#
# def test_div_simple(self):
# # ensure that DiGraph division creates the number, source/
# # destination, and value pairs expected when all edges are
# # in both DiGraphs.
# nvert = 9
# nedge = 19
# origI = [1, 0, 2, 3, 1, 3, 1, 2, 4, 1, 3, 1, 1, 8, 1, 8, 1, 6, 7]
# origJ = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
# origV1 = [10, 1, 21, 31, 12, 32, 13, 23, 43, 14, 34, 15, 16, 1.6e+10,
# 17, 87, 18, 68, 78]
# origV2 = [11, 2, 22, 32, 13, 33, 14, 24, 44, 15, 35, 16, 17, (1.6e+10)+1,
# 18, 88, 19, 69, 79]
# expV = [0.9090909091, 0.5, 0.9545454545, 0.96875, 0.92307692, 0.96969696,
# 0.92857142, 0.95833333, 0.97727272, 0.93333333, 0.97142857, 0.93750000,
# 0.94117647, 1, 0.94444444, 0.98863636, 0.94736842, 0.98550724, 0.98734177]
# G1 = self.initializeGraph(nvert, nedge, origI, origJ, origV1)
# G2 = self.initializeGraph(nvert, nedge, origI, origJ, origV2)
# G3 = G1/G2
# [actualI, actualJ, actualV] = G3.toParVec()
# self.assertEqual(len(origI), len(actualI))
# self.assertEqual(len(origJ), len(actualJ))
# self.assertEqual(len(expV), len(actualV))
# for ind in range(len(origI)):
# self.assertEqual(origI[ind], actualI[ind])
# self.assertEqual(origJ[ind], actualJ[ind])
# self.assertAlmostEqual(expV[ind], actualV[ind])
class GeneralPurposeTests(HyGraphTests):
    """mulNot() and scale() behavior.

    Fixes/cleanups: the unused expNvert/expNedge locals in test_multNot
    are removed (expNedge said 13 but there are only 12 expected triples),
    and the duplicated scale_out/scale_in fixture now lives in _run_scale.
    """

    def test_multNot(self):
        # mulNot keeps the edges of G1 whose (i, j) positions are absent
        # from G2.
        origI1 = [1, 0, 4, 6, 1, 5, 1, 2, 3, 1, 3, 1, 1, 8, 1, 8, 0, 6, 7]
        origJ1 = [0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
        origV1 = [10, 1, 41, 61, 12, 52, 13, 23, 33, 14, 34, 15, 1.6, 8.6,
                  17, 87, 8, 68, 78]
        G1 = self.initializeGraph(9, 19, origI1, origJ1, origV1)
        origI2 = [7, 0, 4, 8, 5, 2, 7, 8, 1, 7]
        origJ2 = [0, 1, 1, 1, 2, 3, 5, 6, 7, 8]
        origV2 = [70, 1, 41, 81, 52, 23, 75, 8.6, 17, 78]
        G2 = self.initializeGraph(9, 10, origI2, origJ2, origV2)
        G3 = G1.mulNot(G2)
        [actualI, actualJ, actualV] = G3.toParVec()
        # 12 surviving edges (the original's unused expNedge = 13 was wrong)
        expI = [1, 6, 1, 1, 3, 1, 3, 1, 1, 8, 0, 6]
        expJ = [0, 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 8]
        expV = [10, 61, 12, 13, 33, 14, 34, 15, 1.6, 87, 8, 68]
        self.assertEqual(len(expI), len(actualI))
        self.assertEqual(len(expJ), len(actualJ))
        self.assertEqual(len(expV), len(actualV))
        for ind in range(len(expI)):
            self.assertEqual(expI[ind], actualI[ind])
            self.assertEqual(expJ[ind], actualJ[ind])
            self.assertAlmostEqual(expV[ind], actualV[ind])

    def _run_scale(self, direction, expV):
        # Shared driver for the scale() tests: build the 9-vertex graph,
        # scale by a sparse vector, and compare every resulting triple.
        # Indices are unchanged by scale(), so origI/origJ double as the
        # expected index lists.
        origI = [0, 1, 4, 6, 1, 5, 1, 2, 3, 1, 3, 1, 1, 8, 1, 8, 0, 6, 7]
        origJ = [1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8]
        origV = [10, 1, 41, 61, 12, 52, 13, 23, 33, 14, 34, 15, 1.6, 8.6,
                 17, 87, 8, 68, 78]
        G = self.initializeGraph(9, 19, origI, origJ, origV)
        vec = SpParVec(9)
        # Position 0 (a null column of G) and position 6 (a non-null
        # column) are deliberately left null in the scaling vector.
        for pos in [1, 2, 3, 4, 5, 7, 8]:
            vec[pos] = pos
        G.scale(vec, dir=direction)
        [actualI, actualJ, actualV] = G.toParVec()
        self.assertEqual(len(origI), len(actualI))
        self.assertEqual(len(origJ), len(actualJ))
        self.assertEqual(len(expV), len(actualV))
        for ind in range(len(origI)):
            self.assertEqual(origI[ind], actualI[ind])
            self.assertEqual(origJ[ind], actualJ[ind])
            self.assertAlmostEqual(expV[ind], actualV[ind])

    def test_scale_out(self):
        self._run_scale(DiGraph.Out,
                        [10, 1, 164, 61, 12, 260, 13, 46, 99, 14, 102, 15,
                         1.6, 68.8, 17, 696, 8, 68, 546])

    def test_scale_in(self):
        self._run_scale(DiGraph.In,
                        [10, 1, 41, 61, 24, 104, 39, 69, 99, 56, 136, 75,
                         1.6, 8.6, 119, 609, 64, 544, 624])
class LinearAlgebraTests(HyGraphTests):
    def test_matMul_1row1col(self):
        # G1 has a single nonzero row, G2 the matching nonzero column;
        # their product is the single entry (0, 0) with value 4.
        rowI = [0, 0, 0, 0]
        rowJ = [1, 3, 4, 12]
        rowV = [1, 1, 1, 1]
        G1 = self.initializeGraph(16, 4, rowI, rowJ, rowV)
        colI = [1, 3, 4, 12]
        colJ = [0, 0, 0, 0]
        colV = [1, 1, 1, 1]
        G2 = self.initializeGraph(16, 4, colI, colJ, colV)
        G3 = G1._SpMM(G2)
        self.assertEqual(G1.nvert(), G3.nvert())
        [i3, j3, v3] = G3.toParVec()
        self.assertEqual(len(i3), 1)
        self.assertEqual(len(j3), 1)
        self.assertEqual(len(v3), 1)
        self.assertEqual(i3[0], 0)
        self.assertEqual(j3[0], 0)
        self.assertEqual(v3[0], 4)

    def test_matMul_simple(self):
        # Multiply a graph loaded from disk by its own transpose and
        # compare all 30 resulting triples.
        G = DiGraph.load('testfiles/small_nonsym_fp.mtx')
        GT = G.copy()
        GT._T()
        G2 = G._SpMM(GT)
        self.assertEqual(G.nvert(), 9)
        [i2, j2, v2] = G2.toParVec()
        self.assertEqual(len(i2), 30)
        self.assertEqual(len(j2), 30)
        self.assertEqual(len(v2), 30)
        expectedI = [0, 2, 3, 1, 2, 3, 4, 6, 7, 8, 0, 1, 2, 3, 4, 0, 1, 2, 3,
                     1, 2, 4, 1, 6, 7, 1, 6, 7, 1, 8]
        expectedJ = [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3,
                     4, 4, 4, 6, 6, 6, 7, 7, 7, 8, 8]
        expectedV = [0.0001, 0.0001, 0.0001, 0.001, 0.0001, 0.0001, 0.0001,
                     0.0001, 0.0001, 1.6e+8, 0.0001, 0.0001, 0.0002, 0.0001,
                     0.0001, 0.0001, 0.0001, 0.0001, 0.0003, 0.0001, 0.0001,
                     0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001,
                     1.6e+8, 2.56e+20]
        for ind in range(len(expectedI)):
            self.assertEqual(i2[ind], expectedI[ind])
            self.assertEqual(j2[ind], expectedJ[ind])
            self.assertAlmostEqual(v2[ind], expectedV[ind], places=3)
def runTests(verbosity=1):
    """Execute the default test suite with a text runner at *verbosity*."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
def suite():
    """Build the suite of currently-enabled test cases.

    Returns a ``unittest.TestSuite`` containing the enabled cases in a
    fixed order.  Uses a single ``TestLoader`` instead of instantiating a
    fresh one per case, and avoids shadowing this function's own name with
    a local variable (the original did both).
    """
    loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()
    # Disabled cases kept for reference: PageRankTests,
    # NormalizeEdgeWeightsTests, CentralityTests, NeighborsTests,
    # PathsHopTests, MaxTests, MinTests, GeneralPurposeTests,
    # LinearAlgebraTests.
    for case in (BuiltInMethodTests, ConstructorTests, DegreeTests,
                 BFSTreeTests, IsBFSTreeTests, LoadTests):
        test_suite.addTests(loader.loadTestsFromTestCase(case))
    return test_suite
# Allow this test module to be run directly from the command line.
if __name__ == '__main__':
    runTests()
| 37.224954
| 91
| 0.582928
| 6,664
| 40,873
| 3.55027
| 0.066327
| 0.14075
| 0.079885
| 0.023627
| 0.784099
| 0.729321
| 0.717275
| 0.710681
| 0.694154
| 0.675303
| 0
| 0.138455
| 0.236268
| 40,873
| 1,097
| 92
| 37.258888
| 0.619458
| 0.451765
| 0
| 0.528777
| 0
| 0
| 0.005456
| 0.003164
| 0
| 0
| 0
| 0
| 0.25
| 0
| null | null | 0
| 0.005396
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ae5383134b36479c90b4a4739b3cd0e4533ed365
| 22,774
|
py
|
Python
|
yxf_yixue/bazi/_fenxi.py
|
lamzuzuzu/yxf_yixue_py
|
90eb077f195b543f93a507f28b0a4c016cb0c92f
|
[
"MIT"
] | 20
|
2019-01-08T08:13:39.000Z
|
2021-12-23T09:04:14.000Z
|
yxf_yixue/bazi/_fenxi.py
|
lamzuzuzu/yxf_yixue_py
|
90eb077f195b543f93a507f28b0a4c016cb0c92f
|
[
"MIT"
] | null | null | null |
yxf_yixue/bazi/_fenxi.py
|
lamzuzuzu/yxf_yixue_py
|
90eb077f195b543f93a507f28b0a4c016cb0c92f
|
[
"MIT"
] | 13
|
2019-04-22T03:25:13.000Z
|
2022-01-04T05:43:48.000Z
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from ..utils import Db, Db2Cdata
class Chuantongfenxi:
    """Traditional ("chuantong") BaZi analysis.

    Skeleton pipeline: tags the chart dict and runs four analysis stages,
    all of which are no-op stubs here; subclasses override them.
    """

    def __init__(self):
        self.pan = None            # chart dict currently being analysed
        self.db = Db()             # raw table access
        self.db2cdata = Db2Cdata()

    def fenxi(self, pan):
        """Tag *pan*, run the four analysis stages in order, return *pan*."""
        self.pan = pan
        self.pan['标签'] = '传统分析'
        for stage in (self._wangshuai, self._geju, self._yongshen, self._qushu):
            stage()
        return self.pan

    def _wangshuai(self):
        """Strength analysis stage (stub)."""
        pass

    def _geju(self):
        """Pattern analysis stage (stub)."""
        pass

    def _yongshen(self):
        """Favourable-element selection stage (stub)."""
        pass

    def _qushu(self):
        """Number selection stage (stub)."""
        pass

    def output_addition(self):
        """Return the extra report text for this analysis (empty here)."""
        return ''
class Lianghuafenxi(Chuantongfenxi):
    """Quantified ("lianghua") BaZi analysis.

    Builds a '量化分析' sub-dict on the chart (*pan*): scores every heavenly
    stem and five-element, classifies the day master's strength bands,
    derives the chart pattern, selects favourable/unfavourable elements and
    finally a suggested number.  All Chinese dict keys are part of the
    runtime data contract and must not be changed.  The mutation order in
    each stage is significant: records are shared by reference between the
    '五行'/'六亲' and '天干'/'十神' indexes.
    """

    def __init__(self):
        super(Lianghuafenxi, self).__init__()
        self.pan = None            # chart dict currently being analysed
        self.db = Db()             # raw table access
        self.db2cdata = Db2Cdata()

    def fenxi(self, pan):
        """Run the full quantified pipeline on *pan* and return it."""
        self.pan = pan
        self.pan['标签'] = '量化分析'
        self.pan['量化分析'] = {}
        # Reference tables loaded from the database.
        self.pan['量化分析']['八字权重表'] = self.db.get_tabledict_dict("[八字-八字权重]")
        self.pan['量化分析']['旺衰权重表'] = self.db.get_tabledict_dict("[八字-旺衰权重]")
        self.pan['量化分析']['八字传统定格表'] = self.db.get_tabledict_dict("[八字-八字传统定格表]")
        # self.pan['量化分析']['八字量化取用表'] = self.db.get_tabledict_dict("[八字-八字量化取用表]")
        self.pan['量化分析']['五行'] = self.pan['五行']  # holds the five-element (six-relation) quantified values
        self.pan['量化分析']['天干'] = self.pan['天干']  # branches are later folded into stems; holds ten-god quantified values
        self.pan['量化分析']['六亲'] = {}
        self.pan['量化分析']['十神'] = {}
        # Re-index the same records by six-relation and by ten-god name.
        # These are shared references, not copies: updating a stem record
        # later also updates its '十神' entry.
        for wuxing in self.pan['量化分析']['五行']:
            self.pan['量化分析']['六亲'][self.pan['量化分析']['五行'][wuxing]['六亲']] = self.pan['量化分析']['五行'][wuxing]
        for tiangan in self.pan['量化分析']['天干']:
            self.pan['量化分析']['十神'][self.pan['量化分析']['天干'][tiangan]['十神']] = self.pan['量化分析']['天干'][tiangan]
        self.pan['量化分析']['旺衰'] = {}
        self.pan['量化分析']['八字格局'] = ''
        self.pan['量化分析']['格局序号'] = ''
        self.pan['量化分析']['取用格局'] = ''
        self.pan['量化分析']['八字喜忌'] = {}
        self.pan['量化分析']['建议取用'] = {}
        self.pan['量化分析']['建议取数'] = ''
        self._wangshuai()
        self._geju()
        self._yongshen()
        self._qushu()
        return self.pan

    def _wangshuai(self):
        """Score stem/element weights and classify day-master strength."""
        # Follows the method from the Sina blog "Liu Zhi Zhua"; the original
        # article does not mention changes in the elements' own strength, so
        # the author believed that logic should be added here.
        # 1. Chart strength: initialise weight coefficients for the four stems.
        for name in ['年干', '月干', '日干', '时干']:  # configure stem weights of the four pillars
            for item in self.pan['量化分析']['八字权重表']:
                if name == self.pan['量化分析']['八字权重表'][item]['宫位']:
                    self.pan['八字单字'][name]['系数'] = 1.0 * float(self.pan['量化分析']['八字权重表'][item]['权重'])
        # 2. Chart strength: initialise weight coefficients for the four
        #    branches (their hidden stems).
        for name in ['年支', '月支', '日支', '时支']:  # configure hidden-stem weights of the four pillars
            for item in self.pan['量化分析']['八字权重表']:
                if name == self.pan['量化分析']['八字权重表'][item]['宫位']:
                    self.pan['八字单字'][name]['藏干']['藏干1系数'] = float(self.pan['八字单字'][name]['藏干']['藏干1系数']) * float(
                        self.pan['量化分析']['八字权重表'][item]['权重'])
                    self.pan['八字单字'][name]['藏干']['藏干2系数'] = float(self.pan['八字单字'][name]['藏干']['藏干2系数']) * float(
                        self.pan['量化分析']['八字权重表'][item]['权重'])
                    self.pan['八字单字'][name]['藏干']['藏干3系数'] = float(self.pan['八字单字'][name]['藏干']['藏干3系数']) * float(
                        self.pan['量化分析']['八字权重表'][item]['权重'])
        # 3. Chart strength: base stem coefficients (after step 1/2 init).
        for tiangan in self.pan['量化分析']['天干']:
            self.pan['量化分析']['天干'][tiangan]['权重'] = 0
            for name in ['年干', '月干', '日干', '时干']:  # fold four-pillar stem coefficients into the ten-god values
                if tiangan == self.pan['八字单字'][name]['宫主']:
                    self.pan['量化分析']['天干'][tiangan]['权重'] += float(self.pan['八字单字'][name]['系数'])
        # 4. Chart strength: fold hidden-stem coefficients into stem weights.
        for name in ['年支','月支','日支','时支']:  # fold four-pillar hidden-stem coefficients into the ten-god values
            if self.pan['八字单字'][name]['藏干']['藏干1'] != '无':  # hidden stem 1
                for tiangan in self.pan['量化分析']['天干']:
                    if tiangan == self.pan['八字单字'][name]['藏干']['藏干1']:
                        self.pan['量化分析']['天干'][tiangan]['权重'] += float(self.pan['八字单字'][name]['藏干']['藏干1系数'])
            if self.pan['八字单字'][name]['藏干']['藏干2'] != '无':  # hidden stem 2
                for tiangan in self.pan['量化分析']['天干']:
                    if tiangan == self.pan['八字单字'][name]['藏干']['藏干2']:
                        self.pan['量化分析']['天干'][tiangan]['权重'] += float(self.pan['八字单字'][name]['藏干']['藏干2系数'])
            if self.pan['八字单字'][name]['藏干']['藏干3'] != '无':  # hidden stem 3
                for tiangan in self.pan['量化分析']['天干']:
                    if tiangan == self.pan['八字单字'][name]['藏干']['藏干3']:
                        self.pan['量化分析']['天干'][tiangan]['权重'] += float(self.pan['八字单字'][name]['藏干']['藏干3系数'])
        # 5. Chart strength: stem-branch relations transform into stem weights.
        for guanxi in self.pan['干支关系']:  # fold stems produced by relation transformations into the ten-god values
            for zuhe in self.pan['干支关系'][guanxi]:
                for tiangan in self.pan['量化分析']['天干']:
                    if self.pan['干支关系'][guanxi][zuhe].get('化', None):
                        if tiangan == self.pan['干支关系'][guanxi][zuhe]['化']:
                            self.pan['量化分析']['天干'][tiangan]['权重'] += float(self.pan['干支关系'][guanxi][zuhe]['化系数'])
                    if self.pan['干支关系'][guanxi][zuhe].get('化1', None):
                        if tiangan == self.pan['干支关系'][guanxi][zuhe]['化1']:
                            self.pan['量化分析']['天干'][tiangan]['权重'] += float(self.pan['干支关系'][guanxi][zuhe]['化1系数'])
                    if self.pan['干支关系'][guanxi][zuhe].get('化2', None):
                        if tiangan == self.pan['干支关系'][guanxi][zuhe]['化2']:
                            self.pan['量化分析']['天干'][tiangan]['权重'] += float(self.pan['干支关系'][guanxi][zuhe]['化2系数'])
        # 6. Chart strength: apply strength-state multipliers (by month branch).
        for tiangan in self.pan['量化分析']['天干']:
            for item in self.pan['量化分析']['旺衰权重表']:
                if self.pan['量化分析']['天干'][tiangan]['旺衰'] == item:
                    self.pan['量化分析']['天干'][tiangan]['权重'] *= float(self.pan['量化分析']['旺衰权重表'][item]['权重'])
        # 7. Chart strength: normalise stem weights (ten-god values) to 100%.
        # NOTE(review): the local name `sum` shadows the builtin for the
        # remainder of this method (it is reused in step 8).
        sum = 0
        for tiangan in self.pan['量化分析']['天干']:
            sum += self.pan['量化分析']['天干'][tiangan]['权重']
        for tiangan in self.pan['量化分析']['天干']:
            self.pan['量化分析']['天干'][tiangan]['归一'] = self.pan['量化分析']['天干'][tiangan]['权重']/sum*100
        # 8. Chart strength: six-relation weights (simple merge of the
        #    stems' ten-god weights by element).
        for wuxing in self.pan['量化分析']['五行']:
            self.pan['量化分析']['五行'][wuxing]['权重'] = 0
            for tiangan in self.pan['量化分析']['天干']:
                if self.pan['量化分析']['天干'][tiangan]['五行'] == wuxing:
                    self.pan['量化分析']['五行'][wuxing]['权重'] += self.pan['量化分析']['天干'][tiangan]['权重']
            self.pan['量化分析']['五行'][wuxing]['归一'] = self.pan['量化分析']['五行'][wuxing]['权重']/sum*100
        # 1. Day-master strength: support (same-element + resource share).
        self.pan['量化分析']['旺衰']['己生助'] = 0
        for wuxing in self.pan['量化分析']['五行']:
            if self.pan['量化分析']['五行'][wuxing]['六亲'] in ['比劫', '印枭']:
                self.pan['量化分析']['旺衰']['己生助'] += self.pan['量化分析']['五行'][wuxing]['归一']
        # 2. Day-master strength: drain (officer/wealth/output share).
        self.pan['量化分析']['旺衰']['克泄耗'] = 0
        for wuxing in self.pan['量化分析']['五行']:
            if self.pan['量化分析']['五行'][wuxing]['六亲'] in ['官杀', '财星', '食伤']:
                self.pan['量化分析']['旺衰']['克泄耗'] += self.pan['量化分析']['五行'][wuxing]['归一']
        # 3. Day-master strength: yin share (yin stems).
        self.pan['量化分析']['旺衰']['阴气'] = 0
        for tiangan in self.pan['量化分析']['天干']:
            if tiangan in ['乙', '丁', '己', '辛', '癸']:
                self.pan['量化分析']['旺衰']['阴气'] += self.pan['量化分析']['天干'][tiangan]['归一']
        # 4. Day-master strength: yang share (yang stems).
        self.pan['量化分析']['旺衰']['阳气'] = 0
        for tiangan in self.pan['量化分析']['天干']:
            if tiangan in ['甲', '丙', '戊', '庚', '壬']:
                self.pan['量化分析']['旺衰']['阳气'] += self.pan['量化分析']['天干'][tiangan]['归一']
        # 5. Day-master strength: band classification.  These percentage
        # cut-offs are important tuning parameters.
        # NOTE(review): a support value of exactly 17 matches no band and
        # leaves '日干' as '无' (the bands use "< 17" then "17 < x") — confirm
        # whether that boundary gap is intentional.
        self.pan['量化分析']['旺衰']['日干'] = '无'
        if self.pan['量化分析']['旺衰']['己生助'] < 17:
            self.pan['量化分析']['旺衰']['日干'] = '极弱'
        elif 17 < self.pan['量化分析']['旺衰']['己生助'] <= 37:
            self.pan['量化分析']['旺衰']['日干'] = '弱'
        elif 37 < self.pan['量化分析']['旺衰']['己生助'] <= 47:
            self.pan['量化分析']['旺衰']['日干'] = '偏弱'
        elif 47 < self.pan['量化分析']['旺衰']['己生助'] <= 53:
            self.pan['量化分析']['旺衰']['日干'] = '中'
        elif 53 < self.pan['量化分析']['旺衰']['己生助'] <= 63:
            self.pan['量化分析']['旺衰']['日干'] = '偏强'
        elif 63 < self.pan['量化分析']['旺衰']['己生助'] <= 83:
            self.pan['量化分析']['旺衰']['日干'] = '强'
        elif 83 < self.pan['量化分析']['旺衰']['己生助']:
            self.pan['量化分析']['旺衰']['日干'] = '极强'

    def _geju(self):
        """Derive the chart pattern and the selection pattern."""
        # Chart pattern: look up day stem x month branch in the traditional
        # pattern table; the cell is a space-separated string
        # "pattern index [suffix]".
        for tiangan in self.pan['量化分析']['八字传统定格表']:
            if self.pan['八字单字']['日干']['宫主'] == tiangan:
                geju_str = self.pan['量化分析']['八字传统定格表'][tiangan][self.pan['八字单字']['月支']['宫主']]
                self.pan['量化分析']['八字格局'] = geju_str.split(' ')[0]
                self.pan['量化分析']['格局序号'] = geju_str.split(' ')[1]
                if len(geju_str.split(' ')) >= 3:
                    self.pan['量化分析']['八字格局'] += geju_str.split(' ')[2]
        # Selection pattern follows the day-master strength band.
        if self.pan['量化分析']['旺衰']['日干'] == '极弱':
            self.pan['量化分析']['取用格局'] = '从弱'
        elif self.pan['量化分析']['旺衰']['日干'] == '极强':
            self.pan['量化分析']['取用格局'] = '从强'
        elif self.pan['量化分析']['旺衰']['日干'] == '中':
            self.pan['量化分析']['取用格局'] = '通关'
        else:
            self.pan['量化分析']['取用格局'] = '扶抑'

    def _yongshen(self):
        """Select favourable/unfavourable elements ('八字喜忌', '建议取用')."""
        # Selection cannot rely on quantification alone; it is case-split:
        # 扶抑 (support/restrain):
        #   weak day master with many 官杀: cannot restrain them, drain them
        #   instead, so pick 印枭 (resource)
        #   weak day master with many 财星: needs 比劫 (peers) for support
        #   weak day master with many 食伤: needs 印枭 to restrain them
        #   strong day master with many 印枭: needs 财星 to drain and restrain
        #   strong day master with many 比劫: needs 官杀
        # 从格 (follow):
        #   extremely strong follows strength, extremely weak follows weakness
        # 通关 (mediation):
        #   balanced day master: 财+印 needs 官杀 to mediate, 印+食 needs 比劫,
        #   官+比 needs 印枭
        # 调候 (climate adjustment):
        #   balanced and no mediation needed: hot wood/fire charts take
        #   metal/water, cold metal/water charts take wood/fire
        # 1. Favourable/unfavourable elements
        # 1.1. Support/restrain case
        if self.pan['量化分析']['取用格局'] == '扶抑':
            # 1.1.1. Weak day master (drain side necessarily exceeds 53%).
            if self.pan['量化分析']['旺衰']['日干'] in ['弱', '偏弱']:
                if self.pan['量化分析']['六亲']['官杀']['归一'] >= self.pan['量化分析']['六亲']['财星']['归一']\
                        or self.pan['量化分析']['六亲']['官杀']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:  # pick 印枭
                    self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['印枭']
                    self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['比劫']
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['官杀']
                    if self.pan['量化分析']['六亲']['财星']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:
                        self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['财星']
                    else:
                        self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['食伤']
                elif self.pan['量化分析']['六亲']['财星']['归一'] > self.pan['量化分析']['六亲']['官杀']['归一'] \
                        or self.pan['量化分析']['六亲']['财星']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:  # pick 比劫
                    self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['比劫']
                    self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['印枭']
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['财星']
                    if self.pan['量化分析']['六亲']['官杀']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:
                        self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['官杀']
                    else:
                        self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['食伤']
                else:  # pick 印枭
                    self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['印枭']
                    self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['比劫']
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['食伤']
                    if self.pan['量化分析']['六亲']['官杀']['归一'] >= self.pan['量化分析']['六亲']['财星']['归一']:
                        self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['官杀']
                    else:
                        self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['财星']
            # 1.1.2. Strong day master (resource+peer side exceeds 53%).
            elif self.pan['量化分析']['旺衰']['日干'] in ['强', '偏强']:
                if self.pan['量化分析']['六亲']['印枭']['归一'] >= self.pan['量化分析']['六亲']['比劫']['归一']:  # pick 财星
                    self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['财星']
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['印枭']
                    self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['比劫']
                    if self.pan['量化分析']['六亲']['官杀']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:
                        self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['官杀']
                    else:
                        self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['食伤']
                else:  # pick 官杀
                    self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['官杀']
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['印枭']
                    self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['比劫']
                    if self.pan['量化分析']['六亲']['财星']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:
                        self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['财星']
                    else:
                        self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['食伤']
        # 1.2. Follow-pattern case (mirror image of support/restrain).
        elif self.pan['量化分析']['取用格局'] in ['从弱', '从强']:
            # 1.2.1. Following weakness favours the drain side.
            if self.pan['量化分析']['取用格局'] == '从弱':
                if self.pan['量化分析']['六亲']['官杀']['归一'] >= self.pan['量化分析']['六亲']['财星']['归一'] \
                        or self.pan['量化分析']['六亲']['官杀']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['印枭']
                    self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['比劫']
                    self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['官杀']
                    if self.pan['量化分析']['六亲']['财星']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:
                        self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['财星']
                    else:
                        self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['食伤']
                elif self.pan['量化分析']['六亲']['财星']['归一'] > self.pan['量化分析']['六亲']['官杀']['归一'] \
                        or self.pan['量化分析']['六亲']['财星']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['比劫']
                    self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['印枭']
                    self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['财星']
                    if self.pan['量化分析']['六亲']['官杀']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:
                        self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['官杀']
                    else:
                        self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['食伤']
                else:
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['印枭']
                    self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['比劫']
                    self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['食伤']
                    if self.pan['量化分析']['六亲']['官杀']['归一'] >= self.pan['量化分析']['六亲']['财星']['归一']:
                        self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['官杀']
                    else:
                        self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['财星']
            # 1.2.2. Following strength favours the support side.
            elif self.pan['量化分析']['取用格局'] == '从强':
                if self.pan['量化分析']['六亲']['印枭']['归一'] >= self.pan['量化分析']['六亲']['比劫']['归一']:
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['财星']
                    self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['印枭']
                    self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['比劫']
                    if self.pan['量化分析']['六亲']['官杀']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:
                        self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['官杀']
                    else:
                        self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['食伤']
                else:
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['官杀']
                    self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['印枭']
                    self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['比劫']
                    if self.pan['量化分析']['六亲']['财星']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:
                        self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['财星']
                    else:
                        self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['食伤']
        # 1.3. Mediation case.
        elif self.pan['量化分析']['旺衰']['日干'] == '中':
            if self.pan['量化分析']['六亲']['财星']['归一'] + self.pan['量化分析']['六亲']['印枭']['归一'] >= 53:  # pick 官杀
                self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['官杀']
                self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['比劫']
                if self.pan['量化分析']['六亲']['财星']['归一'] >= self.pan['量化分析']['六亲']['印枭']['归一']:
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['财星']
                    self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['印枭']
                else:
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['印枭']
                    self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['财星']
            elif self.pan['量化分析']['六亲']['印枭']['归一'] + self.pan['量化分析']['六亲']['食伤']['归一'] >= 53:  # pick 比劫
                self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['比劫']
                self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['官杀']
                if self.pan['量化分析']['六亲']['印枭']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['印枭']
                    self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['食伤']
                else:
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['食伤']
                    self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['印枭']
            elif self.pan['量化分析']['六亲']['官杀']['归一'] + self.pan['量化分析']['六亲']['比劫']['归一'] >= 53:  # pick 印枭
                self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['六亲']['印枭']
                if self.pan['量化分析']['六亲']['财星']['归一'] >= self.pan['量化分析']['六亲']['食伤']['归一']:
                    self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['财星']
                else:
                    self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['六亲']['食伤']
                if self.pan['量化分析']['六亲']['官杀']['归一'] >= self.pan['量化分析']['六亲']['比劫']['归一']:
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['官杀']
                    self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['比劫']
                else:
                    self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['六亲']['比劫']
                    self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['六亲']['官杀']
        # 1.4. Climate-adjustment case.
        else:
            if self.pan['量化分析']['五行']['木']['归一'] + self.pan['量化分析']['五行']['火']['归一'] >= self.pan['量化分析']['五行']['金']['归一'] + self.pan['量化分析']['五行']['水']['归一']:  # pick water/metal
                self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['五行']['水']
                self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['五行']['金']
                self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['五行']['火']
                self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['五行']['木']
            else:  # pick fire/wood
                self.pan['量化分析']['八字喜忌']['喜1'] = self.pan['量化分析']['五行']['火']
                self.pan['量化分析']['八字喜忌']['喜2'] = self.pan['量化分析']['五行']['木']
                self.pan['量化分析']['八字喜忌']['忌1'] = self.pan['量化分析']['五行']['水']
                self.pan['量化分析']['八字喜忌']['忌2'] = self.pan['量化分析']['五行']['金']
        # 2. Pick the suggested stem/ten-god from the favourable element:
        # of the two stems belonging to that element, take the stronger one.
        self.pan['量化分析']['建议取用'] = {'五行': self.pan['量化分析']['八字喜忌']['喜1']['五行'], '六亲': self.pan['量化分析']['八字喜忌']['喜1']['六亲']}
        tmp_list = []
        for tiangan in self.pan['量化分析']['天干']:
            if self.pan['量化分析']['建议取用']['五行'] == self.pan['量化分析']['天干'][tiangan]['五行']:
                tmp_list.append(self.pan['量化分析']['天干'][tiangan])
        if tmp_list[0]['归一'] >= tmp_list[1]['归一']:
            self.pan['量化分析']['建议取用']['天干'] = tmp_list[0]['天干']
            self.pan['量化分析']['建议取用']['十神'] = tmp_list[0]['十神']
        else:
            self.pan['量化分析']['建议取用']['天干'] = tmp_list[1]['天干']
            self.pan['量化分析']['建议取用']['十神'] = tmp_list[1]['十神']

    def _qushu(self):
        """Suggested number: concatenation of the two favourable elements' numbers."""
        self.pan['量化分析']['建议取数'] = self.pan['量化分析']['五行'][self.pan['量化分析']['八字喜忌']['喜1']['五行']]['五行数']+self.pan['量化分析']['五行'][self.pan['量化分析']['八字喜忌']['喜2']['五行']]['五行数']

    def output_addition(self):
        """Render the quantified-analysis section of the text report."""
        map_str = ''
        map_str += '\n\n【量化分析】\n'
        map_str += '六亲力量:'
        for i in self.pan['量化分析']['五行']:
            map_str += str(self.pan['量化分析']['五行'][i]['六亲'])
            map_str += str(i)
            map_str += str(round(self.pan['量化分析']['五行'][i]['归一'],2))+'%'
            map_str += ';'
        map_str += '\n'
        map_str += '十神力量:'
        for i in self.pan['量化分析']['天干']:
            map_str += str(self.pan['量化分析']['天干'][i]['十神'])
            map_str += str(i)
            map_str += str(round(self.pan['量化分析']['天干'][i]['归一'],2))+'%'
            map_str += ';'
        map_str += '\n'
        map_str += '命主强弱:'
        map_str += '己生助'+str(round(self.pan['量化分析']['旺衰']['己生助'],2))+'%;'
        map_str += '克泄耗'+str(round(self.pan['量化分析']['旺衰']['克泄耗'],2))+'%;'
        map_str += '阴气'+str(round(self.pan['量化分析']['旺衰']['阴气'],2))+'%;'
        map_str += '阳气'+str(round(self.pan['量化分析']['旺衰']['阳气'],2))+'%;'
        map_str += '命主'+str(self.pan['量化分析']['旺衰']['日干'])+';'
        map_str += '\n'
        map_str += '八字格局:'
        map_str += str(self.pan['量化分析']['八字格局'])+'格;'
        map_str += '\n'
        map_str += '取用格局:'
        map_str += str(self.pan['量化分析']['取用格局']) + '格;'
        map_str += '\n'
        map_str += '八字喜忌:喜'+self.pan['量化分析']['八字喜忌']['喜1']['五行']+self.pan['量化分析']['八字喜忌']['喜2']['五行']+';忌'+self.pan['量化分析']['八字喜忌']['忌1']['五行']+self.pan['量化分析']['八字喜忌']['忌2']['五行']+';'
        map_str += '\n'
        map_str += '建议取用:'+self.pan['量化分析']['建议取用']['六亲']+self.pan['量化分析']['建议取用']['五行']+';'+self.pan['量化分析']['建议取用']['十神']+self.pan['量化分析']['建议取用']['天干']+';'
        map_str += '\n'
        map_str += '建议取数:'+self.pan['量化分析']['建议取数']+';'
        map_str += '\n'
        # # debug: dump every key of the quantified-analysis dict
        # for k in self.pan['量化分析']:
        # map_str += str(k)+': '
        # map_str += str(self.pan['量化分析'][k])
        # map_str += '\n\n'
        return map_str
| 55.411192
| 184
| 0.421533
| 2,857
| 22,774
| 3.328666
| 0.086804
| 0.301788
| 0.424501
| 0.170873
| 0.822292
| 0.745741
| 0.635016
| 0.589274
| 0.551104
| 0.509989
| 0
| 0.013319
| 0.287872
| 22,774
| 410
| 185
| 55.546341
| 0.573067
| 0.045315
| 0
| 0.553314
| 0
| 0
| 0.171957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040346
| false
| 0.011527
| 0.002882
| 0
| 0.060519
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ae68c3ea3965e3f719035242f837e69f2231c6ed
| 52
|
py
|
Python
|
backend/types/files_convertor.py
|
Exanis/cannelloni
|
43adcaf468d95ca774a82e1d2fea3877f0b648a4
|
[
"MIT"
] | 1
|
2017-03-16T16:10:37.000Z
|
2017-03-16T16:10:37.000Z
|
backend/types/files_convertor.py
|
Exanis/cannelloni
|
43adcaf468d95ca774a82e1d2fea3877f0b648a4
|
[
"MIT"
] | null | null | null |
backend/types/files_convertor.py
|
Exanis/cannelloni
|
43adcaf468d95ca774a82e1d2fea3877f0b648a4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf8 -*-
"Files-related convertors"
| 10.4
| 26
| 0.615385
| 5
| 52
| 6.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.173077
| 52
| 4
| 27
| 13
| 0.72093
| 0.884615
| 0
| 0
| 0
| 0
| 0.827586
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
882c1c50ab8e9b8d6006732ce22c17b592419b38
| 19,098
|
py
|
Python
|
tests/unit/modules/test_yumpkg.py
|
xiaowei582648206/saltx
|
1d17b030b973ce5422e0fbe7e17c98c7ca91c49b
|
[
"Apache-2.0"
] | 1
|
2022-02-09T06:40:14.000Z
|
2022-02-09T06:40:14.000Z
|
tests/unit/modules/test_yumpkg.py
|
xiaowei582648206/saltx
|
1d17b030b973ce5422e0fbe7e17c98c7ca91c49b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/modules/test_yumpkg.py
|
xiaowei582648206/saltx
|
1d17b030b973ce5422e0fbe7e17c98c7ca91c49b
|
[
"Apache-2.0"
] | 4
|
2020-11-04T06:28:05.000Z
|
2022-02-09T10:54:49.000Z
|
# -*- coding: utf-8 -*-
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
Mock,
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt libs
import salt.modules.yumpkg as yumpkg
# Canned ``yumpkg.list_repos`` output for a stock CentOS 7 machine: two
# enabled mirrorlist repos (base, updates) plus their disabled source
# counterparts.  Used as a mock return value in the tests below.
LIST_REPOS = {
    'base': {
        'file': '/etc/yum.repos.d/CentOS-Base.repo',
        'gpgcheck': '1',
        'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7',
        'mirrorlist': 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra',
        'name': 'CentOS-$releasever - Base'
    },
    'base-source': {
        'baseurl': 'http://vault.centos.org/centos/$releasever/os/Source/',
        'enabled': '0',
        'file': '/etc/yum.repos.d/CentOS-Sources.repo',
        'gpgcheck': '1',
        'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7',
        'name': 'CentOS-$releasever - Base Sources'
    },
    'updates': {
        'file': '/etc/yum.repos.d/CentOS-Base.repo',
        'gpgcheck': '1',
        'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7',
        'mirrorlist': 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra',
        'name': 'CentOS-$releasever - Updates'
    },
    'updates-source': {
        'baseurl': 'http://vault.centos.org/centos/$releasever/updates/Source/',
        'enabled': '0',
        'file': '/etc/yum.repos.d/CentOS-Sources.repo',
        'gpgcheck': '1',
        'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7',
        'name': 'CentOS-$releasever - Updates Sources'
    }
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class YumTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.yumpkg
'''
def setup_loader_modules(self):
return {
yumpkg: {
'__context__': {
'yum_bin': 'yum',
},
'__grains__': {
'osarch': 'x86_64',
'os_family': 'RedHat',
'osmajorrelease': 7,
},
}
}
def test_latest_version_with_options(self):
with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})):
# with fromrepo
cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.latest_version(
'foo',
refresh=False,
fromrepo='good',
branch='foo')
cmd.assert_called_once_with(
['yum', '--quiet', '--disablerepo=*', '--enablerepo=good',
'--branch=foo', 'list', 'available', 'foo'],
ignore_retcode=True,
output_loglevel='trace',
python_shell=False)
# without fromrepo
cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.latest_version(
'foo',
refresh=False,
enablerepo='good',
disablerepo='bad',
branch='foo')
cmd.assert_called_once_with(
['yum', '--quiet', '--disablerepo=bad', '--enablerepo=good',
'--branch=foo', 'list', 'available', 'foo'],
ignore_retcode=True,
output_loglevel='trace',
python_shell=False)
def test_list_repo_pkgs_with_options(self):
'''
Test list_repo_pkgs with and without fromrepo
NOTE: mock_calls is a stack. The most recent call is indexed
with 0, while the first call would have the highest index.
'''
really_old_yum = MagicMock(return_value='3.2.0')
older_yum = MagicMock(return_value='3.4.0')
newer_yum = MagicMock(return_value='3.4.5')
list_repos_mock = MagicMock(return_value=LIST_REPOS)
kwargs = {'output_loglevel': 'trace',
'ignore_retcode': True,
'python_shell': False}
with patch.object(yumpkg, 'list_repos', list_repos_mock):
# Test with really old yum. The fromrepo argument has no effect on
# the yum commands we'd run.
with patch.dict(yumpkg.__salt__, {'cmd.run': really_old_yum}):
cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.list_repo_pkgs('foo')
# We should have called cmd.run_all twice
self.assertEqual(len(cmd.mock_calls), 2)
# Check args from first call
self.assertEqual(
cmd.mock_calls[1][1],
(['yum', '--quiet', 'list', 'available'],)
)
# Check kwargs from first call
self.assertEqual(cmd.mock_calls[1][2], kwargs)
# Check args from second call
self.assertEqual(
cmd.mock_calls[0][1],
(['yum', '--quiet', 'list', 'installed'],)
)
# Check kwargs from second call
self.assertEqual(cmd.mock_calls[0][2], kwargs)
# Test with really old yum. The fromrepo argument has no effect on
# the yum commands we'd run.
with patch.dict(yumpkg.__salt__, {'cmd.run': older_yum}):
cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.list_repo_pkgs('foo')
# We should have called cmd.run_all twice
self.assertEqual(len(cmd.mock_calls), 2)
# Check args from first call
self.assertEqual(
cmd.mock_calls[1][1],
(['yum', '--quiet', '--showduplicates', 'list', 'available'],)
)
# Check kwargs from first call
self.assertEqual(cmd.mock_calls[1][2], kwargs)
# Check args from second call
self.assertEqual(
cmd.mock_calls[0][1],
(['yum', '--quiet', '--showduplicates', 'list', 'installed'],)
)
# Check kwargs from second call
self.assertEqual(cmd.mock_calls[0][2], kwargs)
# Test with newer yum. We should run one yum command per repo, so
# fromrepo would limit how many calls we make.
with patch.dict(yumpkg.__salt__, {'cmd.run': newer_yum}):
# When fromrepo is used, we would only run one yum command, for
# that specific repo.
cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.list_repo_pkgs('foo', fromrepo='base')
# We should have called cmd.run_all once
self.assertEqual(len(cmd.mock_calls), 1)
# Check args
self.assertEqual(
cmd.mock_calls[0][1],
(['yum', '--quiet', '--showduplicates',
'repository-packages', 'base', 'list', 'foo'],)
)
# Check kwargs
self.assertEqual(cmd.mock_calls[0][2], kwargs)
# Test enabling base-source and disabling updates. We should
# get two calls, one for each enabled repo. Because dict
# iteration order will vary, different Python versions will be
# do them in different orders, which is OK, but it will just
# mean that we will have to check both the first and second
# mock call both times.
cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.list_repo_pkgs(
'foo',
enablerepo='base-source',
disablerepo='updates')
# We should have called cmd.run_all twice
self.assertEqual(len(cmd.mock_calls), 2)
for repo in ('base', 'base-source'):
for index in (0, 1):
try:
# Check args
self.assertEqual(
cmd.mock_calls[index][1],
(['yum', '--quiet', '--showduplicates',
'repository-packages', repo, 'list',
'foo'],)
)
# Check kwargs
self.assertEqual(cmd.mock_calls[index][2], kwargs)
break
except AssertionError:
continue
else:
self.fail("repo '{0}' not checked".format(repo))
def test_list_upgrades_dnf(self):
'''
The subcommand should be "upgrades" with dnf
'''
with patch.dict(yumpkg.__context__, {'yum_bin': 'dnf'}):
# with fromrepo
cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.list_upgrades(
refresh=False,
fromrepo='good',
branch='foo')
cmd.assert_called_once_with(
['dnf', '--quiet', '--disablerepo=*', '--enablerepo=good',
'--branch=foo', 'list', 'upgrades'],
output_loglevel='trace',
ignore_retcode=True,
python_shell=False)
# without fromrepo
cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.list_upgrades(
refresh=False,
enablerepo='good',
disablerepo='bad',
branch='foo')
cmd.assert_called_once_with(
['dnf', '--quiet', '--disablerepo=bad', '--enablerepo=good',
'--branch=foo', 'list', 'upgrades'],
output_loglevel='trace',
ignore_retcode=True,
python_shell=False)
def test_list_upgrades_yum(self):
'''
The subcommand should be "updates" with yum
'''
# with fromrepo
cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.list_upgrades(
refresh=False,
fromrepo='good',
branch='foo')
cmd.assert_called_once_with(
['yum', '--quiet', '--disablerepo=*', '--enablerepo=good',
'--branch=foo', 'list', 'updates'],
output_loglevel='trace',
ignore_retcode=True,
python_shell=False)
# without fromrepo
cmd = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.list_upgrades(
refresh=False,
enablerepo='good',
disablerepo='bad',
branch='foo')
cmd.assert_called_once_with(
['yum', '--quiet', '--disablerepo=bad', '--enablerepo=good',
'--branch=foo', 'list', 'updates'],
output_loglevel='trace',
ignore_retcode=True,
python_shell=False)
def test_refresh_db_with_options(self):
with patch('salt.utils.pkg.clear_rtag', Mock()):
# With check_update=True we will do a cmd.run to run the clean_cmd, and
# then a separate cmd.retcode to check for updates.
# with fromrepo
clean_cmd = Mock()
update_cmd = MagicMock(return_value=0)
with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd,
'cmd.retcode': update_cmd}):
yumpkg.refresh_db(
check_update=True,
fromrepo='good',
branch='foo')
clean_cmd.assert_called_once_with(
['yum', '--quiet', '--assumeyes', 'clean', 'expire-cache', '--disablerepo=*',
'--enablerepo=good', '--branch=foo'],
python_shell=False)
update_cmd.assert_called_once_with(
['yum', '--quiet', '--assumeyes', 'check-update',
'--setopt=autocheck_running_kernel=false', '--disablerepo=*',
'--enablerepo=good', '--branch=foo'],
output_loglevel='trace',
ignore_retcode=True,
python_shell=False)
# without fromrepo
clean_cmd = Mock()
update_cmd = MagicMock(return_value=0)
with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd,
'cmd.retcode': update_cmd}):
yumpkg.refresh_db(
check_update=True,
enablerepo='good',
disablerepo='bad',
branch='foo')
clean_cmd.assert_called_once_with(
['yum', '--quiet', '--assumeyes', 'clean', 'expire-cache', '--disablerepo=bad',
'--enablerepo=good', '--branch=foo'],
python_shell=False)
update_cmd.assert_called_once_with(
['yum', '--quiet', '--assumeyes', 'check-update',
'--setopt=autocheck_running_kernel=false', '--disablerepo=bad',
'--enablerepo=good', '--branch=foo'],
output_loglevel='trace',
ignore_retcode=True,
python_shell=False)
# With check_update=False we will just do a cmd.run for the clean_cmd
# with fromrepo
clean_cmd = Mock()
with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd}):
yumpkg.refresh_db(
check_update=False,
fromrepo='good',
branch='foo')
clean_cmd.assert_called_once_with(
['yum', '--quiet', '--assumeyes', 'clean', 'expire-cache', '--disablerepo=*',
'--enablerepo=good', '--branch=foo'],
python_shell=False)
# without fromrepo
clean_cmd = Mock()
with patch.dict(yumpkg.__salt__, {'cmd.run': clean_cmd}):
yumpkg.refresh_db(
check_update=False,
enablerepo='good',
disablerepo='bad',
branch='foo')
clean_cmd.assert_called_once_with(
['yum', '--quiet', '--assumeyes', 'clean', 'expire-cache', '--disablerepo=bad',
'--enablerepo=good', '--branch=foo'],
python_shell=False)
def test_install_with_options(self):
parse_targets = MagicMock(return_value=({'foo': None}, 'repository'))
with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})), \
patch.object(yumpkg, 'list_holds', MagicMock(return_value=[])), \
patch.dict(yumpkg.__salt__, {'pkg_resource.parse_targets': parse_targets}), \
patch('salt.utils.systemd.has_scope', MagicMock(return_value=False)):
# with fromrepo
cmd = MagicMock(return_value={'retcode': 0})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.install(
refresh=False,
fromrepo='good',
branch='foo')
cmd.assert_called_once_with(
['yum', '-y', '--disablerepo=*', '--enablerepo=good',
'--branch=foo', 'install', 'foo'],
output_loglevel='trace',
python_shell=False,
redirect_stderr=True)
# without fromrepo
cmd = MagicMock(return_value={'retcode': 0})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.install(
refresh=False,
enablerepo='good',
disablerepo='bad',
branch='foo')
cmd.assert_called_once_with(
['yum', '-y', '--disablerepo=bad', '--enablerepo=good',
'--branch=foo', 'install', 'foo'],
output_loglevel='trace',
python_shell=False,
redirect_stderr=True)
def test_upgrade_with_options(self):
with patch.object(yumpkg, 'list_pkgs', MagicMock(return_value={})), \
patch('salt.utils.systemd.has_scope', MagicMock(return_value=False)):
# with fromrepo
cmd = MagicMock(return_value={'retcode': 0})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.upgrade(
refresh=False,
fromrepo='good',
exclude='kernel*',
branch='foo')
cmd.assert_called_once_with(
['yum', '--quiet', '-y', '--disablerepo=*', '--enablerepo=good',
'--branch=foo', '--exclude=kernel*', 'upgrade'],
output_loglevel='trace',
python_shell=False)
# without fromrepo
cmd = MagicMock(return_value={'retcode': 0})
with patch.dict(yumpkg.__salt__, {'cmd.run_all': cmd}):
yumpkg.upgrade(
refresh=False,
enablerepo='good',
disablerepo='bad',
exclude='kernel*',
branch='foo')
cmd.assert_called_once_with(
['yum', '--quiet', '-y', '--disablerepo=bad', '--enablerepo=good',
'--branch=foo', '--exclude=kernel*', 'upgrade'],
output_loglevel='trace',
python_shell=False)
| 43.208145
| 115
| 0.484239
| 1,796
| 19,098
| 4.943764
| 0.13196
| 0.030409
| 0.060818
| 0.047077
| 0.796937
| 0.765852
| 0.74389
| 0.731839
| 0.720126
| 0.708638
| 0
| 0.006066
| 0.387109
| 19,098
| 441
| 116
| 43.306122
| 0.752499
| 0.099644
| 0
| 0.687688
| 0
| 0.006006
| 0.196609
| 0.029277
| 0
| 0
| 0
| 0
| 0.099099
| 1
| 0.024024
| false
| 0
| 0.015015
| 0.003003
| 0.045045
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
88430f5062b8c05301fc75c14108244b19a5a701
| 91
|
py
|
Python
|
keras_losses/__init__.py
|
CyberZHG/keras-losses
|
20f6adaf65770c031f63e69570ec96814c4591e8
|
[
"MIT"
] | 9
|
2018-10-11T03:02:18.000Z
|
2021-02-23T03:22:06.000Z
|
keras_losses/__init__.py
|
CyberZHG/keras-losses
|
20f6adaf65770c031f63e69570ec96814c4591e8
|
[
"MIT"
] | null | null | null |
keras_losses/__init__.py
|
CyberZHG/keras-losses
|
20f6adaf65770c031f63e69570ec96814c4591e8
|
[
"MIT"
] | 2
|
2019-01-03T08:49:17.000Z
|
2021-08-12T10:27:12.000Z
|
from .ranking import *
from .weighted import *
from .coral import *
__version__ = '0.5.0'
| 15.166667
| 23
| 0.703297
| 13
| 91
| 4.615385
| 0.615385
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.175824
| 91
| 5
| 24
| 18.2
| 0.76
| 0
| 0
| 0
| 0
| 0
| 0.054945
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8848ce1af35d57bbbee16305fa556dad09e4aa2c
| 1,278
|
py
|
Python
|
platform/core/polyaxon/tracker/events/operation_run.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/tracker/events/operation_run.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/tracker/events/operation_run.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
import tracker
from events.registry import operation_run
tracker.subscribe(operation_run.OperationRunCreatedEvent)
tracker.subscribe(operation_run.OperationRunUpdatedEvent)
tracker.subscribe(operation_run.OperationRunCleanedTriggeredEvent)
tracker.subscribe(operation_run.OperationRunViewedEvent)
tracker.subscribe(operation_run.OperationRunArchivedEvent)
tracker.subscribe(operation_run.OperationRunRestoredEvent)
tracker.subscribe(operation_run.OperationRunSkippedEvent)
tracker.subscribe(operation_run.OperationRunDeletedEvent)
tracker.subscribe(operation_run.OperationRunDeletedTriggeredEvent)
tracker.subscribe(operation_run.OperationRunStoppedEvent)
tracker.subscribe(operation_run.OperationRunStoppedTriggeredEvent)
tracker.subscribe(operation_run.OperationRunResumedEvent)
tracker.subscribe(operation_run.OperationRunResumedTriggeredEvent)
tracker.subscribe(operation_run.OperationRunRestartedEvent)
tracker.subscribe(operation_run.OperationRunRestartedTriggeredEvent)
tracker.subscribe(operation_run.OperationRunSkippedTriggeredEvent)
tracker.subscribe(operation_run.OperationRunNewStatusEvent)
tracker.subscribe(operation_run.OperationRunSucceededEvent)
tracker.subscribe(operation_run.OperationRunFailedEvent)
tracker.subscribe(operation_run.OperationRunDoneEvent)
| 51.12
| 68
| 0.91471
| 108
| 1,278
| 10.62963
| 0.259259
| 0.219512
| 0.43554
| 0.487805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021909
| 1,278
| 24
| 69
| 53.25
| 0.9184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ee2c74f607e6bb8601355e5b5bbfee305252b336
| 99
|
py
|
Python
|
emiz/weather/avwx/__init__.py
|
theendsofinvention/emiz
|
98b210dd36053ce8062d54e8c501ca4715cd78b5
|
[
"MIT"
] | null | null | null |
emiz/weather/avwx/__init__.py
|
theendsofinvention/emiz
|
98b210dd36053ce8062d54e8c501ca4715cd78b5
|
[
"MIT"
] | 5
|
2020-03-24T16:34:15.000Z
|
2020-06-26T08:31:46.000Z
|
emiz/weather/avwx/__init__.py
|
theendsofinvention/emiz
|
98b210dd36053ce8062d54e8c501ca4715cd78b5
|
[
"MIT"
] | 1
|
2018-04-01T16:02:13.000Z
|
2018-04-01T16:02:13.000Z
|
# coding=utf-8
"""
Access to AVWX API
https://avwx.rest/documentation
"""
from .avwx import AVWX
| 11
| 31
| 0.69697
| 15
| 99
| 4.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 0.151515
| 99
| 8
| 32
| 12.375
| 0.809524
| 0.656566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ee3f26d10e4fd3ce4f4b971ca0eac1d96f87faf4
| 123
|
py
|
Python
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/public/handlers.py
|
drgarcia1986/cookiecutter-muffin
|
7aa861787b4280477a726da99cf9de4047b01d91
|
[
"MIT"
] | 3
|
2016-06-24T21:14:37.000Z
|
2017-03-07T05:36:33.000Z
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/public/handlers.py
|
drgarcia1986/cookiecutter-muffin
|
7aa861787b4280477a726da99cf9de4047b01d91
|
[
"MIT"
] | null | null | null |
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/public/handlers.py
|
drgarcia1986/cookiecutter-muffin
|
7aa861787b4280477a726da99cf9de4047b01d91
|
[
"MIT"
] | null | null | null |
from .. import app
@app.register('/', methods=['GET'])
def index(request):
return app.ps.jinja2.render('index.html')
| 17.571429
| 45
| 0.658537
| 17
| 123
| 4.764706
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009346
| 0.130081
| 123
| 6
| 46
| 20.5
| 0.747664
| 0
| 0
| 0
| 0
| 0
| 0.113821
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ee55530ac7ae7f4f055a981126d2a2981b0523f1
| 7,890
|
py
|
Python
|
aoc2021/inputs/DATA_4.py
|
catalystcf/freezing-archer
|
9d87ced30d04436d2a05ed8ff29ced2c4a438f03
|
[
"MIT"
] | 37
|
2016-12-14T19:01:47.000Z
|
2021-12-06T15:26:54.000Z
|
aoc2021/inputs/DATA_4.py
|
catalystcf/freezing-archer
|
9d87ced30d04436d2a05ed8ff29ced2c4a438f03
|
[
"MIT"
] | 4
|
2016-08-01T05:19:52.000Z
|
2017-01-07T07:47:43.000Z
|
aoc2021/inputs/DATA_4.py
|
catalystcf/freezing-archer
|
9d87ced30d04436d2a05ed8ff29ced2c4a438f03
|
[
"MIT"
] | 3
|
2016-12-02T09:20:42.000Z
|
2021-12-01T13:31:07.000Z
|
27,14,70,7,85,66,65,57,68,23,33,78,4,84,25,18,43,71,76,61,34,82,93,74,26,15,83,64,2,35,19,97,32,47,6,51,99,20,77,75,56,73,80,86,55,36,13,95,52,63,79,72,9,10,16,8,69,11,50,54,81,22,45,1,12,88,44,17,62,0,96,94,31,90,39,92,37,40,5,98,24,38,46,21,30,49,41,87,91,60,48,29,59,89,3,42,58,53,67,28
31 23 52 26 8
27 89 37 80 46
97 19 63 34 79
13 59 45 12 73
42 25 22 6 39
27 71 24 3 0
79 42 32 72 62
99 52 11 92 33
38 22 16 44 39
35 26 76 49 58
39 19 82 53 57
52 98 69 77 23
1 40 18 66 83
34 85 28 48 16
15 93 38 96 27
74 50 88 84 99
34 2 11 25 17
57 4 19 83 1
59 77 42 36 33
73 22 23 37 55
98 91 56 84 78
45 21 24 83 40
46 58 8 67 4
33 97 55 7 86
2 68 64 27 69
68 29 14 49 26
4 21 87 71 32
58 5 17 46 93
45 96 8 83 2
78 91 9 20 42
49 81 19 48 37
38 23 45 82 92
93 99 67 66 42
40 74 25 56 16
21 47 26 75 61
53 66 72 30 34
55 82 77 6 92
60 56 8 22 88
5 71 49 29 74
28 2 32 84 73
52 31 24 68 41
48 82 19 29 65
51 91 97 39 80
3 55 43 40 38
20 89 53 45 75
29 74 19 89 18
32 88 93 46 63
91 4 94 64 5
57 54 49 36 40
97 81 39 77 1
7 57 94 84 39
92 3 28 15 75
88 45 65 81 63
86 4 89 37 71
8 13 66 42 85
60 66 35 47 98
96 27 40 51 39
3 64 25 28 74
58 17 97 59 29
95 31 18 44 37
3 31 97 85 71
79 82 22 61 98
87 14 17 66 75
36 89 88 83 63
44 8 81 25 48
73 84 28 90 94
25 19 44 10 23
8 59 17 9 93
20 77 97 64 6
98 82 27 70 91
18 51 16 99 2
58 22 89 13 19
39 66 91 8 32
49 24 85 94 42
45 70 10 86 4
23 81 66 13 34
25 80 97 5 42
79 35 2 78 9
0 6 91 94 45
21 90 76 50 56
50 92 2 96 75
85 82 80 97 31
61 35 55 27 56
74 42 9 29 90
86 15 88 47 1
18 20 54 92 62
45 22 32 61 75
1 38 50 81 42
82 4 21 77 65
27 51 56 39 48
36 10 62 28 70
94 99 34 54 6
15 1 41 13 12
92 52 2 63 82
90 64 29 69 32
23 77 33 90 17
45 78 5 67 28
57 73 89 81 21
49 64 37 15 14
7 59 4 43 16
81 92 25 28 90
93 72 43 94 26
24 9 13 74 10
21 2 36 32 51
87 97 55 86 71
82 71 99 17 90
69 95 65 55 10
9 92 39 62 78
59 13 61 24 44
8 31 58 0 57
17 83 55 99 27
79 4 33 76 7
81 43 44 49 72
2 48 97 20 77
47 60 35 16 63
93 95 94 1 98
61 57 84 55 22
85 40 65 46 59
21 15 63 77 7
13 99 49 3 96
8 21 14 45 41
65 63 82 62 28
91 44 22 79 96
20 75 86 3 26
74 11 42 59 36
5 52 43 92 99
46 63 10 45 81
13 66 21 32 89
25 28 96 40 88
27 18 31 73 34
3 26 43 32 36
68 87 67 65 99
73 61 20 90 7
21 52 2 82 10
58 49 56 16 80
97 25 93 63 32
87 14 5 22 76
89 92 91 3 51
0 24 95 69 20
96 11 10 1 55
95 86 44 75 70
59 76 45 2 99
1 34 71 81 41
87 14 33 84 96
8 38 9 82 68
27 71 70 75 76
25 87 2 79 96
20 88 50 37 32
48 94 63 86 22
15 6 34 78 59
30 89 51 31 77
74 10 86 71 84
29 54 58 44 5
11 90 26 50 63
64 62 20 40 46
37 9 46 23 31
68 21 25 36 90
17 33 6 50 30
11 89 20 47 60
26 59 34 62 77
84 52 40 97 7
88 30 42 58 94
64 10 2 90 83
44 35 77 91 47
14 74 9 78 53
86 14 0 39 24
87 69 58 8 73
88 74 27 40 51
63 54 55 93 61
16 66 15 21 48
43 70 9 81 42
36 54 99 34 95
98 19 90 25 44
69 56 18 77 49
58 16 67 75 57
36 44 14 98 23
31 5 83 46 3
45 21 41 11 60
33 81 88 92 65
13 51 48 59 71
12 5 70 87 32
42 18 90 73 88
68 29 76 38 55
67 62 15 77 34
39 27 51 54 19
87 8 92 93 88
77 54 15 1 43
35 97 26 21 29
13 46 96 69 47
51 38 91 32 63
73 99 30 15 16
42 58 21 88 44
45 13 27 68 9
36 6 81 53 5
78 76 11 60 1
57 76 50 78 31
45 42 68 53 16
9 88 89 19 21
96 61 97 69 34
98 87 33 82 0
4 16 89 57 64
46 75 77 65 23
71 42 96 52 38
1 21 93 0 35
59 80 53 36 58
97 62 35 1 88
98 60 17 45 94
12 43 65 23 19
71 52 3 40 59
50 76 61 20 22
92 65 38 93 13
55 26 10 46 29
85 23 19 74 34
60 14 27 36 18
53 4 52 49 17
99 56 93 70 28
25 0 77 80 57
91 50 72 76 23
53 58 95 78 59
75 85 90 44 9
30 8 5 60 6
28 35 59 70 96
20 99 98 81 79
94 78 27 71 4
7 34 43 46 51
93 65 22 69 33
92 49 75 35 11
58 39 62 86 83
64 4 76 48 82
74 1 56 95 31
1 78 98 90 55
80 14 36 99 7
85 8 10 9 92
76 11 40 70 62
43 53 74 35 58
46 78 35 28 49
84 73 65 25 34
40 59 66 36 67
16 22 29 0 45
20 56 39 88 91
32 58 35 25 79
78 94 57 38 14
89 87 68 48 76
7 67 40 51 33
95 31 43 93 92
38 21 82 31 23
54 16 77 37 42
73 99 7 34 90
71 26 5 91 52
22 27 47 85 62
2 86 28 37 55
1 82 9 36 31
52 98 89 30 60
13 17 63 38 57
73 50 42 20 12
56 3 67 62 35
59 39 19 22 27
21 58 57 41 54
75 13 82 50 32
23 5 99 66 10
7 19 45 66 78
38 57 40 73 87
58 30 99 53 83
64 1 8 56 95
70 77 16 18 82
72 83 95 37 35
54 59 92 21 79
7 81 86 29 41
52 99 42 57 71
3 15 75 34 77
7 70 5 69 4
34 60 40 73 6
74 54 67 32 38
93 62 17 51 86
57 88 99 3 16
42 74 11 34 7
82 47 71 31 58
69 23 43 4 64
32 19 98 93 41
63 97 8 85 48
63 54 34 38 86
4 27 15 49 0
61 77 53 98 74
62 23 88 97 37
93 28 25 50 13
56 82 41 27 79
23 31 64 7 65
52 98 93 16 57
88 49 10 11 62
43 95 53 51 83
41 10 87 54 86
19 22 13 40 17
37 27 45 29 63
83 85 81 90 7
57 88 47 66 56
67 44 54 88 89
20 46 61 28 92
86 49 60 83 95
42 78 97 51 96
11 62 4 26 31
18 68 87 26 70
62 84 11 33 90
0 45 66 83 6
20 19 27 44 55
52 8 5 7 3
54 94 88 76 92
13 98 22 33 26
95 62 53 81 24
29 69 15 87 25
61 40 84 90 93
7 31 3 28 46
20 51 21 18 38
30 92 39 70 61
27 88 35 96 74
23 5 66 11 42
40 61 90 57 54
41 14 99 62 59
92 10 48 81 52
22 29 77 18 87
31 79 25 94 13
17 26 44 98 57
74 83 51 14 11
76 91 96 64 33
43 45 92 72 27
66 3 28 20 40
88 82 44 71 55
83 47 51 76 24
86 19 42 34 99
30 31 87 48 62
98 53 68 9 21
3 31 6 41 61
24 77 81 96 44
78 73 1 98 11
40 80 27 65 92
62 67 2 30 10
78 46 50 65 56
84 16 32 58 86
22 12 54 99 35
9 43 55 10 94
66 81 59 92 76
78 3 55 23 83
13 42 94 91 22
14 37 31 67 71
8 61 57 34 43
74 50 0 39 65
78 16 13 91 34
14 74 86 3 97
12 89 58 65 51
29 57 48 44 93
95 1 42 39 92
93 96 16 85 25
59 3 70 19 17
21 84 58 38 86
57 10 35 95 79
81 44 73 63 9
22 1 96 7 93
40 49 2 4 66
87 21 17 32 48
44 28 42 99 26
69 8 85 86 75
21 31 37 87 28
89 43 74 83 57
95 29 92 88 35
94 25 97 81 50
15 19 73 45 63
92 62 67 95 57
30 8 4 39 64
99 31 70 63 96
25 53 24 93 35
34 51 82 91 28
41 30 20 56 46
16 32 98 60 35
67 9 43 42 88
78 90 71 5 29
49 31 37 63 18
80 40 88 5 62
3 6 74 71 97
19 58 63 59 38
50 64 34 68 45
25 30 21 33 83
10 65 67 17 50
21 51 18 68 59
29 78 77 99 76
62 35 96 7 95
82 53 42 49 69
74 65 89 6 1
18 30 72 75 24
60 50 52 55 82
68 99 4 61 22
9 37 84 57 87
96 85 56 72 2
9 38 98 12 4
34 45 74 97 86
18 94 64 70 68
91 41 58 39 66
34 13 26 80 29
0 4 21 60 90
39 73 12 2 19
64 44 61 88 45
59 50 8 91 49
34 85 55 2 75
10 15 89 12 63
90 29 87 73 71
38 17 84 45 9
97 98 77 23 61
47 43 22 58 1
63 44 2 94 99
33 81 51 49 13
38 86 42 91 23
7 67 68 39 84
4 26 12 38 41
43 16 88 71 99
50 24 19 77 98
23 73 44 10 51
56 42 30 52 59
57 16 9 62 27
26 65 56 10 82
0 74 78 12 99
77 18 38 5 37
7 60 40 90 23
14 69 18 51 8
21 79 60 36 12
68 44 59 45 16
90 50 85 25 70
91 31 30 54 26
24 40 51 72 63
31 60 62 25 96
9 44 35 28 91
97 4 34 81 2
61 68 94 52 86
0 57 95 88 94
36 38 25 35 19
13 6 8 61 98
45 85 86 69 97
41 32 7 15 59
41 82 19 29 34
44 96 6 91 76
69 21 32 94 98
4 10 88 30 2
8 74 56 65 99
36 91 73 15 54
62 55 40 27 44
11 60 95 61 46
31 32 21 41 35
74 86 83 89 79
2 96 94 82 68
39 83 49 30 15
62 11 86 99 59
51 80 12 72 58
87 66 98 53 29
44 71 18 63 85
11 75 60 66 13
36 9 94 57 8
10 12 32 3 86
4 29 54 70 21
27 33 76 83 67
77 29 65 39 44
52 34 25 93 64
35 4 57 92 84
41 51 88 96 0
21 91 82 3 26
23 8 36 20 73
54 39 60 34 57
49 99 97 69 43
41 93 95 80 63
73 77 4 9 22
17 33 15 86 79
38 16 99 98 30
64 92 76 50 68
83 85 52 87 88
57 53 13 36 76
7 10 91 3 22
8 84 56 73 59
62 80 85 38 33
68 97 47 14 96
36 8 98 43 70
85 95 31 1 51
33 41 78 89 56
76 16 15 34 82
12 18 39 4 3
98 49 41 30 95
68 89 81 48 84
15 19 90 66 14
32 1 88 34 64
73 65 6 20 86
22 18 13 74 34
75 4 60 88 46
25 97 54 94 91
42 67 40 11 81
5 12 49 48 15
82 91 18 73 57
97 50 34 16 66
29 43 81 20 15
19 44 85 4 32
90 58 39 53 42
50 53 83 10 0
93 16 84 23 13
89 63 75 69 51
65 35 67 56 70
4 37 29 47 38
| 13.106312
| 289
| 0.62725
| 2,600
| 7,890
| 1.903462
| 0.038462
| 0.003233
| 0.002425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.980388
| 0.360203
| 7,890
| 601
| 290
| 13.12812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ee613069e72fc22445ed875ffca215565dc7a91f
| 175
|
py
|
Python
|
script/resumeQuartHeure.py
|
nicolasleger/velibstats
|
850028cd6aa8ba86f7fe597433f8fc6aca211aa0
|
[
"MIT"
] | null | null | null |
script/resumeQuartHeure.py
|
nicolasleger/velibstats
|
850028cd6aa8ba86f7fe597433f8fc6aca211aa0
|
[
"MIT"
] | null | null | null |
script/resumeQuartHeure.py
|
nicolasleger/velibstats
|
850028cd6aa8ba86f7fe597433f8fc6aca211aa0
|
[
"MIT"
] | null | null | null |
from resumeLib import debuterCalculResumeOfResume, debuterCalculResumeOfResumeConso
import datetime
debuterCalculResumeOfResume(5, 15)
debuterCalculResumeOfResumeConso(5, 15)
| 35
| 83
| 0.897143
| 13
| 175
| 12.076923
| 0.615385
| 0.038217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036585
| 0.062857
| 175
| 5
| 84
| 35
| 0.920732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ee80953493527f46a32b0f63bf2a5af30c960fb3
| 206
|
py
|
Python
|
payrolls/admin.py
|
aaronmatei/Payroll-System-Django
|
5605e6a152c56cd171c43dfd07ff0a99eea65b4d
|
[
"bzip2-1.0.6"
] | null | null | null |
payrolls/admin.py
|
aaronmatei/Payroll-System-Django
|
5605e6a152c56cd171c43dfd07ff0a99eea65b4d
|
[
"bzip2-1.0.6"
] | null | null | null |
payrolls/admin.py
|
aaronmatei/Payroll-System-Django
|
5605e6a152c56cd171c43dfd07ff0a99eea65b4d
|
[
"bzip2-1.0.6"
] | 2
|
2020-09-08T07:12:34.000Z
|
2021-11-19T08:25:22.000Z
|
from django.contrib import admin
from .models import Department,Employee,Payrolls
# Register your models here.
admin.site.register(Department)
admin.site.register(Employee)
admin.site.register(Payrolls)
| 20.6
| 48
| 0.820388
| 27
| 206
| 6.259259
| 0.481481
| 0.159763
| 0.301775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092233
| 206
| 9
| 49
| 22.888889
| 0.903743
| 0.126214
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c98d2659ca34adf8804849290193ff37893a2510
| 166
|
py
|
Python
|
lib/aws_sso_lib/__init__.py
|
OlafConijn/aws-sso-util
|
2df0fff8d4a8a43d76fa31d429bd8d6c9657f144
|
[
"Apache-2.0"
] | null | null | null |
lib/aws_sso_lib/__init__.py
|
OlafConijn/aws-sso-util
|
2df0fff8d4a8a43d76fa31d429bd8d6c9657f144
|
[
"Apache-2.0"
] | null | null | null |
lib/aws_sso_lib/__init__.py
|
OlafConijn/aws-sso-util
|
2df0fff8d4a8a43d76fa31d429bd8d6c9657f144
|
[
"Apache-2.0"
] | null | null | null |
__version__ = '1.0.0'
from .sso import get_boto3_session, login, list_available_accounts, list_available_roles
from .assignments import Assignment, list_assignments
| 33.2
| 88
| 0.837349
| 23
| 166
| 5.565217
| 0.695652
| 0.203125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026667
| 0.096386
| 166
| 4
| 89
| 41.5
| 0.826667
| 0
| 0
| 0
| 0
| 0
| 0.03012
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c9a0d41b5bbddafbc70c7d347c3aa3f83f90bd72
| 197
|
py
|
Python
|
P1/admin.py
|
rmohsen/webb
|
cc84260b9c7a5e3bb3a58f75d8c9c606288ef99a
|
[
"MIT"
] | null | null | null |
P1/admin.py
|
rmohsen/webb
|
cc84260b9c7a5e3bb3a58f75d8c9c606288ef99a
|
[
"MIT"
] | null | null | null |
P1/admin.py
|
rmohsen/webb
|
cc84260b9c7a5e3bb3a58f75d8c9c606288ef99a
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from P1.models import Post, Post_Word, Comment
admin.site.register(Post)
admin.site.register(Post_Word)
admin.site.register(Comment)
# Register your models here.
| 21.888889
| 46
| 0.807107
| 30
| 197
| 5.233333
| 0.466667
| 0.171975
| 0.324841
| 0.267516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00565
| 0.101523
| 197
| 8
| 47
| 24.625
| 0.881356
| 0.13198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c9ac24322c18e222d1f8d5518921b3f10721bd4c
| 53
|
py
|
Python
|
code/sir/__init__.py
|
FrederikWR/course-02443-stochastic-virus-outbreak
|
4f1d7f1fa4aa197b31ed86c4daf420d5a637974e
|
[
"MIT"
] | null | null | null |
code/sir/__init__.py
|
FrederikWR/course-02443-stochastic-virus-outbreak
|
4f1d7f1fa4aa197b31ed86c4daf420d5a637974e
|
[
"MIT"
] | null | null | null |
code/sir/__init__.py
|
FrederikWR/course-02443-stochastic-virus-outbreak
|
4f1d7f1fa4aa197b31ed86c4daf420d5a637974e
|
[
"MIT"
] | null | null | null |
from .parameter_estimator import ParameterEstimator
| 17.666667
| 51
| 0.886792
| 5
| 53
| 9.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09434
| 53
| 2
| 52
| 26.5
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c9d55bc304681f3f1c2bd6c64d5c641d9fc76f45
| 114
|
py
|
Python
|
controlpanel/controlpanel/ros_check/admin.py
|
filesmuggler/acc_web_server
|
2497cbfdb08db30d4eb1ca842cee7c2f65ff7470
|
[
"MIT"
] | null | null | null |
controlpanel/controlpanel/ros_check/admin.py
|
filesmuggler/acc_web_server
|
2497cbfdb08db30d4eb1ca842cee7c2f65ff7470
|
[
"MIT"
] | null | null | null |
controlpanel/controlpanel/ros_check/admin.py
|
filesmuggler/acc_web_server
|
2497cbfdb08db30d4eb1ca842cee7c2f65ff7470
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import Car
admin.site.register(Car)
| 14.25
| 32
| 0.780702
| 17
| 114
| 5.235294
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149123
| 114
| 7
| 33
| 16.285714
| 0.917526
| 0.22807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a004c1872468383f76e4f9663e23c1a8f8bd4a36
| 223
|
py
|
Python
|
oauth2_provider/contrib/rest_framework/__init__.py
|
grnspace/django-oauth-toolkit
|
3d876563a2528eadac0f832f360a0b269b99b94e
|
[
"BSD-2-Clause-FreeBSD"
] | 4
|
2017-01-09T17:01:28.000Z
|
2021-06-29T21:26:15.000Z
|
oauth2_provider/contrib/rest_framework/__init__.py
|
grnspace/django-oauth-toolkit
|
3d876563a2528eadac0f832f360a0b269b99b94e
|
[
"BSD-2-Clause-FreeBSD"
] | 7
|
2018-03-14T19:40:42.000Z
|
2020-09-08T16:36:45.000Z
|
oauth2_provider/contrib/rest_framework/__init__.py
|
grnspace/django-oauth-toolkit
|
3d876563a2528eadac0f832f360a0b269b99b94e
|
[
"BSD-2-Clause-FreeBSD"
] | 7
|
2018-03-07T14:02:15.000Z
|
2020-08-13T10:15:37.000Z
|
# flake8: noqa
from .authentication import OAuth2Authentication
from .permissions import (
TokenHasScope, TokenHasReadWriteScope, TokenMatchesOASRequirements,
TokenHasResourceScope, IsAuthenticatedOrTokenHasScope
)
| 31.857143
| 71
| 0.843049
| 14
| 223
| 13.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010101
| 0.112108
| 223
| 6
| 72
| 37.166667
| 0.939394
| 0.053812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4e57f5bac31573d74b2919bfee097904f4d03779
| 253
|
py
|
Python
|
clinicadl/clinicadl/tools/inputs/filename_types.py
|
yogeshmj/AD-DL
|
76b9b564061581effe8f3698992bfea3ffb055fa
|
[
"MIT"
] | 112
|
2019-10-21T14:50:35.000Z
|
2022-03-29T03:15:47.000Z
|
clinicadl/clinicadl/tools/inputs/filename_types.py
|
yogeshmj/AD-DL
|
76b9b564061581effe8f3698992bfea3ffb055fa
|
[
"MIT"
] | 136
|
2019-10-17T17:40:55.000Z
|
2021-06-30T14:53:29.000Z
|
clinicadl/clinicadl/tools/inputs/filename_types.py
|
yogeshmj/AD-DL
|
76b9b564061581effe8f3698992bfea3ffb055fa
|
[
"MIT"
] | 49
|
2019-11-26T13:57:52.000Z
|
2022-03-20T13:17:42.000Z
|
# coding: utf8
FILENAME_TYPE = {'full': '_T1w_space-MNI152NLin2009cSym_res-1x1x1_T1w',
'cropped': '_T1w_space-MNI152NLin2009cSym_desc-Crop_res-1x1x1_T1w',
'skull_stripped': '_space-Ixi549Space_desc-skullstripped_T1w'}
| 42.166667
| 84
| 0.703557
| 27
| 253
| 6.074074
| 0.62963
| 0.097561
| 0.317073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140097
| 0.181818
| 253
| 5
| 85
| 50.6
| 0.652174
| 0.047431
| 0
| 0
| 0
| 0
| 0.677824
| 0.573222
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4ebe0768d71d86c4de32f67a0ba3b59392f95b9a
| 78
|
py
|
Python
|
schema_matcher_api/featurizer.py
|
columbustech/schema_matcher_api
|
bba5d87e924c41a17fbb4ccf0319628d00d047e2
|
[
"BSD-3-Clause"
] | null | null | null |
schema_matcher_api/featurizer.py
|
columbustech/schema_matcher_api
|
bba5d87e924c41a17fbb4ccf0319628d00d047e2
|
[
"BSD-3-Clause"
] | null | null | null |
schema_matcher_api/featurizer.py
|
columbustech/schema_matcher_api
|
bba5d87e924c41a17fbb4ccf0319628d00d047e2
|
[
"BSD-3-Clause"
] | null | null | null |
from .application import Application
class Featurizer(Application):
pass
| 15.6
| 36
| 0.794872
| 8
| 78
| 7.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 78
| 4
| 37
| 19.5
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
14fd74098ee45fc94bc1faf505ed7c4749b2a43b
| 73
|
py
|
Python
|
src/cortex_skill_utils/api_spec/__init__.py
|
wmcabee-cs/cortex-skill-utils
|
e97eb6decb3dae6647154ee459e6cf7987cbc93f
|
[
"MIT"
] | null | null | null |
src/cortex_skill_utils/api_spec/__init__.py
|
wmcabee-cs/cortex-skill-utils
|
e97eb6decb3dae6647154ee459e6cf7987cbc93f
|
[
"MIT"
] | 2
|
2021-03-25T22:40:33.000Z
|
2021-06-01T23:50:42.000Z
|
src/cortex_skill_utils/api_spec/__init__.py
|
wmcabee-cs/cortex-skill-utils
|
e97eb6decb3dae6647154ee459e6cf7987cbc93f
|
[
"MIT"
] | null | null | null |
from .factory import define_api_spec
from .decorate import cortex_action
| 24.333333
| 36
| 0.863014
| 11
| 73
| 5.454545
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 73
| 2
| 37
| 36.5
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0903fe4b6d2e5793af697ebd7ba1c3b954fe634f
| 315
|
py
|
Python
|
bin/cheers/controllers/forms.py
|
shutingrz/cheers
|
e8885e1dc2e7e717bc0ac13b4678a9cfe4d99a88
|
[
"MIT"
] | null | null | null |
bin/cheers/controllers/forms.py
|
shutingrz/cheers
|
e8885e1dc2e7e717bc0ac13b4678a9cfe4d99a88
|
[
"MIT"
] | null | null | null |
bin/cheers/controllers/forms.py
|
shutingrz/cheers
|
e8885e1dc2e7e717bc0ac13b4678a9cfe4d99a88
|
[
"MIT"
] | null | null | null |
from cheers.util import Util
from flask_wtf import FlaskForm
from wtforms import StringField, validators
class LoginForm(FlaskForm):
user_id = StringField('user_id', [validators.length(min=1, max=Util.MaxUserIdLength)])
password = StringField('password', [validators.length(min=1, max=Util.MaxUserPassLength)])
| 35
| 91
| 0.796825
| 40
| 315
| 6.2
| 0.525
| 0.048387
| 0.153226
| 0.16129
| 0.217742
| 0.217742
| 0
| 0
| 0
| 0
| 0
| 0.006993
| 0.092063
| 315
| 8
| 92
| 39.375
| 0.86014
| 0
| 0
| 0
| 0
| 0
| 0.047771
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0.5
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
091af48f28617e60426403ff086000b9cb130ff5
| 121
|
py
|
Python
|
NoticeBoard/admin.py
|
MdJunaidMahmood/IITR-Campus-guide
|
feee7217e2170b23da88c80d6e452d0d897be56e
|
[
"MIT"
] | 1
|
2021-08-19T10:04:06.000Z
|
2021-08-19T10:04:06.000Z
|
NoticeBoard/admin.py
|
MdJunaidMahmood/IITR-Campus-guide
|
feee7217e2170b23da88c80d6e452d0d897be56e
|
[
"MIT"
] | null | null | null |
NoticeBoard/admin.py
|
MdJunaidMahmood/IITR-Campus-guide
|
feee7217e2170b23da88c80d6e452d0d897be56e
|
[
"MIT"
] | 2
|
2021-07-10T04:41:50.000Z
|
2021-08-19T10:22:08.000Z
|
from django.contrib import admin
from .models import message
# Expose the `message` model in the Django admin interface.
admin.site.register(message)
# Register your models here.
| 17.285714
| 32
| 0.801653
| 17
| 121
| 5.705882
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132231
| 121
| 6
| 33
| 20.166667
| 0.92381
| 0.214876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
091c645374e305a19472f540f545194e398b8a50
| 456
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/azurestack/v20170601/__init__.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/azurestack/v20170601/__init__.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/azurestack/v20170601/__init__.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
# NOTE: the star-imports below intentionally re-export every public name of
# each generated submodule at the package level (generated-SDK convention).
from .customer_subscription import *
from .get_customer_subscription import *
from .get_registration import *
from .get_registration_activation_key import *
from .list_product_details import *
from .registration import *
from . import outputs
| 35.076923
| 80
| 0.769737
| 65
| 456
| 5.261538
| 0.661538
| 0.175439
| 0.114035
| 0.175439
| 0.192982
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002577
| 0.149123
| 456
| 12
| 81
| 38
| 0.878866
| 0.445175
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
092ca9729723cd440bef589f6a111ec05d74dc14
| 1,926
|
py
|
Python
|
Python/POO/ex009.py
|
henrique-tavares/Coisas
|
f740518b1bedec5b0ea8c12ae07a2cac21eb51ae
|
[
"MIT"
] | 1
|
2020-02-07T20:39:26.000Z
|
2020-02-07T20:39:26.000Z
|
Python/POO/ex009.py
|
neptune076/Coisas
|
85c064cc0e134465aaf6ef41acf747d47f108fc9
|
[
"MIT"
] | null | null | null |
Python/POO/ex009.py
|
neptune076/Coisas
|
85c064cc0e134465aaf6ef41acf747d47f108fc9
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod


class Animal(metaclass=ABCMeta):
    """Abstract interface that every animal implementation must satisfy."""

    __slots__ = ()

    @property
    @abstractmethod
    def nome(self):
        """The animal's name."""

    @property
    @abstractmethod
    def peso(self):
        """The animal's weight."""

    @property
    @abstractmethod
    def idade(self):
        """The animal's age."""

    @abstractmethod
    def comer(self):
        """Eat."""

    @abstractmethod
    def locomover(self):
        """Move around."""
class Pessoa(Animal):
    """Concrete Animal representing a person."""

    __slots__ = ("_nome", "_peso", "_idade")

    def __init__(self, nome, peso, idade):
        # Assignments go through the property setters defined below.
        self.nome = nome
        self.peso = peso
        self.idade = idade

    @property
    def nome(self):
        """The person's name."""
        return self._nome

    @nome.setter
    def nome(self, value):
        self._nome = value

    @property
    def peso(self):
        """The person's weight."""
        return self._peso

    @peso.setter
    def peso(self, value):
        self._peso = value

    @property
    def idade(self):
        """The person's age."""
        return self._idade

    @idade.setter
    def idade(self, value):
        self._idade = value

    def comer(self, comida):
        """Print that the person is eating *comida*."""
        print(f"{self.nome} está comendo {comida}")

    def locomover(self):
        """Print that the person walks on two legs."""
        print(f"{self.nome} está andando sobre duas pernas")
class Gato(Animal):
    """Concrete Animal representing a cat."""

    __slots__ = ("_nome", "_peso", "_idade")

    def __init__(self, nome, peso, idade):
        # Assignments go through the property setters defined below.
        self.nome = nome
        self.peso = peso
        self.idade = idade

    @property
    def nome(self):
        """The cat's name."""
        return self._nome

    @nome.setter
    def nome(self, value):
        self._nome = value

    @property
    def peso(self):
        """The cat's weight."""
        return self._peso

    @peso.setter
    def peso(self, value):
        self._peso = value

    @property
    def idade(self):
        """The cat's age."""
        return self._idade

    @idade.setter
    def idade(self, value):
        self._idade = value

    def comer(self):
        """Print that the cat is eating its food."""
        print(f"{self.nome} está comendo sua ração")

    def locomover(self):
        """Print that the cat walks on four paws."""
        print(f"{self.nome} está andando sobre quatro patas")
| 17.509091
| 61
| 0.577882
| 224
| 1,926
| 4.799107
| 0.160714
| 0.104186
| 0.066977
| 0.052093
| 0.779535
| 0.71814
| 0.671628
| 0.671628
| 0.671628
| 0.671628
| 0
| 0
| 0.316199
| 1,926
| 109
| 62
| 17.669725
| 0.816249
| 0
| 0
| 0.87013
| 0
| 0
| 0.095535
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.298701
| false
| 0.064935
| 0.012987
| 0.077922
| 0.467532
| 0.051948
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
11995caf6d09b033583358c91e92ff3c5b50573f
| 127
|
py
|
Python
|
dyalog_kernel/__main__.py
|
RojerGS/dyalog-jupyter-kernel
|
068bd0761001f7516144f44982a66ca5f18c1634
|
[
"MIT"
] | 49
|
2018-03-29T15:55:54.000Z
|
2022-03-25T01:25:00.000Z
|
dyalog_kernel/__main__.py
|
RojerGS/dyalog-jupyter-kernel
|
068bd0761001f7516144f44982a66ca5f18c1634
|
[
"MIT"
] | 52
|
2018-02-12T14:36:55.000Z
|
2022-03-10T09:45:33.000Z
|
dyalog_kernel/__main__.py
|
RojerGS/dyalog-jupyter-kernel
|
068bd0761001f7516144f44982a66ca5f18c1634
|
[
"MIT"
] | 20
|
2018-06-26T16:06:21.000Z
|
2022-03-06T00:17:18.000Z
|
# Entry point: `python -m dyalog_kernel` launches the Dyalog Jupyter kernel.
from ipykernel.kernelapp import IPKernelApp
from . import DyalogKernel
IPKernelApp.launch_instance(kernel_class=DyalogKernel)
| 25.4
| 54
| 0.874016
| 14
| 127
| 7.785714
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07874
| 127
| 4
| 55
| 31.75
| 0.931624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
11ad3422973fecf8220b7572910b4ca937ad24c2
| 170
|
py
|
Python
|
week04/code11.py
|
byeongal/KMUCP
|
5bafe02c40aae67fc53d9e6cdcb727929368587e
|
[
"MIT"
] | null | null | null |
week04/code11.py
|
byeongal/KMUCP
|
5bafe02c40aae67fc53d9e6cdcb727929368587e
|
[
"MIT"
] | null | null | null |
week04/code11.py
|
byeongal/KMUCP
|
5bafe02c40aae67fc53d9e6cdcb727929368587e
|
[
"MIT"
] | 1
|
2019-11-27T20:28:19.000Z
|
2019-11-27T20:28:19.000Z
|
# Read a score (labelled 0-100) and print the matching grade message.
score = float(input("백분위(0~100)점수를 입력해 주세요 >>"))

# Walk the (threshold, message) table; the for/else falls through to the
# last grade when no threshold matches — same branch order as before.
for limit, message in ((30, "당신의 학점은 A입니다."), (70, "당신의 학점은 B입니다.")):
    if score <= limit:
        print(message)
        break
else:
    print("당신의 학점은 C입니다.")
| 21.25
| 48
| 0.588235
| 28
| 170
| 3.571429
| 0.714286
| 0.24
| 0.33
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059701
| 0.211765
| 170
| 8
| 49
| 21.25
| 0.686567
| 0
| 0
| 0
| 0
| 0
| 0.368421
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
ee9262265da91013d798ff0f2f0b3e2cdb2c6ce2
| 36
|
py
|
Python
|
octopuslab_installer/__init__.py
|
octopusengine/octopus-init-lite
|
025f7be16dad6a055501e889b5f1c280363e2aa0
|
[
"MIT"
] | 2
|
2020-09-14T08:19:02.000Z
|
2020-09-15T16:40:27.000Z
|
octopuslab_installer/__init__.py
|
octopusengine/octopus-init-lite
|
025f7be16dad6a055501e889b5f1c280363e2aa0
|
[
"MIT"
] | null | null | null |
octopuslab_installer/__init__.py
|
octopusengine/octopus-init-lite
|
025f7be16dad6a055501e889b5f1c280363e2aa0
|
[
"MIT"
] | null | null | null |
from .octopuslab_installer import *
| 18
| 35
| 0.833333
| 4
| 36
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ee965940ded3f5ed146c1d130dde816cc6a561c5
| 638
|
py
|
Python
|
allencv/modules/image_encoders/image_encoder.py
|
sethah/allencv
|
1bdc27359f81290e96b290ccda11f7a9905ebf14
|
[
"Apache-2.0"
] | 8
|
2019-05-09T02:48:54.000Z
|
2022-02-14T03:58:54.000Z
|
allencv/modules/image_encoders/image_encoder.py
|
sethah/allencv
|
1bdc27359f81290e96b290ccda11f7a9905ebf14
|
[
"Apache-2.0"
] | null | null | null |
allencv/modules/image_encoders/image_encoder.py
|
sethah/allencv
|
1bdc27359f81290e96b290ccda11f7a9905ebf14
|
[
"Apache-2.0"
] | null | null | null |
from overrides import overrides
from typing import Sequence
import torch
from allennlp.common import Registrable
from allencv.modules.encoder_base import _EncoderBase
class ImageEncoder(_EncoderBase, Registrable):
    """Abstract base for modules that encode an image into feature tensors.

    Subclasses implement ``forward`` plus the three channel/scale accessors;
    inheriting allennlp's ``Registrable`` makes implementations registrable
    by name.
    """
    @overrides
    def forward(self, # pylint: disable=arguments-differ
                image: torch.Tensor) -> Sequence[torch.Tensor]:
        # Map an image batch to a sequence of feature tensors; subclasses implement.
        raise NotImplementedError

    def get_input_channels(self) -> int:
        # Number of channels expected in the input image.
        raise NotImplementedError

    def get_output_channels(self) -> Sequence[int]:
        # Channel count for each tensor returned by forward().
        raise NotImplementedError

    def get_output_scales(self) -> Sequence[int]:
        # Per-output scale factors — presumably spatial scale relative to the
        # input image; confirm against concrete subclasses.
        raise NotImplementedError
| 25.52
| 63
| 0.733542
| 68
| 638
| 6.75
| 0.485294
| 0.20915
| 0.176471
| 0.196078
| 0.281046
| 0.169935
| 0
| 0
| 0
| 0
| 0
| 0
| 0.200627
| 638
| 25
| 64
| 25.52
| 0.9
| 0.050157
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.3125
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ee9e7ca829ddf175218577e5532856083ce64e24
| 215
|
py
|
Python
|
plugins/Foosun_cms.py
|
cflq3/getcms
|
6cf07da0ea3ec644866df715cff1f311a46ee378
|
[
"MIT"
] | 22
|
2016-09-01T08:27:07.000Z
|
2021-01-11T13:32:59.000Z
|
plugins/Foosun_cms.py
|
cflq3/getcms
|
6cf07da0ea3ec644866df715cff1f311a46ee378
|
[
"MIT"
] | null | null | null |
plugins/Foosun_cms.py
|
cflq3/getcms
|
6cf07da0ea3ec644866df715cff1f311a46ee378
|
[
"MIT"
] | 20
|
2015-11-07T19:09:48.000Z
|
2018-05-02T03:10:41.000Z
|
#!/usr/bin/env python
# encoding: utf-8
def run(whatweb, pluginname):
    """Probe for Foosun CMS by checking its known static files."""
    fingerprints = (
        ("sysImages/css/PagesCSS.css", "foosun"),
        ("Tags.html", "Foosun"),
    )
    for path, marker in fingerprints:
        whatweb.recog_from_file(pluginname, path, marker)
| 26.875
| 79
| 0.730233
| 29
| 215
| 5.275862
| 0.689655
| 0.156863
| 0.20915
| 0.261438
| 0.392157
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005236
| 0.111628
| 215
| 7
| 80
| 30.714286
| 0.795812
| 0.167442
| 0
| 0
| 0
| 0
| 0.267045
| 0.147727
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
eeeaaef7c1da337a27d82420844848f279263bb2
| 168
|
py
|
Python
|
4_Backwoods_Forest/174-Forest_Cannon_Dancing/cannon_dance.py
|
katitek/Code-Combat
|
fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef
|
[
"MIT"
] | null | null | null |
4_Backwoods_Forest/174-Forest_Cannon_Dancing/cannon_dance.py
|
katitek/Code-Combat
|
fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef
|
[
"MIT"
] | null | null | null |
4_Backwoods_Forest/174-Forest_Cannon_Dancing/cannon_dance.py
|
katitek/Code-Combat
|
fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef
|
[
"MIT"
] | null | null | null |
def onSpawn():
    """Shuttle the pet back and forth between two points forever."""
    while True:
        for x, y in ((48, 8), (12, 8)):
            pet.moveXY(x, y)


# Attach the pet behaviour, then keep the hero shouting in a loop.
pet.on("spawn", onSpawn)
while True:
    hero.say("Run!!!")
    hero.say("Faster!")
| 16.8
| 25
| 0.547619
| 24
| 168
| 3.833333
| 0.625
| 0.26087
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048
| 0.255952
| 168
| 9
| 26
| 18.666667
| 0.688
| 0
| 0
| 0.25
| 0
| 0
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| true
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e10ae62a8a83b2696d543a218c2d553876a0cad2
| 14,221
|
py
|
Python
|
tests/subscription_manager/endpoints/test_users.py
|
eurocontrol-swim/subscription-manager
|
95700334cb5d58957043c6c487b56b1dd6641ec0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/subscription_manager/endpoints/test_users.py
|
eurocontrol-swim/subscription-manager
|
95700334cb5d58957043c6c487b56b1dd6641ec0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/subscription_manager/endpoints/test_users.py
|
eurocontrol-swim/subscription-manager
|
95700334cb5d58957043c6c487b56b1dd6641ec0
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Copyright 2019 EUROCONTROL
==========================================
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==========================================
Editorial note: this license is an instance of the BSD license template as provided by the Open Source Initiative:
http://opensource.org/licenses/BSD-3-Clause
Details on EUROCONTROL: http://www.eurocontrol.int
"""
import json
from unittest import mock
import pytest
from sqlalchemy.exc import IntegrityError
from werkzeug.security import check_password_hash
from subscription_manager import BASE_PATH
from swim_backend.auth.auth import HASH_METHOD
from swim_backend.db import db_save
from subscription_manager.db.users import get_user_by_id
from tests.subscription_manager.utils import make_user, make_basic_auth_header
from tests.conftest import DEFAULT_LOGIN_PASS
__author__ = "EUROCONTROL (SWIM)"
@pytest.fixture
def generate_user(session):
    """Factory fixture: builds, persists and returns a fresh user."""
    def _make(is_admin=False):
        new_user = make_user(is_admin=is_admin)
        return db_save(session, new_user)

    return _make
def basic_auth_header(user):
    """Basic-auth header for *user* using the default test password."""
    username = user.username
    return make_basic_auth_header(username, DEFAULT_LOGIN_PASS)
def test_get_user__user_does_not_exist__returns_404(test_client, test_admin_user):
    """GET /users/<id> for an unknown id returns 404."""
    url = f'{BASE_PATH}/users/123456'
    response = test_client.get(url, headers=basic_auth_header(test_admin_user))
    assert 404 == response.status_code


def test_get_user__unauthorized_user__returns_401(test_client, generate_user):
    """GET /users/<id> with bad credentials returns 401."""
    user = generate_user()
    user.password = 'password'
    url = f'{BASE_PATH}/users/{user.id}'
    response = test_client.get(url, headers=make_basic_auth_header('fake_username', 'fake_password'))
    assert 401 == response.status_code
    response_data = json.loads(response.data)
    assert 'Invalid credentials' == response_data['detail']


def test_get_user__non_admin_user__returns_403(test_client, generate_user):
    """GET /users/<id> as a non-admin returns 403."""
    user = generate_user(is_admin=False)
    url = f'{BASE_PATH}/users/{user.id}'
    response = test_client.get(url, headers=make_basic_auth_header(user.username, 'password'))
    assert 403 == response.status_code
    response_data = json.loads(response.data)
    assert 'Admin rights required' == response_data['detail']


def test_get_user__user_exists_and_is_returned(test_client, generate_user, test_admin_user):
    """GET /users/<id> as admin returns the user's public fields."""
    user = generate_user()
    url = f'{BASE_PATH}/users/{user.id}'
    response = test_client.get(url, headers=basic_auth_header(test_admin_user))
    assert 200 == response.status_code
    response_data = json.loads(response.data)
    assert user.username == response_data['username']
    assert user.active == response_data['active']
    assert user.is_admin == response_data['is_admin']


def test_get_users__unauthorized_user__returns_401(test_client, generate_user):
    """GET /users/ with bad credentials returns 401."""
    users = [generate_user(), generate_user()]
    url = f'{BASE_PATH}/users/'
    response = test_client.get(url, headers=make_basic_auth_header('fake_username', 'fake_password'))
    assert 401 == response.status_code
    response_data = json.loads(response.data)
    assert 'Invalid credentials' == response_data['detail']


def test_get_users__non_admin_user__returns_403(test_client, generate_user, test_user):
    """GET /users/ as a non-admin returns 403."""
    users = [generate_user(), generate_user()]
    url = f'{BASE_PATH}/users/'
    response = test_client.get(url, headers=basic_auth_header(test_user))
    assert 403 == response.status_code
    response_data = json.loads(response.data)
    assert 'Admin rights required' == response_data['detail']


def test_get_users__users_exist_and_are_returned_as_list(test_client, generate_user, test_admin_user):
    """GET /users/ as admin lists all users (generated ones + the test user)."""
    users = [generate_user(), generate_user()]
    url = f'{BASE_PATH}/users/'
    response = test_client.get(url, headers=basic_auth_header(test_admin_user))
    assert 200 == response.status_code
    response_data = json.loads(response.data)
    assert isinstance(response_data, list)
    assert 3 == len(response_data)  # plus the test_user
@pytest.mark.parametrize('missing_property', ['username', 'password'])
def test_post_user__missing_required_property__returns_400(test_client, missing_property, test_admin_user):
    """POST /users/ without a required field returns 400 naming the field."""
    user_data = {
        'username': 'username',
        'password': 'password'
    }
    # Drop the parametrized field to trigger the schema validation error.
    del user_data[missing_property]
    url = f'{BASE_PATH}/users/'
    response = test_client.post(url, data=json.dumps(user_data), content_type='application/json',
                                headers=basic_auth_header(test_admin_user))
    assert 400 == response.status_code
    response_data = json.loads(response.data)
    assert f"'{missing_property}' is a required property" == response_data['detail']
@mock.patch('swim_backend.auth.passwords.is_strong', return_value=False)
def test_post_user__password_is_not_strong_enough__returns_400(mock_password_is_strong, test_client, test_admin_user):
    """POST /users/ rejects a weak password with 400 and an explanatory detail."""
    user_data = {
        'username': 'username',
        'password': 'password'
    }
    url = f'{BASE_PATH}/users/'
    response = test_client.post(url, data=json.dumps(user_data), content_type='application/json',
                                headers=basic_auth_header(test_admin_user))
    assert 400 == response.status_code
    response_data = json.loads(response.data)
    # Fixed: the expected message was an f-string with no placeholders (F541);
    # a plain literal expresses the same value.
    assert "password is not strong enough" == response_data['detail']
@mock.patch('subscription_manager.db.users.save_user', side_effect=IntegrityError(None, None, None))
@mock.patch('swim_backend.auth.passwords.is_strong', return_value=True)
def test_post_user__db_error__returns_409(mock_password_is_strong, mock_create_user, test_client, generate_user,
                                          test_admin_user):
    """POST /users/ maps a DB IntegrityError to a 409 response."""
    user_data = {
        'username': 'username',
        'password': 'password'
    }
    url = f'{BASE_PATH}/users/'
    response = test_client.post(url, data=json.dumps(user_data), content_type='application/json',
                                headers=basic_auth_header(test_admin_user))
    assert 409 == response.status_code
    response_data = json.loads(response.data)
    assert "Error while saving user in DB" == response_data['detail']
def test_post_user__unauthorized_user__returns_401(test_client):
    """POST /users/ with bad credentials returns 401."""
    user_data = {
        'username': 'username',
        'password': 'password'
    }
    url = f'{BASE_PATH}/users/'
    response = test_client.post(url, data=json.dumps(user_data), content_type='application/json',
                                headers=make_basic_auth_header('fake_username', 'fake_password'))
    assert 401 == response.status_code
    response_data = json.loads(response.data)
    assert 'Invalid credentials' == response_data['detail']


def test_post_user__non_admin_user__returns_403(test_client, test_user):
    """POST /users/ as a non-admin returns 403."""
    user_data = {
        'username': 'username',
        'password': 'password'
    }
    url = f'{BASE_PATH}/users/'
    response = test_client.post(url, data=json.dumps(user_data), content_type='application/json',
                                headers=basic_auth_header(test_user))
    assert 403 == response.status_code
    response_data = json.loads(response.data)
    assert 'Admin rights required' == response_data['detail']


@mock.patch('swim_backend.auth.passwords.is_strong', return_value=True)
def test_post_user__user_is_saved_in_db(mock_password_is_strong, test_client, test_admin_user):
    """POST /users/ as admin creates the user; password is stored hashed."""
    user_data = {
        'username': 'username',
        'password': 'password'
    }
    url = f'{BASE_PATH}/users/'
    response = test_client.post(url, data=json.dumps(user_data), content_type='application/json',
                                headers=basic_auth_header(test_admin_user))
    assert 201 == response.status_code
    response_data = json.loads(response.data)
    assert 'id' in response_data
    assert isinstance(response_data['id'], int)
    assert user_data['username'] == response_data['username']
    # The stored record must exist and hold a hash, never the plain password.
    db_user = get_user_by_id(response_data['id'])
    assert db_user is not None
    assert user_data['username'] == db_user.username
    assert db_user.password.startswith(HASH_METHOD)
    assert check_password_hash(db_user.password, 'password') is True
@mock.patch('subscription_manager.db.users.save_user', side_effect=IntegrityError(None, None, None))
@mock.patch('swim_backend.auth.passwords.is_strong', return_value=True)
def test_put_user__db_error__returns_409(mock_password_is_strong, mock_update_user, test_client, generate_user,
                                         test_admin_user):
    """PUT /users/<id> maps a DB IntegrityError to a 409 response."""
    user = generate_user()
    user_data = {
        'username': 'username',
        'password': 'password'
    }
    url = f'{BASE_PATH}/users/{user.id}'
    response = test_client.put(url, data=json.dumps(user_data), content_type='application/json',
                               headers=basic_auth_header(test_admin_user))
    assert 409 == response.status_code
    response_data = json.loads(response.data)
    assert "Error while saving user in DB" == response_data['detail']


def test_put_user__user_does_not_exist__returns_404(test_client, test_admin_user):
    """PUT /users/<id> for an unknown id returns 404."""
    user_data = {
        'username': 'username',
        'password': 'password'
    }
    url = f'{BASE_PATH}/users/1234'
    response = test_client.put(url, data=json.dumps(user_data), content_type='application/json',
                               headers=basic_auth_header(test_admin_user))
    assert 404 == response.status_code
    response_data = json.loads(response.data)
    assert "User with id 1234 does not exist" == response_data['detail']


def test_put_user__unauthorized_user__returns_401(test_client, generate_user):
    """PUT /users/<id> with bad credentials returns 401."""
    user = generate_user()
    user_data = {
        'username': 'new username',
    }
    url = f'{BASE_PATH}/users/{user.id}'
    response = test_client.put(url, data=json.dumps(user_data), content_type='application/json',
                               headers=make_basic_auth_header('fake_username', 'fake_password'))
    assert 401 == response.status_code
    response_data = json.loads(response.data)
    assert 'Invalid credentials' == response_data['detail']


@mock.patch('swim_backend.auth.passwords.is_strong', return_value=False)
def test_put_user__update_password__password_not_strong_enough__returns_400(mock_password_is_strong, test_client,
                                                                            generate_user, test_admin_user):
    """PUT /users/<id> rejects a weak replacement password with 400."""
    user = generate_user()
    user_data = {
        'password': 'new password',
    }
    url = f'{BASE_PATH}/users/{user.id}'
    response = test_client.put(url, data=json.dumps(user_data), content_type='application/json',
                               headers=basic_auth_header(test_admin_user))
    assert 400 == response.status_code
    response_data = json.loads(response.data)
    assert 'password is not strong enough' == response_data['detail']


def test_put_user__non_admin_user__returns_403(test_client, generate_user, test_user):
    """PUT /users/<id> as a non-admin returns 403."""
    user = generate_user()
    user_data = {
        'username': 'new username',
    }
    url = f'{BASE_PATH}/users/{user.id}'
    response = test_client.put(url, data=json.dumps(user_data), content_type='application/json',
                               headers=basic_auth_header(test_user))
    assert 403 == response.status_code
    response_data = json.loads(response.data)
    assert 'Admin rights required' == response_data['detail']


def test_put_user__user_is_updated(test_client, generate_user, test_admin_user):
    """PUT /users/<id> as admin updates the username in the response and DB."""
    user = generate_user()
    user_data = {
        'username': 'new username',
    }
    url = f'{BASE_PATH}/users/{user.id}'
    response = test_client.put(url, data=json.dumps(user_data), content_type='application/json',
                               headers=basic_auth_header(test_admin_user))
    assert 200 == response.status_code
    response_data = json.loads(response.data)
    assert user_data['username'] == response_data['username']
    db_user = get_user_by_id(response_data['id'])
    assert user_data['username'] == db_user.username


@mock.patch('swim_backend.auth.passwords.is_strong', return_value=True)
def test_put_user__new_password_is_updated_and_hashed_correctly(mock_password_is_strong, test_client, generate_user,
                                                                test_admin_user):
    """PUT /users/<id> stores the new password hashed, never in plain text."""
    user = generate_user()
    user_data = {
        'password': 'new password',
    }
    url = f'{BASE_PATH}/users/{user.id}'
    response = test_client.put(url, data=json.dumps(user_data), content_type='application/json',
                               headers=basic_auth_header(test_admin_user))
    assert 200 == response.status_code
    response_data = json.loads(response.data)
    db_user = get_user_by_id(response_data['id'])
    assert db_user.password.startswith(HASH_METHOD)
    assert check_password_hash(db_user.password, 'new password') is True
| 35.200495
| 121
| 0.711553
| 1,869
| 14,221
| 5.092028
| 0.133226
| 0.080698
| 0.032783
| 0.025218
| 0.754545
| 0.743196
| 0.730377
| 0.707996
| 0.689083
| 0.680361
| 0
| 0.011044
| 0.185008
| 14,221
| 403
| 122
| 35.287841
| 0.810095
| 0.126011
| 0
| 0.677824
| 0
| 0
| 0.162628
| 0.04742
| 0
| 0
| 0
| 0
| 0.209205
| 1
| 0.096234
| false
| 0.158996
| 0.046025
| 0.004184
| 0.154812
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
e1102ba3880791221f38dda8e9386b654dfa95fe
| 451
|
py
|
Python
|
EgammaAnalysis/CSA07Skims/python/EgammaZPlusEMOrJetPath_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 1
|
2019-08-09T08:42:11.000Z
|
2019-08-09T08:42:11.000Z
|
EgammaAnalysis/CSA07Skims/python/EgammaZPlusEMOrJetPath_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
EgammaAnalysis/CSA07Skims/python/EgammaZPlusEMOrJetPath_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 1
|
2019-04-03T19:23:27.000Z
|
2019-04-03T19:23:27.000Z
|
import FWCore.ParameterSet.Config as cms
from EgammaAnalysis.CSA07Skims.EgammaZJetToEleHLT_cfi import *
from EgammaAnalysis.CSA07Skims.EgammaZJetToMuHLT_cfi import *
from EgammaAnalysis.CSA07Skims.EgammaZJetToElePlusProbe_cfi import *
from EgammaAnalysis.CSA07Skims.EgammaZJetToMuPlusProbe_cfi import *
# CMSSW skim paths: each path chains the HLT filter with its probe filter.
electronFilterZPath = cms.Path(EgammaZJetToEleHLT+EgammaZJetToElePlusProbe)
muonFilterZPath = cms.Path(EgammaZJetToMuHLT+EgammaZJetToMuPlusProbe)
| 45.1
| 75
| 0.884701
| 40
| 451
| 9.875
| 0.425
| 0.182278
| 0.283544
| 0.205063
| 0.281013
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018913
| 0.062084
| 451
| 9
| 76
| 50.111111
| 0.914894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.714286
| 0
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
012119d9c53adbee1d9e47a9f364a943e81b1e93
| 31
|
py
|
Python
|
Sim_ATAV/simulation_configurator/__init__.py
|
SahilDhull/autonomous
|
378fc7d6c5a9c34c4e915f080fb78ed5c11195d6
|
[
"MIT"
] | 3
|
2020-02-28T12:04:26.000Z
|
2022-02-27T00:42:56.000Z
|
Sim_ATAV/vehicle_control/trial_controller/__init__.py
|
SahilDhull/autonomous
|
378fc7d6c5a9c34c4e915f080fb78ed5c11195d6
|
[
"MIT"
] | null | null | null |
Sim_ATAV/vehicle_control/trial_controller/__init__.py
|
SahilDhull/autonomous
|
378fc7d6c5a9c34c4e915f080fb78ed5c11195d6
|
[
"MIT"
] | null | null | null |
# Author: Cumhur Erkan Tuncali
| 15.5
| 30
| 0.774194
| 4
| 31
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 31
| 1
| 31
| 31
| 0.923077
| 0.903226
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
012bac8ea112937cc08973a4ca408097202d790f
| 2,943
|
py
|
Python
|
test/test_queue.py
|
shenxiangzhuang/ToyData
|
66489267cc7a438215e0d30d751ae2a54301b513
|
[
"MIT"
] | 4
|
2020-02-02T08:11:14.000Z
|
2020-04-07T15:40:45.000Z
|
test/test_queue.py
|
shenxiangzhuang/ToyData
|
66489267cc7a438215e0d30d751ae2a54301b513
|
[
"MIT"
] | null | null | null |
test/test_queue.py
|
shenxiangzhuang/ToyData
|
66489267cc7a438215e0d30d751ae2a54301b513
|
[
"MIT"
] | null | null | null |
import unittest
from toydata.Queue import ArrayQueue, LinkedQueue, ArrayDeque, LinkedDeque
class testArrayQueue(unittest.TestCase):
    """Behavioural tests for ArrayQueue."""

    def test_init(self):
        q = ArrayQueue()
        self.assertTrue(q.is_empty())

    def test_is_empty(self):
        q = ArrayQueue()
        self.assertTrue(q.is_empty())
        q.enqueue(1)
        self.assertFalse(q.is_empty())

    def test_first(self):
        q = ArrayQueue()
        q.enqueue(1)
        self.assertEqual(q.first(), 1)
        q.enqueue(2)
        self.assertEqual(q.first(), 1)

    def test_dequeue(self):
        q = ArrayQueue()
        q.enqueue(1)
        q.dequeue()
        # Bug fix: the original asserted the bound method object `q.is_empty`,
        # which is always truthy — the assertion was vacuous. Call it.
        self.assertTrue(q.is_empty())

    def test_enqueue(self):
        q = ArrayQueue()
        q.enqueue(1)
        self.assertEqual(q.first(), 1)
class testLinkedQueue(unittest.TestCase):
    """Behavioural tests for LinkedQueue."""

    def test_init(self):
        q = LinkedQueue()
        self.assertTrue(q.is_empty())

    def test_is_empty(self):
        q = LinkedQueue()
        self.assertTrue(q.is_empty())
        q.enqueue(1)
        self.assertFalse(q.is_empty())

    def test_first(self):
        q = LinkedQueue()
        q.enqueue(1)
        self.assertEqual(q.first(), 1)
        q.enqueue(2)
        self.assertEqual(q.first(), 1)

    def test_dequeue(self):
        q = LinkedQueue()
        q.enqueue(1)
        q.dequeue()
        # Bug fix: the original asserted the bound method object `q.is_empty`,
        # which is always truthy — the assertion was vacuous. Call it.
        self.assertTrue(q.is_empty())

    def test_enqueue(self):
        q = LinkedQueue()
        q.enqueue(1)
        self.assertEqual(q.first(), 1)
class testArrayDeque(unittest.TestCase):
    """Behavioural tests for ArrayDeque."""

    def test_add_last(self):
        dq = ArrayDeque()
        dq.add_last(1)
        self.assertEqual(dq.last(), 1)

    def test_last(self):
        dq = ArrayDeque()
        dq.add_last(1)
        self.assertEqual(dq.last(), 1)
        dq.add_last(2)
        self.assertEqual(dq.last(), 2)

    def test_delete_last(self):
        dq = ArrayDeque()
        dq.add_last(1)
        dq.add_last(2)
        dq.delete_last()
        self.assertEqual(dq.last(), 1)

    def test_add_first(self):
        # After add_first(1) then add_first(0) the deque is [0, 1].
        dq = ArrayDeque()
        dq.add_first(1)
        dq.add_first(0)
        self.assertEqual(dq.last(), 1)

    def test_first(self):
        dq = ArrayDeque()
        dq.add_first(1)
        self.assertEqual(dq.first(), 1)

    def test_delete_first(self):
        dq = ArrayDeque()
        dq.add_last(1)
        dq.add_first(0)
        dq.delete_first()
        self.assertEqual(dq.first(), 1)
class testLinkedDeque(unittest.TestCase):
    """Behavioural tests for LinkedDeque."""

    def test_first(self):
        dq = LinkedDeque()
        dq.insert_first(1)
        self.assertEqual(dq.first(), 1)
        dq.insert_first(2)
        self.assertEqual(dq.first(), 2)
        dq.delete_first()
        self.assertEqual(dq.first(), 1)

    def test_last(self):
        dq = LinkedDeque()
        dq.insert_last(1)
        self.assertEqual(dq.last(), 1)
        dq.insert_last(2)
        self.assertEqual(dq.last(), 2)
        dq.delete_last()
        self.assertEqual(dq.last(), 1)
| 24.122951
| 74
| 0.567788
| 377
| 2,943
| 4.299735
| 0.092838
| 0.175817
| 0.187539
| 0.142505
| 0.862431
| 0.806292
| 0.779766
| 0.65145
| 0.520666
| 0.479951
| 0
| 0.020813
| 0.297995
| 2,943
| 121
| 75
| 24.322314
| 0.763795
| 0
| 0
| 0.848485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 1
| 0.181818
| false
| 0
| 0.020202
| 0
| 0.242424
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
017cbe791572c6ae9f94b822d09e39c61e77c88b
| 30
|
py
|
Python
|
Sort/14_10_Counting_Sort.py
|
misscindy/Interview
|
eab43da97e61fcc3d0278408f8f4ea709eed14e6
|
[
"CC0-1.0"
] | null | null | null |
Sort/14_10_Counting_Sort.py
|
misscindy/Interview
|
eab43da97e61fcc3d0278408f8f4ea709eed14e6
|
[
"CC0-1.0"
] | 1
|
2015-04-23T20:05:24.000Z
|
2015-04-23T20:07:45.000Z
|
Sort/14_10_Counting_Sort.py
|
misscindy/Interview
|
eab43da97e61fcc3d0278408f8f4ea709eed14e6
|
[
"CC0-1.0"
] | null | null | null |
# Given a list of Person class instances
| 30
| 30
| 0.766667
| 6
| 30
| 3.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 30
| 1
| 30
| 30
| 0.958333
| 0.933333
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
09b165468ddbf64822e467edf519bdacea35f62a
| 261
|
py
|
Python
|
core/admin.py
|
Wanderer2436/django_pharmacy
|
2e12c41e30f2f2e2c0f3abdaded98a917420f5b8
|
[
"MIT"
] | null | null | null |
core/admin.py
|
Wanderer2436/django_pharmacy
|
2e12c41e30f2f2e2c0f3abdaded98a917420f5b8
|
[
"MIT"
] | 2
|
2022-03-31T14:34:44.000Z
|
2022-03-31T14:35:17.000Z
|
core/admin.py
|
Wanderer2436/django_pharmacy
|
2e12c41e30f2f2e2c0f3abdaded98a917420f5b8
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
import core.models
admin.site.register(core.models.Category)
admin.site.register(core.models.Product)
admin.site.register(core.models.Pharmacy)
admin.site.register(core.models.Available)
admin.site.register(core.models.Review)
| 29
| 42
| 0.831418
| 38
| 261
| 5.710526
| 0.368421
| 0.276498
| 0.391705
| 0.483871
| 0.62212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045977
| 261
| 8
| 43
| 32.625
| 0.871486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
09b89554b7ff4b7ec3fea7631109b0962ddc10d3
| 26
|
py
|
Python
|
python/testData/stubs/ParameterAnnotation.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/stubs/ParameterAnnotation.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/stubs/ParameterAnnotation.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def func(x: int):
pass
| 13
| 17
| 0.576923
| 5
| 26
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.269231
| 26
| 2
| 18
| 13
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
09b9b2544018554e9275dd22d34fd0a62bc0fe5e
| 53,806
|
py
|
Python
|
StackEPI/ML/EPIconst.py
|
20032303092/StackEPI
|
106c0f3142f55aead0259a5f7f4d21a14fb8dcef
|
[
"CECILL-B"
] | null | null | null |
StackEPI/ML/EPIconst.py
|
20032303092/StackEPI
|
106c0f3142f55aead0259a5f7f4d21a14fb8dcef
|
[
"CECILL-B"
] | null | null | null |
StackEPI/ML/EPIconst.py
|
20032303092/StackEPI
|
106c0f3142f55aead0259a5f7f4d21a14fb8dcef
|
[
"CECILL-B"
] | null | null | null |
class EPIconst:
class FeatureName:
pseknc = "pseknc"
cksnap = "cksnap"
dpcp = "dpcp"
eiip = "eiip"
kmer = "kmer"
tpcp = "tpcp"
all = sorted([pseknc, cksnap, dpcp, eiip, kmer, tpcp])
class CellName:
K562 = "K562"
NHEK = "NHEK"
IMR90 = "IMR90"
HeLa_S3 = "HeLa-S3"
HUVEC = "HUVEC"
GM12878 = "GM12878"
all = sorted([GM12878, HeLa_S3, HUVEC, IMR90, K562, NHEK])
class MethodName:
ensemble = "meta"
xgboost = "xgboost"
svm = "svm"
deepforest = "deepforest"
lightgbm = "lightgbm"
rf = "rf"
all = sorted([lightgbm, rf, xgboost, svm, deepforest])
class ModelInitParams:
logistic = {"n_jobs": 13, }
mlp = {}
deepforest = {"n_jobs": 13, "use_predictor": False, "random_state": 1, "predictor": 'forest', "verbose": 0}
lightgbm = {"n_jobs": 13, 'max_depth': -1, 'num_leaves': 31,
'min_child_samples': 20,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 0,
'reg_alpha': 0.0, 'reg_lambda': 0.0,
'min_split_gain': 0.0,
'objective': None,
'n_estimators': 100, 'learning_rate': 0.1,
'device': 'gpu', 'boosting_type': 'gbdt',
'class_weight': None, 'importance_type': 'split',
'min_child_weight': 0.001, 'random_state': None,
'subsample_for_bin': 200000, 'silent': True}
rf = {"n_jobs": 13, 'n_estimators': 100, "max_depth": None, 'min_samples_split': 2, "min_samples_leaf": 1,
'max_features': 'auto'}
svm = {"probability": True}
xgboost = {'learning_rate': 0.1, 'n_estimators': 500, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,
'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1,
'use_label_encoder': False, 'eval_metric': 'logloss', 'tree_method': 'gpu_hist'}
class BaseModelParams:
GM12878_cksnap_deepforest = {"max_layers": 20, "n_estimators": 5, "n_trees": 250}
GM12878_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 301, 'max_bin': 125, 'min_child_samples': 90,
'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1,
'n_estimators': 250}
GM12878_cksnap_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'}
GM12878_cksnap_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 3, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0,
'learning_rate': 0.1}
GM12878_cksnap_rf = {'n_estimators': 340, 'max_depth': 114, 'min_samples_leaf': 3, 'min_samples_split': 2,
'max_features': 'sqrt'}
"----------------------------------------------"
GM12878_dpcp_deepforest = {"max_layers": 20, "n_estimators": 2, "n_trees": 300}
GM12878_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 331, 'max_bin': 135, 'min_child_samples': 190,
'colsample_bytree': 0.7, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 0.9,
'reg_lambda': 0.001, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250}
GM12878_dpcp_svm = {'C': 1.0, 'gamma': 64.0, 'kernel': 'rbf'}
GM12878_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 3, 'reg_lambda': 3,
'learning_rate': 0.1}
GM12878_dpcp_rf = {'n_estimators': 150, 'max_depth': 88, 'min_samples_leaf': 1, 'min_samples_split': 3,
'max_features': "sqrt"}
"----------------------------------------------"
GM12878_eiip_deepforest = {'max_layers': 10, 'n_estimators': 2,
'n_trees': 300}
GM12878_eiip_lightgbm = {'max_depth': 12, 'num_leaves': 291, 'max_bin': 115, 'min_child_samples': 40,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100}
GM12878_eiip_rf = {'n_estimators': 280, 'max_depth': None, 'min_samples_leaf': 1, 'min_samples_split': 7,
'max_features': "sqrt"}
GM12878_eiip_svm = {'C': 1.0, 'gamma': 2048.0, 'kernel': 'rbf'}
GM12878_eiip_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 6, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
GM12878_kmer_deepforest = {'max_layers': 25, 'n_estimators': 5,
'n_trees': 400}
GM12878_kmer_lightgbm = {'max_depth': 12, 'num_leaves': 291, 'max_bin': 115, 'min_child_samples': 40,
'colsample_bytree': 1.0, 'subsample': 0.8, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100}
GM12878_kmer_rf = {'n_estimators': 170, 'max_depth': 41, 'min_samples_leaf': 3, 'min_samples_split': 2,
'max_features': 'sqrt'}
GM12878_kmer_svm = {'C': 2.0, 'gamma': 128.0,
'kernel': 'rbf'}
GM12878_kmer_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 6, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
GM12878_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 400}
GM12878_pseknc_lightgbm = {'max_depth': 11, 'num_leaves': 291, 'max_bin': 185, 'min_child_samples': 80,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 40, 'reg_alpha': 0.0,
'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150}
GM12878_pseknc_rf = {'n_estimators': 250, 'max_depth': 41, 'min_samples_leaf': 2, 'min_samples_split': 6,
'max_features': 'log2'}
GM12878_pseknc_svm = {'C': 0.5, 'gamma': 1024.0, 'kernel': 'rbf'}
GM12878_pseknc_xgboost = {'n_estimators': 950, 'max_depth': 6, 'min_child_weight': 1, 'gamma': 0.1,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0.01,
'learning_rate': 0.1}
"----------------------------------------------"
GM12878_tpcp_deepforest = {'max_layers': 15, 'n_estimators': 2,
'n_trees': 100}
GM12878_tpcp_lightgbm = {'max_depth': -1, 'num_leaves': 321, 'max_bin': 175, 'min_child_samples': 80,
'colsample_bytree': 0.9, 'subsample': 1.0, 'subsample_freq': 20, 'reg_alpha': 0.0,
'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250}
GM12878_tpcp_rf = {'n_estimators': 250, 'max_depth': 89, 'min_samples_leaf': 2, 'min_samples_split': 9,
'max_features': "log2"}
GM12878_tpcp_svm = {'C': 16.0, 'gamma': 64.0,
'kernel': 'rbf'}
GM12878_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 6, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"=============================================="
HeLa_S3_cksnap_deepforest = {"max_layers": 20, "n_estimators": 2, "n_trees": 300}
HeLa_S3_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 341, 'max_bin': 105, 'min_child_samples': 80,
'colsample_bytree': 0.9, 'subsample': 0.9, 'subsample_freq': 40, 'reg_alpha': 0.1,
'reg_lambda': 0.1, 'min_split_gain': 0.4, 'learning_rate': 0.1, 'n_estimators': 150}
HeLa_S3_cksnap_svm = {'C': 128.0, 'gamma': 128.0,
'kernel': 'rbf'}
HeLa_S3_cksnap_rf = {'n_estimators': 340, 'max_depth': 44, 'min_samples_leaf': 1, 'min_samples_split': 5,
'max_features': 'sqrt'}
HeLa_S3_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 4, 'gamma': 0,
'colsample_bytree': 0.7, 'subsample': 0.7, 'reg_alpha': 3, 'reg_lambda': 0.5,
'learning_rate': 0.1}
"----------------------------------------------"
HeLa_S3_dpcp_deepforest = {"max_layers": 10, "n_estimators": 2, "n_trees": 400}
HeLa_S3_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 221, 'max_bin': 155, 'min_child_samples': 180,
'colsample_bytree': 0.7, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 0.0,
'reg_lambda': 1e-05, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 200}
HeLa_S3_dpcp_rf = {'n_estimators': 70, 'max_depth': 32, 'min_samples_leaf': 1, 'min_samples_split': 8,
'max_features': 'sqrt'}
HeLa_S3_dpcp_svm = {'C': 2.0, 'gamma': 64.0, 'kernel': 'rbf'}
HeLa_S3_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 3, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
HeLa_S3_eiip_deepforest = {'max_layers': 10, 'n_estimators': 5,
'n_trees': 200}
HeLa_S3_eiip_lightgbm = {'max_depth': -1, 'num_leaves': 281, 'max_bin': 5, 'min_child_samples': 110,
'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 100}
HeLa_S3_eiip_rf = {'n_estimators': 180, 'max_depth': 138, 'min_samples_leaf': 6, 'min_samples_split': 10,
'max_features': 'sqrt'}
HeLa_S3_eiip_svm = {'C': 2.0, 'gamma': 1024.0,
'kernel': 'rbf'}
HeLa_S3_eiip_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 3, 'gamma': 0,
'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
HeLa_S3_kmer_deepforest = {'max_layers': 10, 'n_estimators': 5,
'n_trees': 200}
HeLa_S3_kmer_lightgbm = {'max_depth': -1, 'num_leaves': 281, 'max_bin': 165, 'min_child_samples': 90,
'colsample_bytree': 0.7, 'subsample': 0.9, 'subsample_freq': 70, 'reg_alpha': 0.001,
'reg_lambda': 0.001, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 125}
HeLa_S3_kmer_rf = {'n_estimators': 240, 'max_depth': 77, 'min_samples_leaf': 2, 'min_samples_split': 2,
'max_features': 'sqrt'}
HeLa_S3_kmer_svm = {'C': 8.0, 'gamma': 128.0,
'kernel': 'rbf'}
HeLa_S3_kmer_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
HeLa_S3_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 200}
HeLa_S3_pseknc_lightgbm = {'max_depth': 12, 'num_leaves': 261, 'max_bin': 25, 'min_child_samples': 90,
'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100}
HeLa_S3_pseknc_rf = {'n_estimators': 330, 'max_depth': 118, 'min_samples_leaf': 1, 'min_samples_split': 8,
'max_features': 'log2'}
HeLa_S3_pseknc_svm = {'C': 1.0, 'gamma': 256.0, 'kernel': 'rbf'}
HeLa_S3_pseknc_xgboost = {'n_estimators': 750, 'max_depth': 8, 'min_child_weight': 2, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0.1, 'reg_lambda': 2,
'learning_rate': 0.1}
"----------------------------------------------"
HeLa_S3_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2,
'n_trees': 250}
HeLa_S3_tpcp_lightgbm = {'max_depth': 0, 'num_leaves': 341, 'max_bin': 45, 'min_child_samples': 10,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 0, 'reg_alpha': 0.0,
'reg_lambda': 1e-05, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 250}
HeLa_S3_tpcp_rf = {'n_estimators': 320, 'max_depth': 99, 'min_samples_leaf': 1, 'min_samples_split': 10,
'max_features': 'sqrt'}
HeLa_S3_tpcp_svm = {'C': 4.0, 'gamma': 32.0,
'kernel': 'rbf'}
HeLa_S3_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 7, 'min_child_weight': 4, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"=============================================="
HUVEC_cksnap_deepforest = {"max_layers": 10, "n_estimators": 2,
"n_trees": 200}
HUVEC_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 271, 'max_bin': 45, 'min_child_samples': 10,
'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 0.5,
'reg_lambda': 0.5, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 175}
HUVEC_cksnap_rf = {'n_estimators': 270, 'max_depth': 38, 'min_samples_leaf': 2, 'min_samples_split': 2,
'max_features': "auto"}
HUVEC_cksnap_svm = {'C': 8.0, 'gamma': 64.0, 'kernel': 'rbf'}
HUVEC_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 2, 'gamma': 0,
'colsample_bytree': 0.6, 'subsample': 0.7, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
HUVEC_dpcp_deepforest = {"max_layers": 10, "n_estimators": 2, "n_trees": 400}
HUVEC_dpcp_lightgbm = {'max_depth': -1, 'num_leaves': 301, 'max_bin': 245, 'min_child_samples': 30,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 0.5,
'reg_lambda': 0.3, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200}
HUVEC_dpcp_rf = {'n_estimators': 300, 'max_depth': 61, 'min_samples_leaf': 2, 'min_samples_split': 3,
'max_features': 'log2'}
HUVEC_dpcp_svm = {'C': 4.0, 'gamma': 16.0, 'kernel': 'rbf'}
HUVEC_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 3, 'reg_lambda': 3,
'learning_rate': 0.1}
"----------------------------------------------"
HUVEC_eiip_deepforest = {'max_layers': 15, 'n_estimators': 2,
'n_trees': 300}
HUVEC_eiip_lightgbm = {'max_depth': -1, 'num_leaves': 281, 'max_bin': 25, 'min_child_samples': 80,
'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250}
HUVEC_eiip_rf = {'n_estimators': 310, 'max_depth': 28, 'min_samples_leaf': 1, 'min_samples_split': 2,
'max_features': 'sqrt'}
HUVEC_eiip_svm = {'C': 4.0, 'gamma': 512.0, 'kernel': 'rbf'}
HUVEC_eiip_xgboost = {'n_estimators': 600, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0,
'learning_rate': 0.1}
"----------------------------------------------"
HUVEC_kmer_deepforest = {'max_layers': 10, 'n_estimators': 2,
'n_trees': 300}
HUVEC_kmer_lightgbm = {'max_depth': 0, 'num_leaves': 251, 'max_bin': 5, 'min_child_samples': 170,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 70, 'reg_alpha': 0.5,
'reg_lambda': 0.7, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 125}
HUVEC_kmer_rf = {'n_estimators': 230, 'max_depth': 59, 'min_samples_leaf': 1, 'min_samples_split': 4,
'max_features': 'auto'}
HUVEC_kmer_svm = {'C': 4.0, 'gamma': 64.0,
'kernel': 'rbf'}
HUVEC_kmer_xgboost = {'n_estimators': 600, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0,
'learning_rate': 0.1}
"----------------------------------------------"
HUVEC_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 400}
HUVEC_pseknc_lightgbm = {'max_depth': -1, 'num_leaves': 311, 'max_bin': 115, 'min_child_samples': 190,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 70, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 175}
HUVEC_pseknc_rf = {'n_estimators': 310, 'max_depth': 42, 'min_samples_leaf': 2, 'min_samples_split': 7,
'max_features': 'sqrt'}
HUVEC_pseknc_svm = {'C': 1.0, 'gamma': 256.0, 'kernel': 'rbf'}
HUVEC_pseknc_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
HUVEC_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 150}
HUVEC_tpcp_lightgbm = {'max_depth': 0, 'num_leaves': 251, 'max_bin': 35, 'min_child_samples': 190,
'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150}
HUVEC_tpcp_rf = {'n_estimators': 330, 'max_depth': 121, 'min_samples_leaf': 2, 'min_samples_split': 5,
'max_features': "sqrt"}
HUVEC_tpcp_svm = {'C': 2.0, 'gamma': 32.0, 'kernel': 'rbf'}
HUVEC_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.9, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"=============================================="
IMR90_cksnap_deepforest = {"max_layers": 20, "n_estimators": 2, "n_trees": 250}
IMR90_cksnap_lightgbm = {'max_depth': 0, 'num_leaves': 271, 'max_bin': 95, 'min_child_samples': 60,
'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.3, 'learning_rate': 0.1, 'n_estimators': 225}
IMR90_cksnap_rf = {'n_estimators': 280, 'max_depth': 124, 'min_samples_leaf': 1, 'min_samples_split': 2,
'max_features': 'auto'}
IMR90_cksnap_svm = {'C': 16.0, 'gamma': 16.0, 'kernel': 'rbf'}
IMR90_cksnap_xgboost = {'n_estimators': 900, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0.4,
'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0.5, 'reg_lambda': 0.1,
'learning_rate': 0.1}
"----------------------------------------------"
IMR90_dpcp_deepforest = {'max_layers': 10, 'n_estimators': 2,
'n_trees': 200}
IMR90_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 281, 'max_bin': 115, 'min_child_samples': 20,
'colsample_bytree': 0.7, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 0.0,
'reg_lambda': 0.0, 'min_split_gain': 0.5, 'learning_rate': 0.1, 'n_estimators': 125}
IMR90_dpcp_rf = {'n_estimators': 70, 'max_depth': 116, 'min_samples_leaf': 1, 'min_samples_split': 9,
'max_features': 'log2'}
IMR90_dpcp_svm = {'C': 1.0, 'gamma': 32.0, 'kernel': 'rbf'}
IMR90_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 2, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.6, 'reg_alpha': 0.05, 'reg_lambda': 0.1,
'learning_rate': 0.1}
"----------------------------------------------"
IMR90_eiip_deepforest = {'max_layers': 15, 'n_estimators': 2,
'n_trees': 350}
IMR90_eiip_lightgbm = {'max_depth': 13, 'num_leaves': 331, 'max_bin': 55, 'min_child_samples': 50,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 80, 'reg_alpha': 0.0,
'reg_lambda': 0.0, 'min_split_gain': 0.4, 'learning_rate': 0.2, 'n_estimators': 200}
IMR90_eiip_rf = {'n_estimators': 240, 'max_depth': 78, 'min_samples_leaf': 1, 'min_samples_split': 2,
'max_features': 'auto'}
IMR90_eiip_svm = {'C': 4.0, 'gamma': 512.0, 'kernel': 'rbf'}
IMR90_eiip_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
IMR90_kmer_deepforest = {'max_layers': 10, 'n_estimators': 2,
'n_trees': 250}
IMR90_kmer_lightgbm = {'max_depth': 0, 'num_leaves': 271, 'max_bin': 175, 'min_child_samples': 120,
'colsample_bytree': 0.8, 'subsample': 1.0, 'subsample_freq': 30, 'reg_alpha': 0.7,
'reg_lambda': 0.9, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200}
IMR90_kmer_rf = {'n_estimators': 280, 'max_depth': 79, 'min_samples_leaf': 2, 'min_samples_split': 3,
'max_features': 'auto'}
IMR90_kmer_svm = {'C': 2.0, 'gamma': 64.0,
'kernel': 'rbf'}
IMR90_kmer_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 2, 'gamma': 0.2,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
IMR90_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 300}
IMR90_pseknc_lightgbm = {'max_depth': -1, 'num_leaves': 291, 'max_bin': 15, 'min_child_samples': 50,
'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100}
IMR90_pseknc_rf = {'n_estimators': 240, 'max_depth': 96, 'min_samples_leaf': 3, 'min_samples_split': 4,
'max_features': 'auto'}
IMR90_pseknc_svm = {'C': 4.0, 'gamma': 1024.0,
'kernel': 'rbf'}
IMR90_pseknc_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0.2,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
IMR90_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 300}
IMR90_tpcp_lightgbm = {'max_depth': -1, 'num_leaves': 291, 'max_bin': 35, 'min_child_samples': 60,
'colsample_bytree': 0.6, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 0.0,
'reg_lambda': 0.5, 'min_split_gain': 0.1, 'learning_rate': 0.1, 'n_estimators': 100}
IMR90_tpcp_rf = {'n_estimators': 290, 'max_depth': 71, 'min_samples_leaf': 5, 'min_samples_split': 4,
'max_features': 'auto'}
IMR90_tpcp_svm = {'C': 1.0, 'gamma': 512.0, 'kernel': 'rbf'}
IMR90_tpcp_xgboost = {'n_estimators': 950, 'max_depth': 7, 'min_child_weight': 5, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.5,
'learning_rate': 0.1}
"=============================================="
K562_cksnap_deepforest = {"max_layers": 20, "n_estimators": 2, "n_trees": 400}
K562_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 311, 'max_bin': 225, 'min_child_samples': 60,
'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 250}
K562_cksnap_rf = {'n_estimators': 330, 'max_depth': 109, 'min_samples_leaf': 2, 'min_samples_split': 3,
'max_features': 'sqrt'}
K562_cksnap_svm = {'C': 16.0, 'gamma': 32.0, 'kernel': 'rbf'}
K562_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 6, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 2, 'reg_lambda': 0.05,
'learning_rate': 0.1}
"----------------------------------------------"
K562_dpcp_deepforest = {"max_layers": 10, "n_estimators": 2,
"n_trees": 150}
K562_dpcp_lightgbm = {'colsample_bytree': 0.7, 'subsample': 0.7, 'subsample_freq': 80, 'reg_alpha': 1e-05,
'reg_lambda': 0.001, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 225}
K562_dpcp_rf = {'n_estimators': 240, 'max_depth': 127, 'min_samples_leaf': 1, 'min_samples_split': 6,
'max_features': 'sqrt'}
K562_dpcp_svm = {'C': 1.0, 'gamma': 64.0, 'kernel': 'rbf'}
K562_dpcp_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 4, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.05,
'learning_rate': 0.1}
"----------------------------------------------"
K562_eiip_deepforest = {'max_layers': 10, 'n_estimators': 5,
'n_trees': 150}
K562_eiip_lightgbm = {'max_depth': 0, 'num_leaves': 321, 'max_bin': 225, 'min_child_samples': 110,
'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.1, 'learning_rate': 0.1, 'n_estimators': 150}
K562_eiip_rf = {'n_estimators': 120, 'max_depth': 93, 'min_samples_leaf': 3, 'min_samples_split': 3,
'max_features': 'auto'}
K562_eiip_svm = {'C': 2.0, 'gamma': 1024.0, 'kernel': 'rbf'}
K562_eiip_xgboost = {'n_estimators': 650, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.6, 'reg_alpha': 0.5, 'reg_lambda': 0,
'learning_rate': 0.1}
"----------------------------------------------"
K562_kmer_deepforest = {'max_layers': 15, 'n_estimators': 5,
'n_trees': 150}
K562_kmer_lightgbm = {'max_depth': 0, 'num_leaves': 321, 'max_bin': 5, 'min_child_samples': 70,
'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 0.0,
'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250}
K562_kmer_rf = {'n_estimators': 290, 'max_depth': 137, 'min_samples_leaf': 10, 'min_samples_split': 7,
'max_features': "auto"}
K562_kmer_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'}
K562_kmer_xgboost = {'n_estimators': 650, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.6, 'reg_alpha': 0.5, 'reg_lambda': 0,
'learning_rate': 0.1}
"----------------------------------------------"
K562_pseknc_deepforest = {'max_layers': 15, 'n_estimators': 2,
'n_trees': 300}
K562_pseknc_lightgbm = {'max_depth': -1, 'num_leaves': 241, 'max_bin': 65, 'min_child_samples': 200,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 0, 'reg_alpha': 0.0,
'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150}
K562_pseknc_rf = {'n_estimators': 250, 'max_depth': 50, 'min_samples_leaf': 1, 'min_samples_split': 6,
'max_features': 'log2'}
K562_pseknc_svm = {'C': 0.5, 'gamma': 512.0, 'kernel': 'rbf'}
K562_pseknc_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.7, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.1,
'learning_rate': 0.1}
"----------------------------------------------"
K562_tpcp_deepforest = {'max_layers': 20, 'n_estimators': 2,
'n_trees': 300}
K562_tpcp_lightgbm = {'max_depth': -1, 'num_leaves': 241, 'max_bin': 105, 'min_child_samples': 130,
'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200}
K562_tpcp_rf = {'n_estimators': 280, 'max_depth': 143, 'min_samples_leaf': 5, 'min_samples_split': 2,
'max_features': 'sqrt'}
K562_tpcp_svm = {'C': 2.0, 'gamma': 64.0, 'kernel': 'rbf'}
K562_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 4, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 2, 'reg_lambda': 1,
'learning_rate': 0.1}
"=============================================="
NHEK_cksnap_deepforest = {"max_layers": 20, "n_estimators": 5, "n_trees": 400}
NHEK_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 291, 'max_bin': 205, 'min_child_samples': 90,
'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 0.0,
'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 75}
NHEK_cksnap_rf = {'n_estimators': 300, 'max_depth': 76, 'min_samples_leaf': 3, 'min_samples_split': 3,
'max_features': 'auto'}
NHEK_cksnap_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'}
NHEK_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 5, 'min_child_weight': 2, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
NHEK_dpcp_deepforest = {"max_layers": 10, "n_estimators": 8, "n_trees": 200}
NHEK_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 301, 'max_bin': 145, 'min_child_samples': 70,
'colsample_bytree': 0.7, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 0.9,
'reg_lambda': 1.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150}
NHEK_dpcp_rf = {'n_estimators': 300, 'max_depth': 138, 'min_samples_leaf': 1, 'min_samples_split': 5,
'max_features': 'auto'}
NHEK_dpcp_svm = {'C': 8.0, 'gamma': 16.0, 'kernel': 'rbf'}
NHEK_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 9, 'min_child_weight': 3, 'gamma': 0.5,
'colsample_bytree': 0.7, 'subsample': 0.7, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
NHEK_eiip_deepforest = {'max_layers': 10, 'n_estimators': 2,
'n_trees': 100}
NHEK_eiip_lightgbm = {'max_depth': 11, 'num_leaves': 231, 'max_bin': 255, 'min_child_samples': 70,
'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 0.0,
'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100}
NHEK_eiip_rf = {'n_estimators': 230, 'max_depth': 56, 'min_samples_leaf': 2, 'min_samples_split': 6,
'max_features': 'log2'}
NHEK_eiip_svm = {'C': 8.0, 'gamma': 512.0, 'kernel': 'rbf'}
NHEK_eiip_xgboost = {'n_estimators': 850, 'max_depth': 9, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.1,
'learning_rate': 0.1}
"----------------------------------------------"
NHEK_kmer_deepforest = {'max_layers': 10, 'n_estimators': 2,
'n_trees': 200}
NHEK_kmer_lightgbm = {'max_depth': 13, 'num_leaves': 261, 'max_bin': 115, 'min_child_samples': 60,
'colsample_bytree': 0.9, 'subsample': 0.9, 'subsample_freq': 40, 'reg_alpha': 0.0,
'reg_lambda': 0.001, 'min_split_gain': 1.0, 'learning_rate': 0.1, 'n_estimators': 150}
NHEK_kmer_rf = {'n_estimators': 60, 'max_depth': 117, 'min_samples_leaf': 3, 'min_samples_split': 3,
'max_features': "auto"}
NHEK_kmer_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'}
NHEK_kmer_xgboost = {'n_estimators': 850, 'max_depth': 9, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.1,
'learning_rate': 0.1}
"----------------------------------------------"
NHEK_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2,
'n_trees': 150}
NHEK_pseknc_lightgbm = {'max_depth': 12, 'num_leaves': 271, 'max_bin': 155, 'min_child_samples': 20,
'colsample_bytree': 0.9, 'subsample': 0.8, 'subsample_freq': 60, 'reg_alpha': 0.1,
'reg_lambda': 1e-05, 'min_split_gain': 0.7, 'learning_rate': 0.1, 'n_estimators': 75}
NHEK_pseknc_rf = {'n_estimators': 190, 'max_depth': 85, 'min_samples_leaf': 1, 'min_samples_split': 10,
'max_features': 'auto'}
NHEK_pseknc_svm = {'C': 0.5, 'gamma': 512.0, 'kernel': 'rbf'}
NHEK_pseknc_xgboost = {'n_estimators': 950, 'max_depth': 6, 'min_child_weight': 3, 'gamma': 0,
'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0.1, 'reg_lambda': 3,
'learning_rate': 0.1}
"----------------------------------------------"
NHEK_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2,
'n_trees': 200}
NHEK_tpcp_lightgbm = {'max_depth': 0, 'num_leaves': 241, 'max_bin': 15, 'min_child_samples': 90,
'colsample_bytree': 0.7, 'subsample': 0.8, 'subsample_freq': 40, 'reg_alpha': 0.001,
'reg_lambda': 0.001, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 100}
NHEK_tpcp_rf = {'n_estimators': 120, 'max_depth': 115, 'min_samples_leaf': 1, 'min_samples_split': 4,
'max_features': 'auto'}
NHEK_tpcp_svm = {'C': 1.0, 'gamma': 128.0, 'kernel': 'rbf'}
NHEK_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 7, 'min_child_weight': 6, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0.01, 'reg_lambda': 0.01,
'learning_rate': 0.1}
class MetaModelParams:
    """Tuned hyperparameters for the stacking meta-models.

    One attribute per ``<cell line>_<ensemble>_prob_<algorithm>`` combination
    (cell lines: GM12878, HeLa_S3, HUVEC, IMR90, K562, NHEK).  Values come
    from an offline hyperparameter search; treat them as read-only constants
    and resolve them dynamically via ``getattr``.
    """

    # ---------------- GM12878 ----------------
    GM12878_6f5m_prob_mlp = {
        'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300,
        'solver': 'lbfgs', 'activation': 'identity', 'hidden_layer_sizes': 32,
    }
    GM12878_4f2m_prob_mlp = {
        'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300,
        'solver': 'lbfgs', 'activation': 'identity', 'hidden_layer_sizes': 8,
    }
    GM12878_6f5m_prob_logistic = {'C': 2.900000000000001}
    GM12878_4f2m_prob_logistic = {'C': 0.9000000000000001}
    GM12878_6f5m_prob_deepforest = {
        'max_layers': 10, 'n_estimators': 13, 'n_trees': 400,
    }
    GM12878_4f2m_prob_deepforest = {
        'max_layers': 20, 'n_estimators': 10, 'n_trees': 200,
    }
    GM12878_6f5m_prob_lightgbm = {
        'max_depth': -1, 'num_leaves': 331, 'max_bin': 55,
        'min_child_samples': 200, 'colsample_bytree': 0.7, 'subsample': 0.8,
        'subsample_freq': 30, 'reg_alpha': 0.0, 'reg_lambda': 0.0,
        'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 50,
    }
    GM12878_4f2m_prob_lightgbm = {
        'max_depth': 11, 'num_leaves': 311, 'max_bin': 85,
        'min_child_samples': 150, 'colsample_bytree': 0.8, 'subsample': 1.0,
        'subsample_freq': 50, 'reg_alpha': 0.0, 'reg_lambda': 0.0,
        'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 75,
    }
    GM12878_6f5m_prob_rf = {
        'n_estimators': 250, 'max_depth': 50, 'min_samples_leaf': 9,
        'min_samples_split': 5, 'max_features': 'auto',
    }
    GM12878_4f2m_prob_rf = {
        'n_estimators': 140, 'max_depth': 53, 'min_samples_leaf': 6,
        'min_samples_split': 7, 'max_features': 'log2',
    }
    GM12878_6f5m_prob_svm = {'C': 0.0625, 'gamma': 0.0625, 'kernel': 'rbf'}
    GM12878_4f2m_prob_svm = {'C': 0.0625, 'gamma': 0.0625, 'kernel': 'rbf'}
    GM12878_6f5m_prob_xgboost = {
        'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 2, 'gamma': 0,
        'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0,
        'reg_lambda': 0, 'learning_rate': 0.1,
    }
    GM12878_4f2m_prob_xgboost = {
        'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 2, 'gamma': 0,
        'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0,
        'reg_lambda': 0.01, 'learning_rate': 0.05,
    }

    # ---------------- HeLa_S3 ----------------
    HeLa_S3_6f5m_prob_mlp = {
        'batch_size': 64, 'learning_rate_init': 5e-06, 'max_iter': 300,
        'solver': 'lbfgs', 'activation': 'relu', 'hidden_layer_sizes': 32,
    }
    HeLa_S3_4f2m_prob_mlp = {
        'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300,
        'solver': 'sgd', 'activation': 'relu', 'hidden_layer_sizes': (16, 32),
    }
    HeLa_S3_6f5m_prob_logistic = {'C': 1.9000000000000004}
    HeLa_S3_4f2m_prob_logistic = {'C': 0.5000000000000001}
    HeLa_S3_6f5m_prob_deepforest = {
        'max_layers': 10, 'n_estimators': 10, 'n_trees': 400,
    }
    HeLa_S3_4f2m_prob_deepforest = {
        'max_layers': 15, 'n_estimators': 13, 'n_trees': 400,
    }
    HeLa_S3_6f5m_prob_lightgbm = {
        'max_depth': 5, 'num_leaves': 281, 'max_bin': 175,
        'min_child_samples': 180, 'colsample_bytree': 1.0, 'subsample': 0.7,
        'subsample_freq': 80, 'reg_alpha': 0.0, 'reg_lambda': 0.0,
        'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 150,
    }
    HeLa_S3_4f2m_prob_lightgbm = {
        'max_depth': 3, 'num_leaves': 311, 'max_bin': 35,
        'min_child_samples': 20, 'colsample_bytree': 1.0, 'subsample': 1.0,
        'subsample_freq': 70, 'reg_alpha': 0.0, 'reg_lambda': 0.0,
        'min_split_gain': 0.0, 'learning_rate': 1.0, 'n_estimators': 125,
    }
    HeLa_S3_6f5m_prob_rf = {
        'n_estimators': 130, 'max_depth': 20, 'min_samples_leaf': 2,
        'min_samples_split': 3, 'max_features': 'sqrt',
    }
    HeLa_S3_4f2m_prob_rf = {
        'n_estimators': 210, 'max_depth': 117, 'min_samples_leaf': 2,
        'min_samples_split': 5, 'max_features': 'auto',
    }
    HeLa_S3_6f5m_prob_svm = {'C': 0.125, 'gamma': 0.0625, 'kernel': 'rbf'}
    HeLa_S3_4f2m_prob_svm = {'C': 0.25, 'gamma': 0.0625, 'kernel': 'rbf'}
    HeLa_S3_6f5m_prob_xgboost = {
        'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0,
        'colsample_bytree': 0.7, 'subsample': 0.8, 'reg_alpha': 0.05,
        'reg_lambda': 0.05, 'learning_rate': 0.1,
    }
    HeLa_S3_4f2m_prob_xgboost = {
        'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0,
        'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0.05,
        'reg_lambda': 0.05, 'learning_rate': 0.1,
    }

    # ---------------- HUVEC ------------------
    HUVEC_6f5m_prob_mlp = {
        'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300,
        'solver': 'sgd', 'activation': 'relu', 'hidden_layer_sizes': 8,
    }
    HUVEC_4f2m_prob_mlp = {
        'batch_size': 128, 'learning_rate_init': 5e-06, 'max_iter': 300,
        'solver': 'lbfgs', 'activation': 'tanh', 'hidden_layer_sizes': (8, 16),
    }
    HUVEC_6f5m_prob_logistic = {'C': 2.900000000000001}
    HUVEC_4f2m_prob_logistic = {'C': 0.9000000000000001}
    HUVEC_6f5m_prob_deepforest = {
        'max_layers': 10, 'n_estimators': 13, 'n_trees': 250,
    }
    HUVEC_4f2m_prob_deepforest = {
        'max_layers': 15, 'n_estimators': 13, 'n_trees': 400,
    }
    HUVEC_6f5m_prob_lightgbm = {
        'max_depth': 0, 'num_leaves': 311, 'max_bin': 45,
        'min_child_samples': 170, 'colsample_bytree': 0.7, 'subsample': 0.6,
        'subsample_freq': 10, 'reg_alpha': 0.0, 'reg_lambda': 1e-05,
        'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100,
    }
    HUVEC_4f2m_prob_lightgbm = {
        'max_depth': 0, 'num_leaves': 261, 'max_bin': 45,
        'min_child_samples': 180, 'colsample_bytree': 0.9, 'subsample': 0.8,
        'subsample_freq': 10, 'reg_alpha': 0.0, 'reg_lambda': 0.0,
        'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 200,
    }
    HUVEC_6f5m_prob_rf = {
        'n_estimators': 290, 'max_depth': 105, 'min_samples_leaf': 5,
        'min_samples_split': 2, 'max_features': 'log2',
    }
    HUVEC_4f2m_prob_rf = {
        'n_estimators': 140, 'max_depth': 76, 'min_samples_leaf': 3,
        'min_samples_split': 2, 'max_features': 'log2',
    }
    HUVEC_6f5m_prob_svm = {'C': 0.125, 'gamma': 0.0625, 'kernel': 'rbf'}
    HUVEC_4f2m_prob_svm = {'C': 1.0, 'gamma': 64.0, 'kernel': 'rbf'}
    HUVEC_6f5m_prob_xgboost = {
        'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0,
        'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0.01,
        'reg_lambda': 0.02, 'learning_rate': 0.05,
    }
    HUVEC_4f2m_prob_xgboost = {
        'n_estimators': 50, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0,
        'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0.05,
        'reg_lambda': 0.02, 'learning_rate': 0.01,
    }

    # ---------------- IMR90 ------------------
    IMR90_6f5m_prob_mlp = {
        'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300,
        'solver': 'sgd', 'activation': 'identity',
        'hidden_layer_sizes': (16, 32),
    }
    IMR90_4f2m_prob_mlp = {
        'batch_size': 64, 'learning_rate_init': 5e-06, 'max_iter': 300,
        'solver': 'lbfgs', 'activation': 'tanh', 'hidden_layer_sizes': (8, 16),
    }
    IMR90_6f5m_prob_logistic = {'C': 2.5000000000000004}
    IMR90_4f2m_prob_logistic = {'C': 2.5000000000000004}
    IMR90_6f5m_prob_deepforest = {
        'max_layers': 10, 'n_estimators': 8, 'n_trees': 300,
    }
    IMR90_4f2m_prob_deepforest = {
        'max_layers': 10, 'n_estimators': 13, 'n_trees': 200,
    }
    IMR90_6f5m_prob_lightgbm = {
        'max_depth': -1, 'num_leaves': 341, 'max_bin': 85,
        'min_child_samples': 70, 'colsample_bytree': 0.9, 'subsample': 1.0,
        'subsample_freq': 40, 'reg_alpha': 0.0, 'reg_lambda': 0.0,
        'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250,
    }
    IMR90_4f2m_prob_lightgbm = {
        'max_depth': -1, 'num_leaves': 321, 'max_bin': 55,
        'min_child_samples': 60, 'colsample_bytree': 0.7, 'subsample': 0.9,
        'subsample_freq': 30, 'reg_alpha': 0.0, 'reg_lambda': 0.0,
        'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 175,
    }
    IMR90_6f5m_prob_rf = {
        'n_estimators': 340, 'max_depth': 9, 'min_samples_leaf': 7,
        'min_samples_split': 3, 'max_features': 'log2',
    }
    IMR90_4f2m_prob_rf = {
        'n_estimators': 270, 'max_depth': 120, 'min_samples_leaf': 10,
        'min_samples_split': 7, 'max_features': 'log2',
    }
    IMR90_6f5m_prob_svm = {'C': 1.0, 'gamma': 32.0, 'kernel': 'rbf'}
    IMR90_4f2m_prob_svm = {'C': 2.0, 'gamma': 32.0, 'kernel': 'rbf'}
    IMR90_6f5m_prob_xgboost = {
        'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0,
        'colsample_bytree': 0.6, 'subsample': 0.9, 'reg_alpha': 0,
        'reg_lambda': 0, 'learning_rate': 0.05,
    }
    IMR90_4f2m_prob_xgboost = {
        'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 3, 'gamma': 0,
        'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0,
        'reg_lambda': 0.01, 'learning_rate': 0.07,
    }

    # ---------------- K562 -------------------
    K562_6f5m_prob_mlp = {
        'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300,
        'solver': 'sgd', 'activation': 'logistic',
        'hidden_layer_sizes': (8, 16),
    }
    K562_4f2m_prob_mlp = {
        'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300,
        'solver': 'lbfgs', 'activation': 'tanh', 'hidden_layer_sizes': 8,
    }
    K562_6f5m_prob_logistic = {'C': 2.900000000000001}
    K562_4f2m_prob_logistic = {'C': 0.1}
    K562_6f5m_prob_deepforest = {
        'max_layers': 10, 'n_estimators': 13, 'n_trees': 400,
    }
    K562_4f2m_prob_deepforest = {
        'max_layers': 10, 'n_estimators': 5, 'n_trees': 300,
    }
    K562_6f5m_prob_lightgbm = {
        'max_depth': -1, 'num_leaves': 301, 'max_bin': 65,
        'min_child_samples': 80, 'colsample_bytree': 1.0, 'subsample': 1.0,
        'subsample_freq': 30, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05,
        'min_split_gain': 0.0, 'learning_rate': 0.07, 'n_estimators': 75,
    }
    K562_4f2m_prob_lightgbm = {
        'max_depth': 13, 'num_leaves': 281, 'max_bin': 25,
        'min_child_samples': 80, 'colsample_bytree': 1.0, 'subsample': 0.9,
        'subsample_freq': 60, 'reg_alpha': 0.0, 'reg_lambda': 0.0,
        'min_split_gain': 0.0, 'learning_rate': 0.75, 'n_estimators': 175,
    }
    K562_6f5m_prob_rf = {
        'n_estimators': 180, 'max_depth': 35, 'min_samples_leaf': 7,
        'min_samples_split': 5, 'max_features': 'log2',
    }
    K562_4f2m_prob_rf = {
        'n_estimators': 80, 'max_depth': 130, 'min_samples_leaf': 6,
        'min_samples_split': 5, 'max_features': 'log2',
    }
    K562_6f5m_prob_svm = {'C': 0.5, 'gamma': 0.0625, 'kernel': 'rbf'}
    K562_4f2m_prob_svm = {'C': 1.0, 'gamma': 0.0625, 'kernel': 'rbf'}
    K562_6f5m_prob_xgboost = {
        'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 6, 'gamma': 0,
        'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0,
        'reg_lambda': 0.01, 'learning_rate': 0.1,
    }
    K562_4f2m_prob_xgboost = {
        'n_estimators': 50, 'max_depth': 3, 'min_child_weight': 3, 'gamma': 0,
        'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0,
        'reg_lambda': 0.01, 'learning_rate': 0.01,
    }

    # ---------------- NHEK -------------------
    NHEK_6f5m_prob_mlp = {
        'batch_size': 128, 'learning_rate_init': 0.0001, 'max_iter': 300,
        'solver': 'lbfgs', 'activation': 'identity', 'hidden_layer_sizes': 32,
    }
    NHEK_4f2m_prob_mlp = {
        'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300,
        'solver': 'sgd', 'activation': 'relu', 'hidden_layer_sizes': (16, 32),
    }
    NHEK_6f5m_prob_logistic = {'C': 0.9000000000000001}
    NHEK_4f2m_prob_logistic = {'C': 0.1}
    NHEK_6f5m_prob_deepforest = {
        'max_layers': 10, 'n_estimators': 13, 'n_trees': 50,
    }
    NHEK_4f2m_prob_deepforest = {
        'max_layers': 20, 'n_estimators': 10, 'n_trees': 50,
    }
    NHEK_6f5m_prob_lightgbm = {
        'max_depth': 0, 'num_leaves': 291, 'max_bin': 45,
        'min_child_samples': 140, 'colsample_bytree': 1.0, 'subsample': 0.9,
        'subsample_freq': 70, 'reg_alpha': 1.0, 'reg_lambda': 0.7,
        'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200,
    }
    NHEK_4f2m_prob_lightgbm = {
        'max_depth': -1, 'num_leaves': 331, 'max_bin': 35,
        'min_child_samples': 100, 'colsample_bytree': 0.8, 'subsample': 0.9,
        'subsample_freq': 60, 'reg_alpha': 0.0, 'reg_lambda': 0.0,
        'min_split_gain': 0.0, 'learning_rate': 0.07, 'n_estimators': 100,
    }
    NHEK_6f5m_prob_rf = {
        'n_estimators': 70, 'max_depth': 106, 'min_samples_leaf': 10,
        'min_samples_split': 9, 'max_features': 'log2',
    }
    NHEK_4f2m_prob_rf = {
        'n_estimators': 130, 'max_depth': 9, 'min_samples_leaf': 7,
        'min_samples_split': 4, 'max_features': 'sqrt',
    }
    NHEK_6f5m_prob_svm = {'C': 0.0625, 'gamma': 0.0625, 'kernel': 'rbf'}
    NHEK_4f2m_prob_svm = {'C': 2.0, 'gamma': 16.0, 'kernel': 'rbf'}
    NHEK_6f5m_prob_xgboost = {
        'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0,
        'colsample_bytree': 0.9, 'subsample': 0.8, 'reg_alpha': 0,
        'reg_lambda': 0, 'learning_rate': 0.07,
    }
    NHEK_4f2m_prob_xgboost = {
        'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 2,
        'gamma': 0.4, 'colsample_bytree': 0.6, 'subsample': 0.7,
        'reg_alpha': 0.05, 'reg_lambda': 1, 'learning_rate': 1.0,
    }
if __name__ == '__main__':
    # Smoke test: dynamic lookup of one base-model hyperparameter set by name.
    # BUG FIX: the guard previously compared against '_main_' (single
    # underscores), so this branch could never execute when run as a script.
    print(getattr(EPIconst.BaseModelParams, "NHEK_tpcp_deepforest"))
| 74.626907
| 120
| 0.511653
| 6,553
| 53,806
| 3.855028
| 0.039829
| 0.08491
| 0.049402
| 0.04489
| 0.902621
| 0.873565
| 0.793841
| 0.716966
| 0.688504
| 0.60589
| 0
| 0.107977
| 0.30032
| 53,806
| 720
| 121
| 74.730556
| 0.563046
| 0.000781
| 0
| 0.339089
| 0
| 0
| 0.376714
| 0.030083
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00157
| 0
| 0.012559
| 0.00157
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
09f85089f4c897040aa05567bd2bf236e62b47f9
| 46
|
py
|
Python
|
nes/bus/devices/joy/__init__.py
|
Hexadorsimal/pynes
|
dbb3d40c1240fa27f70fa798bcec09188755eec2
|
[
"MIT"
] | 1
|
2017-05-13T18:57:09.000Z
|
2017-05-13T18:57:09.000Z
|
nes/bus/devices/joy/__init__.py
|
Hexadorsimal/py6502
|
dbb3d40c1240fa27f70fa798bcec09188755eec2
|
[
"MIT"
] | 7
|
2020-10-24T17:16:56.000Z
|
2020-11-01T14:10:23.000Z
|
nes/bus/devices/joy/__init__.py
|
Hexadorsimal/pynes
|
dbb3d40c1240fa27f70fa798bcec09188755eec2
|
[
"MIT"
] | null | null | null |
from .joy1 import Joy1
from .joy2 import Joy2
| 15.333333
| 22
| 0.782609
| 8
| 46
| 4.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 0.173913
| 46
| 2
| 23
| 23
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
09ffb6ae881a61b59ef28ab49ae63c006b7f6b86
| 308
|
py
|
Python
|
testproject/testproject/views.py
|
vicalloy/django-lb-adminlte
|
ba6ae6fd83e0882937c70326975783c46a73a812
|
[
"MIT"
] | 3
|
2017-04-25T10:15:16.000Z
|
2021-02-12T20:06:29.000Z
|
testproject/testproject/views.py
|
vicalloy/django-lb-adminlte
|
ba6ae6fd83e0882937c70326975783c46a73a812
|
[
"MIT"
] | null | null | null |
testproject/testproject/views.py
|
vicalloy/django-lb-adminlte
|
ba6ae6fd83e0882937c70326975783c46a73a812
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from .forms import LeaveForm
def base(request):
    """Render the plain base template (no context)."""
    return render(request, 'base.html')
def base_ext(request):
    """Render the extended base template (no context)."""
    return render(request, 'base_ext.html')
def form(request):
    """Render the demo form page with a fresh, unbound LeaveForm."""
    context = {'form': LeaveForm()}
    return render(request, 'form.html', context)
| 16.210526
| 44
| 0.665584
| 39
| 308
| 5.205128
| 0.384615
| 0.17734
| 0.280788
| 0.256158
| 0.295567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211039
| 308
| 18
| 45
| 17.111111
| 0.835391
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0
| 0.181818
| 0.181818
| 0.727273
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
61cd983afeb411788d362af451169a18e6532a23
| 215
|
py
|
Python
|
landlab/components/stream_power/__init__.py
|
SiccarPoint/landlab
|
4150db083a0426b3647e31ffa80dfefb5faa5a60
|
[
"MIT"
] | 1
|
2015-08-17T19:29:50.000Z
|
2015-08-17T19:29:50.000Z
|
landlab/components/stream_power/__init__.py
|
laijingtao/landlab
|
871151bff814e672b4f09f091b6347367758c764
|
[
"MIT"
] | 1
|
2016-03-02T01:24:41.000Z
|
2016-03-02T01:24:41.000Z
|
landlab/components/stream_power/__init__.py
|
SiccarPoint/landlab
|
4150db083a0426b3647e31ffa80dfefb5faa5a60
|
[
"MIT"
] | 2
|
2017-07-03T20:21:13.000Z
|
2018-09-06T23:58:19.000Z
|
from .stream_power import StreamPowerEroder
from .fastscape_stream_power import FastscapeEroder
from .sed_flux_dep_incision import SedDepEroder
# Public API of the stream_power subpackage (controls `from ... import *`).
__all__ = ['StreamPowerEroder', 'FastscapeEroder', 'SedDepEroder', ]
| 30.714286
| 68
| 0.837209
| 22
| 215
| 7.727273
| 0.590909
| 0.129412
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 215
| 6
| 69
| 35.833333
| 0.871795
| 0
| 0
| 0
| 0
| 0
| 0.204651
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
61d3fe4569768281efbb3893be53a50d23435123
| 274
|
py
|
Python
|
ifs/source/confd.py
|
cbednarski/ifs-python
|
9629ba857b1c397fc1a1f13eeee46e5427fb2744
|
[
"0BSD"
] | 6
|
2016-03-29T21:12:43.000Z
|
2021-05-01T18:34:10.000Z
|
ifs/source/confd.py
|
cbednarski/ifs-python
|
9629ba857b1c397fc1a1f13eeee46e5427fb2744
|
[
"0BSD"
] | 2
|
2015-08-12T01:34:51.000Z
|
2015-08-25T19:23:17.000Z
|
ifs/source/confd.py
|
cbednarski/ifs-python
|
9629ba857b1c397fc1a1f13eeee46e5427fb2744
|
[
"0BSD"
] | null | null | null |
# confd release metadata consumed by the ifs installer.
version = '0.11.0'

# Shell command that prints the installed confd version.
version_cmd = 'confd -version'

# Release-asset URL template; the 'vVERSION'/'VERSION' placeholders are
# presumably substituted with `version` by the installer — confirm in ifs core.
download_url = ('https://github.com/kelseyhightower/confd/releases/download/'
                'vVERSION/confd-VERSION-linux-amd64')

# Post-download steps: mark the binary executable and move it onto the PATH.
install_script = """
chmod +x confd-VERSION-linux-amd64
mv -f confd-VERSION-linux-amd64 /usr/local/bin/confd
"""
| 34.25
| 110
| 0.762774
| 40
| 274
| 5.15
| 0.6
| 0.23301
| 0.247573
| 0.320388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039526
| 0.076642
| 274
| 7
| 111
| 39.142857
| 0.774704
| 0
| 0
| 0
| 0
| 0.142857
| 0.737226
| 0.182482
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
61d4d009fe33d72d82bc543c24b4dfe79633eceb
| 78
|
py
|
Python
|
double3/double3sdk/documentation/documentation.py
|
CLOMING/winter2021_double
|
9b920baaeb3736a785a6505310b972c49b5b21e9
|
[
"Apache-2.0"
] | null | null | null |
double3/double3sdk/documentation/documentation.py
|
CLOMING/winter2021_double
|
9b920baaeb3736a785a6505310b972c49b5b21e9
|
[
"Apache-2.0"
] | null | null | null |
double3/double3sdk/documentation/documentation.py
|
CLOMING/winter2021_double
|
9b920baaeb3736a785a6505310b972c49b5b21e9
|
[
"Apache-2.0"
] | null | null | null |
from double3sdk.double_api import _DoubleAPI
class _Documentation:
    # NOTE(review): empty stub; presumably intended to wrap documentation
    # queries against _DoubleAPI (imported above) — confirm intended use.
    pass
| 13
| 44
| 0.807692
| 9
| 78
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015385
| 0.166667
| 78
| 5
| 45
| 15.6
| 0.907692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
1133682c338aa6e81e5d76b2b8d1c64cc8853828
| 6,979
|
py
|
Python
|
tests/run_tests/driver_mag_test.py
|
MiraGeoscience/mirageoscience-apps
|
8c445ec8f2391349aa4cac6c705426301b3c31ca
|
[
"MIT"
] | null | null | null |
tests/run_tests/driver_mag_test.py
|
MiraGeoscience/mirageoscience-apps
|
8c445ec8f2391349aa4cac6c705426301b3c31ca
|
[
"MIT"
] | null | null | null |
tests/run_tests/driver_mag_test.py
|
MiraGeoscience/mirageoscience-apps
|
8c445ec8f2391349aa4cac6c705426301b3c31ca
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2022 Mira Geoscience Ltd.
#
# This file is part of geoapps.
#
# geoapps is distributed under the terms and conditions of the MIT License
# (see LICENSE file at the root of this source code package).
import numpy as np
from geoh5py.workspace import Workspace
from SimPEG import utils
from geoapps.utils import get_inversion_output
from geoapps.utils.testing import check_target, setup_inversion_workspace
# import pytest
# pytest.skip("eliminating conflicting test.", allow_module_level=True)
# To test the full run and validate the inversion.
# Move this file out of the test directory and run.
# Regression targets for test_susceptibility_run: data norm and final
# misfit/model norms from a known-good run, compared via check_target.
target_run = dict(
    data_norm=11.707134,
    phi_d=1.598,
    phi_m=8.824e-6,
)
def test_susceptibility_run(
    tmp_path,
    n_grid_points=2,
    max_iterations=1,
    pytest=True,  # NOTE: parameter shadows the pytest module name (kept for API compat)
    refinement=(2,),
):
    """Forward-model a synthetic susceptibility anomaly, then invert it.

    Builds a small synthetic workspace, simulates TMI data with a
    forward-only MagneticScalarParams run, inverts the simulated data, and
    (when ``pytest`` is True) checks convergence metrics against
    ``target_run``.  When ``pytest`` is False, returns
    ``(starting_model, recovered_model)`` for external validation.
    """
    # Deferred imports: presumably to avoid heavy module import at collection
    # time — confirm.
    from geoapps.inversion.driver import InversionDriver
    from geoapps.inversion.potential_fields import MagneticScalarParams

    # Fixed seed so the synthetic survey/model are reproducible.
    np.random.seed(0)
    inducing_field = (50000.0, 90.0, 0.0)

    # Run the forward simulation on a synthetic anomaly.
    geoh5, mesh, model, survey, topography = setup_inversion_workspace(
        tmp_path,
        background=0.0,
        anomaly=0.05,
        refinement=refinement,
        n_electrodes=n_grid_points,
        n_lines=n_grid_points,
        flatten=False,
    )
    params = MagneticScalarParams(
        forward_only=True,
        geoh5=geoh5,
        mesh=model.parent.uid,
        topography_object=topography.uid,
        inducing_field_strength=inducing_field[0],
        inducing_field_inclination=inducing_field[1],
        inducing_field_declination=inducing_field[2],
        resolution=0.0,
        z_from_topo=False,
        data_object=survey.uid,
        starting_model_object=model.parent.uid,
        starting_model=model.uid,
    )
    params.workpath = tmp_path
    fwr_driver = InversionDriver(params)
    fwr_driver.run()

    # Re-open the workspace to pick up the entities written by the forward
    # driver, and grab the simulated TMI channel.
    geoh5 = Workspace(geoh5.h5file)
    tmi = geoh5.get_entity("Iteration_0_tmi")[0]

    # Run the inverse on the simulated data (re-seeded for determinism).
    np.random.seed(0)
    params = MagneticScalarParams(
        geoh5=geoh5,
        mesh=mesh.uid,
        topography_object=topography.uid,
        inducing_field_strength=inducing_field[0],
        inducing_field_inclination=inducing_field[1],
        inducing_field_declination=inducing_field[2],
        resolution=0.0,
        data_object=tmi.parent.uid,
        starting_model=1e-4,
        s_norm=0.0,
        x_norm=0.0,
        y_norm=0.0,
        z_norm=0.0,
        gradient_type="components",
        lower_bound=0.0,
        tmi_channel_bool=True,
        z_from_topo=False,
        tmi_channel=tmi.uid,
        tmi_uncertainty=4.0,
        max_iterations=max_iterations,
        initial_beta_ratio=1e0,
    )
    params.workpath = tmp_path
    driver = InversionDriver(params)
    driver.run()

    # Collect inversion history plus the observed data for the target check.
    run_ws = Workspace(driver.params.geoh5.h5file)
    output = get_inversion_output(
        driver.params.geoh5.h5file, driver.params.ga_group.uid
    )
    output["data"] = tmi.values
    if pytest:
        check_target(output, target_run)
        # Inverted model must be NaN exactly where cells are inactive.
        nan_ind = np.isnan(run_ws.get_entity("Iteration_0_model")[0].values)
        inactive_ind = run_ws.get_entity("active_cells")[0].values == 0
        assert np.all(nan_ind == inactive_ind)
    else:
        return fwr_driver.starting_model, driver.inverse_problem.model
# Regression targets for test_magnetic_vector_run (see check_target).
target_mvi_run = dict(
    data_norm=8.943476,
    phi_d=0.00776,
    phi_m=4.674e-6,
)
def test_magnetic_vector_run(
    tmp_path,
    n_grid_points=2,
    max_iterations=1,
    pytest=True,  # NOTE: parameter shadows the pytest module name (kept for API compat)
    refinement=(2,),
):
    """Forward-model a synthetic anomaly, then run the MVI inversion.

    Same pattern as test_susceptibility_run but with MagneticVectorParams:
    simulate TMI data forward-only, invert it, and (when ``pytest`` is True)
    check convergence against ``target_mvi_run``.  When ``pytest`` is False,
    returns ``(starting_model, recovered_model_cartesian)``.
    """
    # Deferred imports: presumably to avoid heavy module import at collection
    # time — confirm.
    from geoapps.inversion.driver import InversionDriver
    from geoapps.inversion.potential_fields import MagneticVectorParams

    # Fixed seed so the synthetic survey/model are reproducible.
    np.random.seed(0)
    inducing_field = (50000.0, 90.0, 0.0)

    # Run the forward simulation on a synthetic anomaly.
    geoh5, mesh, model, survey, topography = setup_inversion_workspace(
        tmp_path,
        background=0.0,
        anomaly=0.05,
        refinement=refinement,
        n_electrodes=n_grid_points,
        n_lines=n_grid_points,
    )
    params = MagneticVectorParams(
        forward_only=True,
        geoh5=geoh5,
        mesh=model.parent.uid,
        topography_object=topography.uid,
        inducing_field_strength=inducing_field[0],
        inducing_field_inclination=inducing_field[1],
        inducing_field_declination=inducing_field[2],
        resolution=0.0,
        z_from_topo=False,
        data_object=survey.uid,
        starting_model_object=model.parent.uid,
        starting_model=model.uid,
        starting_inclination=45,
        starting_declination=270,
    )
    fwr_driver = InversionDriver(params)
    fwr_driver.run()

    # Re-open the workspace to pick up entities written by the forward driver.
    geoh5 = Workspace(geoh5.h5file)
    tmi = geoh5.get_entity("Iteration_0_tmi")[0]

    # Run the inverse on the simulated data.
    params = MagneticVectorParams(
        geoh5=geoh5,
        mesh=mesh.uid,
        topography_object=topography.uid,
        inducing_field_strength=inducing_field[0],
        inducing_field_inclination=inducing_field[1],
        inducing_field_declination=inducing_field[2],
        resolution=0.0,
        data_object=tmi.parent.uid,
        starting_model=1e-4,
        s_norm=0.0,
        x_norm=1.0,
        y_norm=1.0,
        z_norm=1.0,
        gradient_type="components",
        tmi_channel_bool=True,
        z_from_topo=False,
        tmi_channel=tmi.uid,
        tmi_uncertainty=4.0,
        max_iterations=max_iterations,
        initial_beta_ratio=1e1,
        prctile=100,
    )
    driver = InversionDriver(params)
    driver.run()
    run_ws = Workspace(driver.params.geoh5.h5file)

    # Re-open the workspace and get iterations
    output = get_inversion_output(
        driver.params.geoh5.h5file, driver.params.ga_group.uid
    )
    output["data"] = tmi.values
    if pytest:
        check_target(output, target_mvi_run)
        # Amplitude model must be NaN exactly where cells are inactive.
        nan_ind = np.isnan(run_ws.get_entity("Iteration_0_amplitude_model")[0].values)
        inactive_ind = run_ws.get_entity("active_cells")[0].values == 0
        assert np.all(nan_ind == inactive_ind)
    else:
        # Spherical MVI model converted to cartesian for comparison upstream.
        return fwr_driver.starting_model, utils.spherical2cartesian(
            driver.inverse_problem.model
        )
if __name__ == "__main__":
    # Full run: execute both inversions at production size (pytest=False makes
    # each function return (starting_model, recovered_model)) and validate the
    # recovered models against the true starting models by relative L2 error.
    m_start, m_rec = test_susceptibility_run(
        "./", n_grid_points=20, max_iterations=30, pytest=False, refinement=(4, 8)
    )
    # Relative deviation (in percent) between recovered and true model.
    residual = np.linalg.norm(m_rec - m_start) / np.linalg.norm(m_start) * 100.0
    assert (
        residual < 15.0
    ), f"Deviation from the true solution is {residual:.2f}%. Validate the solution!"
    print("Susceptibility model is within 15% of the answer. Well done you!")

    m_start, m_rec = test_magnetic_vector_run(
        "./", n_grid_points=20, max_iterations=30, pytest=False, refinement=(4, 8)
    )
    residual = np.linalg.norm(m_rec - m_start) / np.linalg.norm(m_start) * 100.0
    # MVI recovery is looser (vector model), hence the 50% tolerance.
    assert (
        residual < 50.0
    ), f"Deviation from the true solution is {residual:.2f}%. Validate the solution!"
    print("MVI model is within 50% of the answer. Done!")
| 31.436937
| 86
| 0.667861
| 912
| 6,979
| 4.864035
| 0.212719
| 0.076195
| 0.019838
| 0.02615
| 0.724076
| 0.717764
| 0.717764
| 0.717764
| 0.717764
| 0.717764
| 0
| 0.039274
| 0.233844
| 6,979
| 221
| 87
| 31.579186
| 0.79035
| 0.07193
| 0
| 0.696809
| 0
| 0
| 0.067193
| 0.00418
| 0
| 0
| 0
| 0
| 0.021277
| 1
| 0.010638
| false
| 0
| 0.047872
| 0
| 0.069149
| 0.010638
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.