hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a7c125db9c000988b3b97088aac18a3ae9aabaa6
| 127
|
py
|
Python
|
venv/Lib/site-packages/dask_ml/cluster/__init__.py
|
ZhangQingsen/CISC849Proj
|
ae89693648ead79d97805d663c1db58dfc0786a0
|
[
"MIT"
] | 803
|
2017-06-16T02:08:30.000Z
|
2022-03-28T14:02:25.000Z
|
venv/Lib/site-packages/dask_ml/cluster/__init__.py
|
ZhangQingsen/CISC849Proj
|
ae89693648ead79d97805d663c1db58dfc0786a0
|
[
"MIT"
] | 748
|
2017-09-24T20:32:33.000Z
|
2022-03-28T18:49:27.000Z
|
venv/Lib/site-packages/dask_ml/cluster/__init__.py
|
ZhangQingsen/CISC849Proj
|
ae89693648ead79d97805d663c1db58dfc0786a0
|
[
"MIT"
] | 250
|
2017-06-15T15:57:18.000Z
|
2022-03-25T08:31:02.000Z
|
"""Unsupervised Clustering Algorithms"""
from .k_means import KMeans # noqa
from .spectral import SpectralClustering # noqa
| 25.4
| 48
| 0.779528
| 14
| 127
| 7
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141732
| 127
| 4
| 49
| 31.75
| 0.899083
| 0.354331
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ac0b99af4ee2abd4bd6323acd4469ecf5cd9eb17
| 132
|
py
|
Python
|
telegram_bot/keyboard/inline/__init__.py
|
alenworld/django_telegram_bot
|
aa9a3570787feaaf474086a8cee66155f749983e
|
[
"MIT"
] | 3
|
2021-07-07T02:30:56.000Z
|
2021-12-19T07:48:35.000Z
|
telegram_bot/keyboard/inline/__init__.py
|
alenworld/django_telegram_bot
|
aa9a3570787feaaf474086a8cee66155f749983e
|
[
"MIT"
] | null | null | null |
telegram_bot/keyboard/inline/__init__.py
|
alenworld/django_telegram_bot
|
aa9a3570787feaaf474086a8cee66155f749983e
|
[
"MIT"
] | 1
|
2021-07-07T02:42:23.000Z
|
2021-07-07T02:42:23.000Z
|
from .utils import make_addresses_inline_keyboard, keyboard_confirm_decline_broadcasting
from .faq import make_faq_inline_keyboard
| 33
| 88
| 0.901515
| 18
| 132
| 6.111111
| 0.611111
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075758
| 132
| 3
| 89
| 44
| 0.901639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ac19781ccdeeff3922279d7686041a4361e2e3f6
| 106
|
py
|
Python
|
4_src/3_other/1_surasura-python/q5-4/q5-4.py
|
hirobel/todoapp
|
834e6dcdd3e6c227a79004c89430c6853935b23c
|
[
"Apache-2.0"
] | null | null | null |
4_src/3_other/1_surasura-python/q5-4/q5-4.py
|
hirobel/todoapp
|
834e6dcdd3e6c227a79004c89430c6853935b23c
|
[
"Apache-2.0"
] | null | null | null |
4_src/3_other/1_surasura-python/q5-4/q5-4.py
|
hirobel/todoapp
|
834e6dcdd3e6c227a79004c89430c6853935b23c
|
[
"Apache-2.0"
] | null | null | null |
alpha_num_dict = {
'a':1,
'b':2,
'c':3
}
alpha_num_dict['a'] = 10
print(alpha_num_dict['a'])
| 11.777778
| 26
| 0.537736
| 19
| 106
| 2.684211
| 0.578947
| 0.470588
| 0.705882
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060976
| 0.226415
| 106
| 9
| 26
| 11.777778
| 0.560976
| 0
| 0
| 0
| 0
| 0
| 0.046729
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ac2d4bfeaa6d340b56548c5b5c8ad9ce99c96b72
| 174
|
py
|
Python
|
mocodo/dynamic.py
|
JeanHenri79/mocodo
|
2c9e68f81bb5528134fdb4ee3cb6fc8a4042c73a
|
[
"MIT"
] | 158
|
2015-09-01T13:49:22.000Z
|
2022-03-05T19:57:06.000Z
|
mocodo/dynamic.py
|
JeanHenri79/mocodo
|
2c9e68f81bb5528134fdb4ee3cb6fc8a4042c73a
|
[
"MIT"
] | 43
|
2015-09-01T08:46:39.000Z
|
2022-01-07T18:50:10.000Z
|
mocodo/dynamic.py
|
JeanHenri79/mocodo
|
2c9e68f81bb5528134fdb4ee3cb6fc8a4042c73a
|
[
"MIT"
] | 45
|
2015-10-02T21:15:22.000Z
|
2022-03-17T16:49:23.000Z
|
#!/usr/bin/env python
# encoding: utf-8
class Dynamic(str):
"""Wrapper for the strings that need to be dynamically interpreted by the generated Python files."""
pass
| 29
| 104
| 0.718391
| 26
| 174
| 4.807692
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006993
| 0.178161
| 174
| 6
| 105
| 29
| 0.867133
| 0.752874
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
ac5273dbce439b8b19234d66d14e10e37f55e6c3
| 325
|
py
|
Python
|
toolcli/__init__.py
|
sslivkoff/toolcli
|
7f6cb50bdb3eab5118d4fe4dd256c68a206f1df2
|
[
"MIT"
] | null | null | null |
toolcli/__init__.py
|
sslivkoff/toolcli
|
7f6cb50bdb3eab5118d4fe4dd256c68a206f1df2
|
[
"MIT"
] | null | null | null |
toolcli/__init__.py
|
sslivkoff/toolcli
|
7f6cb50bdb3eab5118d4fe4dd256c68a206f1df2
|
[
"MIT"
] | null | null | null |
"""toolcli makes it easy to create structured hierarchical cli tools"""
from .command_utils import *
from .capture_utils import *
from .file_edit_utils import *
from .file_validate_utils import *
from .input_utils import *
from .style_utils import *
from .terminal_utils import *
from .spec import *
__version__ = '0.5.3'
| 23.214286
| 71
| 0.769231
| 47
| 325
| 5.042553
| 0.553191
| 0.324895
| 0.443038
| 0.160338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01083
| 0.147692
| 325
| 13
| 72
| 25
| 0.844765
| 0.2
| 0
| 0
| 0
| 0
| 0.019685
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.888889
| 0
| 0.888889
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ac572c0fa27ad1869c265f291345074bb701cfb5
| 75
|
py
|
Python
|
users/models.py
|
sh4rpy/foodgram
|
4ebc9655f9a68e05ebb83e7f2f2a2e04128d6713
|
[
"BSD-3-Clause"
] | null | null | null |
users/models.py
|
sh4rpy/foodgram
|
4ebc9655f9a68e05ebb83e7f2f2a2e04128d6713
|
[
"BSD-3-Clause"
] | 9
|
2021-04-08T20:01:45.000Z
|
2022-03-12T00:48:46.000Z
|
users/models.py
|
sh4rpy/foodgram
|
4ebc9655f9a68e05ebb83e7f2f2a2e04128d6713
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib.auth import get_user_model
# Create your models here.
| 18.75
| 46
| 0.813333
| 12
| 75
| 4.916667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 75
| 3
| 47
| 25
| 0.907692
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ac6619269b99b6f008f0fb8351119b590985d0fc
| 38
|
py
|
Python
|
13.range.py
|
shaunakganorkar/PythonMeetup-2014
|
a845b1612b5755eeb3b91ba34f3339327763fdfe
|
[
"MIT"
] | null | null | null |
13.range.py
|
shaunakganorkar/PythonMeetup-2014
|
a845b1612b5755eeb3b91ba34f3339327763fdfe
|
[
"MIT"
] | null | null | null |
13.range.py
|
shaunakganorkar/PythonMeetup-2014
|
a845b1612b5755eeb3b91ba34f3339327763fdfe
|
[
"MIT"
] | null | null | null |
for i in range(0,100):
print i,
| 12.666667
| 23
| 0.552632
| 8
| 38
| 2.625
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 0.315789
| 38
| 2
| 24
| 19
| 0.653846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
3ba3215cea34cf50d1d2fb7c72dba8687b35394a
| 132
|
py
|
Python
|
example/blog/types_.py
|
njncalub/apistar-mongoengine
|
c6bff844449ebc910bf3a85c075d760204606047
|
[
"MIT"
] | null | null | null |
example/blog/types_.py
|
njncalub/apistar-mongoengine
|
c6bff844449ebc910bf3a85c075d760204606047
|
[
"MIT"
] | 6
|
2018-05-17T15:52:38.000Z
|
2018-05-27T04:31:32.000Z
|
example/blog/types_.py
|
njncalub/apistar-mongoengine
|
c6bff844449ebc910bf3a85c075d760204606047
|
[
"MIT"
] | null | null | null |
from apistar import validators
from apistar_mongoengine.types import Type
class PostType(Type):
message = validators.String()
| 18.857143
| 42
| 0.795455
| 16
| 132
| 6.5
| 0.6875
| 0.211538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143939
| 132
| 6
| 43
| 22
| 0.920354
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3ba3cb5da5b09e4cd19b67f5e8fe68f926725129
| 6,649
|
py
|
Python
|
SubgraphCountingMatching/utils/scheduler.py
|
HKUST-KnowComp/DualMessagePassing
|
d29d627be2a8c8f24b52e3db2c383e33a059aaa7
|
[
"MIT"
] | 12
|
2021-12-06T02:31:17.000Z
|
2022-03-11T15:17:57.000Z
|
SubgraphCountingMatching/utils/scheduler.py
|
HKUST-KnowComp/DualMessagePassing
|
d29d627be2a8c8f24b52e3db2c383e33a059aaa7
|
[
"MIT"
] | null | null | null |
SubgraphCountingMatching/utils/scheduler.py
|
HKUST-KnowComp/DualMessagePassing
|
d29d627be2a8c8f24b52e3db2c383e33a059aaa7
|
[
"MIT"
] | null | null | null |
import math
from torch.optim.lr_scheduler import LambdaLR
PI = 3.141592653589793
INIT_STEPS = 600
SCHEDULE_STEPS = 10000
NUM_CYCLES = 2
MIN_PERCENT = 1e-3
class ConstantScheduler(LambdaLR):
def __init__(self):
pass
def set_optimizer(self, optimizer):
super(ConstantScheduler, self).__init__(optimizer, self.lr_lambda)
def lr_lambda(self, current_step):
return 1.0
class ConstantWarmupScheduler(LambdaLR):
def __init__(
self,
num_warmup_steps=INIT_STEPS
):
self.num_warmup_steps = num_warmup_steps
def set_optimizer(self, optimizer):
super(ConstantWarmupScheduler, self).__init__(optimizer, self.lr_lambda)
def lr_lambda(self, current_step):
if current_step < self.num_warmup_steps:
return float(current_step) / max(1.0, float(self.num_warmup_steps))
return 1.0
class LinearScheduler(LambdaLR):
def __init__(
self,
num_schedule_steps=SCHEDULE_STEPS,
min_percent=MIN_PERCENT
):
self.num_schedule_steps = num_schedule_steps
self.min_percent = min_percent
def set_optimizer(self, optimizer):
super(LinearScheduler, self).__init__(optimizer, self.lr_lambda)
def lr_lambda(self, current_step):
return max(
self.min_percent,
float(self.num_schedule_steps - current_step) / \
float(max(1, self.num_schedule_steps))
)
class LinearWarmupScheduler(LambdaLR):
def __init__(
self,
num_warmup_steps=INIT_STEPS,
num_schedule_steps=SCHEDULE_STEPS,
min_percent=MIN_PERCENT
):
self.num_warmup_steps = num_warmup_steps
self.num_schedule_steps = num_schedule_steps
self.min_percent = min_percent
def set_optimizer(self, optimizer):
super(LinearWarmupScheduler, self).__init__(optimizer, self.lr_lambda)
def lr_lambda(self, current_step):
if current_step < self.num_warmup_steps:
return float(current_step) / float(max(1, self.num_warmup_steps))
return max(
self.min_percent,
float(self.num_schedule_steps - current_step) / \
float(max(1, self.num_schedule_steps - self.num_warmup_steps))
)
class LinearWarmupRestartScheduler(LambdaLR):
def __init__(
self,
num_warmup_steps=INIT_STEPS,
num_schedule_steps=SCHEDULE_STEPS,
num_cycles=NUM_CYCLES,
min_percent=MIN_PERCENT
):
self.num_warmup_steps = num_warmup_steps
self.num_schedule_steps = num_schedule_steps
self.num_cycles = num_cycles
self.min_percent = min_percent
def set_optimizer(self, optimizer):
super(LinearWarmupRestartScheduler, self).__init__(optimizer, self.lr_lambda)
def lr_lambda(self, current_step):
if current_step < self.num_warmup_steps:
return float(current_step) / float(max(1, self.num_warmup_steps))
progress = float(current_step - self.num_warmup_steps) / \
float(max(1, self.num_schedule_steps - self.num_warmup_steps))
if progress >= 1.0:
return self.min_percent
return max(self.min_percent, 1 - (float(self.num_cycles) * progress) % 1.0)
class CosineScheduler(LambdaLR):
def __init__(
self,
num_schedule_steps=SCHEDULE_STEPS,
num_cycles=NUM_CYCLES,
min_percent=MIN_PERCENT
):
self.num_schedule_steps = num_schedule_steps
self.num_cycles = num_cycles
self.min_percent = min_percent
def set_optimizer(self, optimizer):
super(CosineScheduler, self).__init__(optimizer, self.lr_lambda)
def lr_lambda(self, current_step):
progress = float(current_step) / float(max(1, self.num_schedule_steps))
return max(self.min_percent, 0.5 * (1.0 + math.cos(PI * float(self.num_cycles) * 2.0 * progress)))
class CosineWarmupScheduler(LambdaLR):
def __init__(
self,
num_warmup_steps=INIT_STEPS,
num_schedule_steps=SCHEDULE_STEPS,
num_cycles=NUM_CYCLES,
min_percent=MIN_PERCENT
):
self.num_warmup_steps = num_warmup_steps
self.num_schedule_steps = num_schedule_steps
self.num_cycles = num_cycles
self.min_percent = min_percent
def set_optimizer(self, optimizer):
super(CosineWarmupScheduler, self).__init__(optimizer, self.lr_lambda)
def lr_lambda(self, current_step):
if current_step < self.num_warmup_steps:
return float(current_step) / float(max(1, self.num_warmup_steps))
progress = float(current_step - self.num_warmup_steps) / \
float(max(1, self.num_schedule_steps - self.num_warmup_steps))
return max(self.min_percent, 0.5 * (1.0 + math.cos(PI * float(self.num_cycles) * 2.0 * progress)))
class CosineWarmupRestartScheduler(LambdaLR):
def __init__(
self,
num_warmup_steps=INIT_STEPS,
num_schedule_steps=SCHEDULE_STEPS,
num_cycles=NUM_CYCLES,
min_percent=MIN_PERCENT
):
self.num_warmup_steps = num_warmup_steps
self.num_schedule_steps = num_schedule_steps
self.num_cycles = num_cycles
self.min_percent = min_percent
def set_optimizer(self, optimizer):
super(CosineWarmupRestartScheduler, self).__init__(optimizer, self.lr_lambda)
def lr_lambda(self, current_step):
if current_step < self.num_warmup_steps:
return float(current_step) / float(max(1, self.num_warmup_steps))
progress = float(current_step - self.num_warmup_steps) / \
float(max(1, self.num_schedule_steps - self.num_warmup_steps))
if progress >= 1.0:
return self.min_percent
return max(self.min_percent, 0.5 * (1.0 + math.cos(PI * ((float(self.num_cycles) * progress) % 1.0))))
supported_schedulers = {
"constant": ConstantScheduler(),
"constant_with_warmup": ConstantWarmupScheduler(),
"linear": LinearScheduler(),
"linear_with_warmup": LinearWarmupScheduler(),
"linear_with_warmup_and_restart": LinearWarmupRestartScheduler(),
"cosine": CosineScheduler(),
"cosine_with_warmup": CosineWarmupScheduler(),
"cosine_with_warmup_and_restart": CosineWarmupRestartScheduler(),
}
def map_scheduler_str_to_scheduler(scheduler, **kw):
if scheduler not in supported_schedulers:
raise NotImplementedError
sdlr = supported_schedulers[scheduler]
for k, v in kw.items():
if hasattr(sdlr, k):
try:
setattr(sdlr, k, v)
except:
pass
return sdlr
| 32.915842
| 110
| 0.677545
| 812
| 6,649
| 5.157635
| 0.092365
| 0.085244
| 0.106972
| 0.116046
| 0.752627
| 0.746896
| 0.731137
| 0.714422
| 0.713228
| 0.685769
| 0
| 0.013362
| 0.234622
| 6,649
| 201
| 111
| 33.079602
| 0.809589
| 0
| 0
| 0.679012
| 0
| 0
| 0.020454
| 0.009024
| 0
| 0
| 0
| 0
| 0
| 1
| 0.154321
| false
| 0.012346
| 0.012346
| 0.012346
| 0.314815
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3ba987e4e2e10be17c68bd09d6df5a1a5707460d
| 47
|
py
|
Python
|
SciFiReaders/__version__.py
|
sumner-harris/SciFiReaders
|
4494b7e7350ad2a6198c87590d193393566ad470
|
[
"MIT"
] | null | null | null |
SciFiReaders/__version__.py
|
sumner-harris/SciFiReaders
|
4494b7e7350ad2a6198c87590d193393566ad470
|
[
"MIT"
] | null | null | null |
SciFiReaders/__version__.py
|
sumner-harris/SciFiReaders
|
4494b7e7350ad2a6198c87590d193393566ad470
|
[
"MIT"
] | 1
|
2021-09-02T11:39:57.000Z
|
2021-09-02T11:39:57.000Z
|
version = '0.0.1'
time = '2021-02-07 10:00:25'
| 15.666667
| 28
| 0.595745
| 11
| 47
| 2.545455
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.425
| 0.148936
| 47
| 2
| 29
| 23.5
| 0.275
| 0
| 0
| 0
| 0
| 0
| 0.510638
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3bac834ffb995d7d061a7b3f4a823e89b6e9d515
| 591
|
py
|
Python
|
app/models.py
|
Nexus357ZA/upgraded-couscous
|
3e4ec3e02f589565bfa49e0e4d2d2ec7122aee21
|
[
"Apache-2.0"
] | null | null | null |
app/models.py
|
Nexus357ZA/upgraded-couscous
|
3e4ec3e02f589565bfa49e0e4d2d2ec7122aee21
|
[
"Apache-2.0"
] | null | null | null |
app/models.py
|
Nexus357ZA/upgraded-couscous
|
3e4ec3e02f589565bfa49e0e4d2d2ec7122aee21
|
[
"Apache-2.0"
] | null | null | null |
from flask import current_app, url_for
import json
#from app import db
class Article():
# #TODO - This is just a skeleton
# id = db.Column(db.Integer, primary_key=True)
# source = db.Column(db.String(160))
# author = db.Column(db.String(160))
# title = db.Column(db.String(160))
# description = db.Column(db.String(160))
# url = db.Column(db.String())
# urlToImage = db.Column(db.String())
# publishedAt = db.Column(db.String(160))
# content = db.Column(db.String())
#
# def __repr__(self):
# return '<Article {}>'.format(self.title)
| 31.105263
| 50
| 0.629442
| 82
| 591
| 4.45122
| 0.45122
| 0.19726
| 0.246575
| 0.350685
| 0.260274
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032051
| 0.208122
| 591
| 19
| 51
| 31.105263
| 0.747863
| 0.746193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0
| 0
| null | null | 0
| 0.666667
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
3bbb01baddf4b73ec8a083273fe9999f3407ba7d
| 211
|
py
|
Python
|
src/mbi/__init__.py
|
siddhant-pradhan/private-pgm
|
f60734e444175e78e748e5aaab63ba7c3354a7f3
|
[
"Apache-2.0"
] | null | null | null |
src/mbi/__init__.py
|
siddhant-pradhan/private-pgm
|
f60734e444175e78e748e5aaab63ba7c3354a7f3
|
[
"Apache-2.0"
] | null | null | null |
src/mbi/__init__.py
|
siddhant-pradhan/private-pgm
|
f60734e444175e78e748e5aaab63ba7c3354a7f3
|
[
"Apache-2.0"
] | null | null | null |
from mbi.domain import Domain
from mbi.dataset import Dataset
from mbi.factor import Factor
from mbi.graphical_model import GraphicalModel
from mbi.inference import FactoredInference
from mbi.lbp import LBP
| 35.166667
| 47
| 0.838863
| 31
| 211
| 5.677419
| 0.387097
| 0.238636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132701
| 211
| 6
| 48
| 35.166667
| 0.961749
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3bcfccd82c906d1d36540db295694840f6957797
| 155
|
py
|
Python
|
conftest.py
|
omri374/openvino-textspotting-docker
|
1f63e3fbe8a40acd0c4fb12c184646b4f1aa985f
|
[
"Unlicense"
] | null | null | null |
conftest.py
|
omri374/openvino-textspotting-docker
|
1f63e3fbe8a40acd0c4fb12c184646b4f1aa985f
|
[
"Unlicense"
] | null | null | null |
conftest.py
|
omri374/openvino-textspotting-docker
|
1f63e3fbe8a40acd0c4fb12c184646b4f1aa985f
|
[
"Unlicense"
] | null | null | null |
import pytest
import os
@pytest.fixture(scope='session')
def app(request):
from text_spotting import Server
server = Server()
return server.app
| 22.142857
| 36
| 0.735484
| 21
| 155
| 5.380952
| 0.666667
| 0.212389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174194
| 155
| 7
| 37
| 22.142857
| 0.882813
| 0
| 0
| 0
| 0
| 0
| 0.044872
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.428571
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3bde682a1eb7775e2d6493f7ca7431ea7ff8cc2a
| 80
|
py
|
Python
|
celery_repro/repro/__init__.py
|
Evzdrop/celery-2619-repro
|
97a8e8f3786658f06c14a1813a9043640e63f2b8
|
[
"MIT"
] | null | null | null |
celery_repro/repro/__init__.py
|
Evzdrop/celery-2619-repro
|
97a8e8f3786658f06c14a1813a9043640e63f2b8
|
[
"MIT"
] | 2
|
2020-02-11T23:00:18.000Z
|
2020-06-05T17:07:30.000Z
|
celery_repro/repro/__init__.py
|
Evzdrop/celery-2619-repro
|
97a8e8f3786658f06c14a1813a9043640e63f2b8
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from .celeryApp import app as celery_app
| 26.666667
| 40
| 0.8625
| 12
| 80
| 5.25
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 80
| 3
| 40
| 26.666667
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3bde9926aa28bc4a5bb9861476d37caf5f616a68
| 11,419
|
py
|
Python
|
tests/test_question_category.py
|
samsungnlp/semeval2022-task9
|
2d44d9ebc6224bf7a3f70182bf7b81a7ab356370
|
[
"Apache-2.0"
] | null | null | null |
tests/test_question_category.py
|
samsungnlp/semeval2022-task9
|
2d44d9ebc6224bf7a3f70182bf7b81a7ab356370
|
[
"Apache-2.0"
] | null | null | null |
tests/test_question_category.py
|
samsungnlp/semeval2022-task9
|
2d44d9ebc6224bf7a3f70182bf7b81a7ab356370
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from src.pipeline.question_category import QuestionCategory, GetCategoryFromQuestionStructure
from src.unpack_data import QuestionAnswerRecipe, Recipe, Q_A
class TestQuestionCategory(unittest.TestCase):
def test_determine_description(self):
qc = QuestionCategory("counting_times")
self.assertIn("Counting times? A: Number", qc.description)
def test_determine_description_str(self):
qc = QuestionCategory("event_ordering")
self.assertIn("X, Y which comes first?", qc.description)
def test_no_description(self):
qc = QuestionCategory("whatever")
self.assertEqual("N/A", qc.description)
class TestGetCategoryFromQuestionStructure(unittest.TestCase):
def test_regex_classifier_class_counting_times(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(
qa=Q_A("# question 20-9 = How many times is the bowl used?", "answer = a"), recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("counting_times", a_class.category)
def test_regex_classifier_class_counting_actions(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(
qa=Q_A("# question 20-9 = How many actions does it take to process the tomato?", "answer = a"), recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("counting_actions", a_class.category)
def test_regex_classifier_class_counting_uses(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(
qa=Q_A("# question 20-9 = How many spoons are used?", "answer = a"), recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("counting_uses", a_class.category)
def test_regex_classifier_class_ellipsis(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(qa=Q_A("# question 20-9 = What should be served?", "answer = a"), recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("ellipsis", a_class.category)
def test_regex_classifier_class_location_crl(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(
qa=Q_A("# question 20-9 = Where should you add the chopped vegetables?", "answer = a"), recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("location_crl", a_class.category)
def test_regex_classifier_class_how_1(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(
qa=Q_A("# question 20-9 = How do you brush the salad dressing?", "answer = a"), recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("method", a_class.category)
def test_regex_classifier_class_how_2(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(qa=Q_A("# question 20-9 = How did you get the cooked vegetable?", "answer = a"),
recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("lifespan_how", a_class.category)
def test_regex_classifier_class_lifespan_what(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(qa=Q_A("# question 20-9 = What's in the lentil salad?", "answer = a"),
recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("lifespan_what", a_class.category)
def test_regex_classifier_class_event_ordering(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(qa=Q_A(
"# question 20-9 = Cutting the stem into bite - size pieces into bite - size pieces and sauting minced "
"meat in a separate pan, which comes first?",
"answer = a"), recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("event_ordering", a_class.category)
def test_regex_classifier_class_result(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(
qa=Q_A("# question 20-9 = To what extent do you cut carrots and zucchini?", "answer = a"), recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("result", a_class.category)
def test_regex_classifier_class_how_3(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(qa=Q_A("# question 20-9 = How do you prick the dough slightly?", "answer = a"),
recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("method", a_class.category)
def test_regex_classifier_class_time(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(
qa=Q_A("# question 20-9 = For how long do you boil the potatoes until cooked?", "answer = a"), recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("time", a_class.category)
def test_regex_classifier_class_location_srl(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(
qa=Q_A("# question 20-9 = Where do you season the trout with salt and pepper?", "answer = a"), recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("location_srl", a_class.category)
def test_regex_classifier_class_extent(self):
engine = GetCategoryFromQuestionStructure()
question = QuestionAnswerRecipe(
qa=Q_A("# question 20-9 = By how much do you cover the beans with water in a pot?", "answer = a"),
recipe=None)
a_class = engine.predict_category(question)
self.assertIsNotNone(a_class)
self.assertEqual("extent", a_class.category)
def test_regex_classifier_class_how_4(self):
    """Another 'How do you ...' phrasing still maps to 'method'."""
    classifier = GetCategoryFromQuestionStructure()
    qa_pair = Q_A("# question 20-9 = How do you coat hot syrup mixture the popcorn nut mixture?", "answer = a")
    sample = QuestionAnswerRecipe(qa=qa_pair, recipe=None)
    predicted = classifier.predict_category(sample)
    self.assertIsNotNone(predicted)
    self.assertEqual("method", predicted.category)
def test_regex_classifier_class_purpose1(self):
    """A 'Why do you ...' question maps to the 'purpose' category."""
    classifier = GetCategoryFromQuestionStructure()
    qa_pair = Q_A("# question 20-9 = Why do you use gas?", "answer = a")
    sample = QuestionAnswerRecipe(qa=qa_pair, recipe=None)
    predicted = classifier.predict_category(sample)
    self.assertIsNotNone(predicted)
    self.assertEqual("purpose", predicted.category)
def test_regex_classifier_class_copatient1(self):
    """A 'What do you ... with?' question maps to the 'copatient' category."""
    classifier = GetCategoryFromQuestionStructure()
    qa_pair = Q_A("# question 20-9 = What do you mix the oil in a small bowl with?", "answer = a")
    sample = QuestionAnswerRecipe(qa=qa_pair, recipe=None)
    predicted = classifier.predict_category(sample)
    self.assertIsNotNone(predicted)
    self.assertEqual("copatient", predicted.category)
def test_regex_classifier_class_copatient2(self):
    """Second 'What do you ... with?' phrasing also maps to 'copatient'."""
    classifier = GetCategoryFromQuestionStructure()
    qa_pair = Q_A("# question 20-9 = What do you put the raspberries into a liqudizer with?", "answer = a")
    sample = QuestionAnswerRecipe(qa=qa_pair, recipe=None)
    predicted = classifier.predict_category(sample)
    self.assertIsNotNone(predicted)
    self.assertEqual("copatient", predicted.category)
def test_regex_classifier_class_how_5(self):
    """A 'How do you ...' question with no answer text still maps to 'method'."""
    classifier = GetCategoryFromQuestionStructure()
    # NOTE(review): unlike the sibling tests, Q_A is given no answer string
    # and the question ends in '??' — presumably intentional; confirm.
    sample = QuestionAnswerRecipe(qa=Q_A("# question 20-9 = How do you use the same pot of water??"), recipe=None)
    predicted = classifier.predict_category(sample)
    self.assertIsNotNone(predicted)
    self.assertEqual("method", predicted.category)
def test_regex_classifier_class_purpose2(self):
    """Second 'Why do you ...' phrasing also maps to 'purpose'."""
    classifier = GetCategoryFromQuestionStructure()
    qa_pair = Q_A("# question 20-9 = Why do you pinch the pizza dough?", "answer = a")
    sample = QuestionAnswerRecipe(qa=qa_pair, recipe=None)
    predicted = classifier.predict_category(sample)
    self.assertIsNotNone(predicted)
    self.assertEqual("purpose", predicted.category)
def test_regex_classifier_class_source(self):
    """A 'From where ...' question maps to the 'source' category."""
    classifier = GetCategoryFromQuestionStructure()
    qa_pair = Q_A("# question 20-9 = From where do you remove the spinach and shallots mix?", "answer = a")
    sample = QuestionAnswerRecipe(qa=qa_pair, recipe=None)
    predicted = classifier.predict_category(sample)
    self.assertIsNotNone(predicted)
    self.assertEqual("source", predicted.category)
def test_regex_classifier_class_location_change(self):
    """A 'Where was ... before ...' question maps to 'location_change'."""
    classifier = GetCategoryFromQuestionStructure()
    qa_pair = Q_A("# question 20-9 = Where was the stuffed mushroom before it was garnished?", "answer = a")
    sample = QuestionAnswerRecipe(qa=qa_pair, recipe=None)
    predicted = classifier.predict_category(sample)
    self.assertIsNotNone(predicted)
    self.assertEqual("location_change", predicted.category)
def test_regex_classifier_class_result_na(self):
    """Another 'To what extent ...' phrasing also maps to 'result'."""
    classifier = GetCategoryFromQuestionStructure()
    qa_pair = Q_A("# question 20-9 = To what extent do you cut the shortening in?", "answer = a")
    sample = QuestionAnswerRecipe(qa=qa_pair, recipe=None)
    predicted = classifier.predict_category(sample)
    self.assertIsNotNone(predicted)
    self.assertEqual("result", predicted.category)
def test_regex_classifier_class_how_preheat_1(self):
    """A preheat question maps to the dedicated 'method_preheat' category."""
    classifier = GetCategoryFromQuestionStructure()
    sample = QuestionAnswerRecipe(Q_A("# question 20-9 = How do you preheat your oven?"), None)
    predicted = classifier.predict_category(sample)
    self.assertIsNotNone(predicted)
    self.assertEqual("method_preheat", predicted.category)
def test_regex_classifier_class_how_preheat__alt_spelling(self):
    """The 'pre - heat' spelling variant also maps to 'method_preheat'."""
    classifier = GetCategoryFromQuestionStructure()
    sample = QuestionAnswerRecipe(Q_A("# question 20-9 = How do you pre - heat the oven?", "answer = a"), None)
    predicted = classifier.predict_category(sample)
    self.assertIsNotNone(predicted)
    self.assertEqual("method_preheat", predicted.category)
def test_regex_classifier_not_recognized(self):
    """A question matching no known pattern maps to 'not_recognized'."""
    classifier = GetCategoryFromQuestionStructure()
    sample = QuestionAnswerRecipe(Q_A("# question XYZ-ABC = Is this question recognizable?", "answer = No"), None)
    predicted = classifier.predict_category(sample)
    self.assertIsNotNone(predicted)
    self.assertEqual("not_recognized", predicted.category)
| 49.008584
| 120
| 0.685962
| 1,282
| 11,419
| 5.893136
| 0.144306
| 0.06274
| 0.041297
| 0.075711
| 0.798676
| 0.798676
| 0.793779
| 0.793779
| 0.761482
| 0.704169
| 0
| 0.009573
| 0.222436
| 11,419
| 232
| 121
| 49.219828
| 0.841311
| 0
| 0
| 0.580808
| 0
| 0.005051
| 0.191348
| 0
| 0
| 0
| 0
| 0
| 0.277778
| 1
| 0.146465
| false
| 0
| 0.015152
| 0
| 0.171717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3bea08882e0adb610c2d7732761910faa2bf79e5
| 222
|
py
|
Python
|
Test_address.py
|
KaivnD/Noa-Core
|
82a7f65289df4f703da0bfebc8cb3d453a9cf7bd
|
[
"MIT"
] | null | null | null |
Test_address.py
|
KaivnD/Noa-Core
|
82a7f65289df4f703da0bfebc8cb3d453a9cf7bd
|
[
"MIT"
] | 4
|
2020-03-24T17:35:25.000Z
|
2021-06-02T00:30:23.000Z
|
Test_address.py
|
KaivnD/Noa-Core
|
82a7f65289df4f703da0bfebc8cb3d453a9cf7bd
|
[
"MIT"
] | null | null | null |
# Small driver script: run the address-conversion API tasker once and print
# its result.
# Fix: replaced `import *` with an explicit import — wildcard imports hide
# where names come from and can silently shadow other module-level names.
from AdressConvertApiTasker.AdressConvertApiTasker import AdressConvertApiTasker

# Input coordinate ("lat,lon" string) and output file for the converted result.
import_data = {'location': '31.225696563611,121.49884033194'}
output_data = {'file_name': 'test.txt'}

tasker = AdressConvertApiTasker(import_data, output_data)
print(tasker.run())
| 37
| 59
| 0.81982
| 26
| 222
| 6.807692
| 0.653846
| 0.316384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130841
| 0.036036
| 222
| 5
| 60
| 44.4
| 0.696262
| 0
| 0
| 0
| 0
| 0
| 0.252252
| 0.13964
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ce0d9c677862920dd6c5eb3654daff22915695a0
| 228
|
py
|
Python
|
students/k33402/Sholomov_Dan/lab34/lab3/order/admin.py
|
heidamn/ITMO_ICT_WebDevelopment_2020-2021
|
47eb0cdf7c7dbe8d071bc4fd3f1ac94848475e7b
|
[
"MIT"
] | null | null | null |
students/k33402/Sholomov_Dan/lab34/lab3/order/admin.py
|
heidamn/ITMO_ICT_WebDevelopment_2020-2021
|
47eb0cdf7c7dbe8d071bc4fd3f1ac94848475e7b
|
[
"MIT"
] | null | null | null |
students/k33402/Sholomov_Dan/lab34/lab3/order/admin.py
|
heidamn/ITMO_ICT_WebDevelopment_2020-2021
|
47eb0cdf7c7dbe8d071bc4fd3f1ac94848475e7b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Order, OrderedItem
# Fix: both admin classes were named `ItemAdmin`, so the second definition
# shadowed the first at module level. Registration still happened at decoration
# time, but any later reference to `ItemAdmin` would silently get the wrong
# class. Give each model its own clearly-named admin class.
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
    """Default admin configuration for Order."""
    pass


@admin.register(OrderedItem)
class OrderedItemAdmin(admin.ModelAdmin):
    """Default admin configuration for OrderedItem."""
    pass
| 17.538462
| 39
| 0.736842
| 26
| 228
| 6.461538
| 0.5
| 0.154762
| 0.22619
| 0.345238
| 0.392857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179825
| 228
| 12
| 40
| 19
| 0.898396
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
ce1620eace121783dc65dbb266550baa5f0f2077
| 864
|
py
|
Python
|
server/openapi_server/models/__init__.py
|
hubmapconsortium/ontology-api
|
f7fadf31de028acdd9cabbb5e9d6e48b9863ffac
|
[
"MIT"
] | 2
|
2021-10-03T15:31:55.000Z
|
2021-10-04T08:55:23.000Z
|
server/openapi_server/models/__init__.py
|
hubmapconsortium/ontology-api
|
f7fadf31de028acdd9cabbb5e9d6e48b9863ffac
|
[
"MIT"
] | 105
|
2020-12-11T13:03:31.000Z
|
2022-03-31T17:08:03.000Z
|
server/openapi_server/models/__init__.py
|
hubmapconsortium/ontology-api
|
f7fadf31de028acdd9cabbb5e9d6e48b9863ffac
|
[
"MIT"
] | 2
|
2021-07-08T14:49:25.000Z
|
2022-02-14T20:12:20.000Z
|
# coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import models into model package
from openapi_server.models.codes_codes_obj import CodesCodesObj
from openapi_server.models.concept_detail import ConceptDetail
from openapi_server.models.concept_term import ConceptTerm
from openapi_server.models.full_capacity_term import FullCapacityTerm
from openapi_server.models.qqst import QQST
from openapi_server.models.sab_code_term import SabCodeTerm
from openapi_server.models.sab_definition import SabDefinition
from openapi_server.models.sab_relationship_concept_prefterm import SabRelationshipConceptPrefterm
from openapi_server.models.semantic_stn import SemanticStn
from openapi_server.models.sty_tui_stn import StyTuiStn
from openapi_server.models.term_resp_obj import TermRespObj
from openapi_server.models.termtype_code import TermtypeCode
| 48
| 98
| 0.890046
| 117
| 864
| 6.273504
| 0.384615
| 0.179837
| 0.277929
| 0.376022
| 0.188011
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002503
| 0.075231
| 864
| 17
| 99
| 50.823529
| 0.916145
| 0.068287
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
025238bd5558ffe79241c57f24def7add70a520a
| 8,783
|
py
|
Python
|
tools/moduletests/unit/test_selinuxpermissive.py
|
stivesso/aws-ec2rescue-linux
|
63fb350ba65d3d67c25c0ecc367793adef6cebbd
|
[
"Apache-2.0"
] | 178
|
2017-07-18T18:58:36.000Z
|
2022-03-31T03:12:52.000Z
|
tools/moduletests/unit/test_selinuxpermissive.py
|
stivesso/aws-ec2rescue-linux
|
63fb350ba65d3d67c25c0ecc367793adef6cebbd
|
[
"Apache-2.0"
] | 45
|
2017-07-18T23:19:06.000Z
|
2021-11-30T17:31:51.000Z
|
tools/moduletests/unit/test_selinuxpermissive.py
|
stivesso/aws-ec2rescue-linux
|
63fb350ba65d3d67c25c0ecc367793adef6cebbd
|
[
"Apache-2.0"
] | 72
|
2017-07-18T18:57:59.000Z
|
2022-03-29T06:14:06.000Z
|
# Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
Unit tests for the selinuxpermissive module
"""
import os
import sys
import unittest
import mock
import moduletests.src.selinuxpermissive
try:
# Python 2.x
from cStringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
if sys.hexversion >= 0x3040000:
# contextlib.redirect_stdout was introduced in Python 3.4
import contextlib
else:
# contextlib2 is a backport of contextlib from Python 3.5 and is compatible with Python2/3
import contextlib2 as contextlib
class Testselinuxpermissive(unittest.TestCase):
    """Unit tests for moduletests.src.selinuxpermissive's detect/fix/run.

    All filesystem and module helpers are replaced with unittest.mock patches;
    stdout is captured via contextlib.redirect_stdout so message output can be
    asserted. Note: stacked @mock.patch decorators are applied bottom-up, so
    the mock arguments arrive in reverse decorator order.
    """

    # Path of the SELinux config file the module under test inspects/rewrites.
    config_file_path = "/etc/selinux/config"

    def setUp(self):
        # Fresh stdout capture buffer for each test.
        self.output = StringIO()

    def tearDown(self):
        self.output.close()

    @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=False)
    def test_detect_no_selinux(self, isfile_mock):
        """detect() returns False when the SELinux config file is absent."""
        self.assertFalse(moduletests.src.selinuxpermissive.detect(self.config_file_path))
        self.assertTrue(isfile_mock.called)

    @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.open", mock.mock_open(read_data="SELINUX=enforcing"))
    def test_detect_problem(self, isfile_mock):
        """detect() returns True when the config says SELINUX=enforcing."""
        self.assertTrue(moduletests.src.selinuxpermissive.detect(self.config_file_path))
        self.assertTrue(isfile_mock.called)

    @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.open", mock.mock_open(read_data="SELINUX=permissive"))
    def test_detect_noproblem(self, isfile_mock):
        """detect() returns False when SELinux is already permissive."""
        self.assertFalse(moduletests.src.selinuxpermissive.detect(self.config_file_path))
        self.assertTrue(isfile_mock.called)

    @mock.patch("moduletests.src.selinuxpermissive.open", mock.mock_open(read_data="SELINUX=enforcing"))
    def test_fix_success(self):
        """fix() returns True when it can rewrite the config file."""
        self.assertTrue(moduletests.src.selinuxpermissive.fix(self.config_file_path))

    @mock.patch("moduletests.src.selinuxpermissive.open", side_effect=IOError)
    def test_fix_exception(self, open_mock):
        """fix() re-raises IOError and prints a warning if open() fails."""
        with contextlib.redirect_stdout(self.output):
            self.assertRaises(IOError, moduletests.src.selinuxpermissive.fix, self.config_file_path)
        self.assertEqual(self.output.getvalue(), "[WARN] Unable to replace contents of /etc/selinux/config\n")
        self.assertTrue(open_mock.called)

    @mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
    @mock.patch("moduletests.src.selinuxpermissive.detect", side_effect=(True, False))
    @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.backup", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.fix", return_value=True)
    def test_run_success_fixed(self, fix_mock, backup_mock, isfile_mock, detect_mock, config_mock):
        """run() succeeds when detect first flags a problem and fix resolves it
        (detect's side_effect yields True then False)."""
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": dict(),
                                    "REMEDIATE": True}
        with contextlib.redirect_stdout(self.output):
            self.assertTrue(moduletests.src.selinuxpermissive.run())
        self.assertTrue("[SUCCESS] selinux set to permissive" in self.output.getvalue())
        self.assertTrue(fix_mock.called)
        self.assertTrue(backup_mock.called)
        self.assertTrue(isfile_mock.called)
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)

    @mock.patch("moduletests.src.selinuxpermissive.get_config_dict", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.detect", return_value=False)
    def test_run_success(self, detect_mock, config_mock):
        """run() succeeds immediately when detect finds nothing to fix."""
        with contextlib.redirect_stdout(self.output):
            self.assertTrue(moduletests.src.selinuxpermissive.run())
        self.assertTrue("[SUCCESS] selinux is not set to enforcing" in self.output.getvalue())
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)

    @mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
    @mock.patch("moduletests.src.selinuxpermissive.detect", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.backup", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.fix", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.restore", return_value=True)
    def test_run_failure_isfile(self,
                                restore_mock,
                                fix_mock,
                                backup_mock,
                                isfile_mock,
                                detect_mock,
                                config_mock):
        """run() fails (and restores the backup) when detect keeps returning
        True even after fix ran."""
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": {self.config_file_path: "/some/path"},
                                    "REMEDIATE": True,
                                    "SUDO": True}
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.selinuxpermissive.run())
        self.assertTrue("[FAILURE] failed to set selinux set to permissive" in self.output.getvalue())
        self.assertTrue(restore_mock.called)
        self.assertTrue(fix_mock.called)
        self.assertTrue(backup_mock.called)
        self.assertTrue(isfile_mock.called)
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)

    @mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
    @mock.patch("moduletests.src.selinuxpermissive.detect", return_value=True)
    @mock.patch("moduletests.src.selinuxpermissive.os.path.isfile", return_value=False)
    @mock.patch("moduletests.src.selinuxpermissive.fix", return_value=True)
    def test_run_failure(self, fix_mock, isfile_mock, detect_mock, config_mock):
        """run() fails when the problem persists and the config file is gone
        (no backup entry to restore)."""
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": dict(),
                                    "REMEDIATE": True,
                                    "SUDO": True}
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.selinuxpermissive.run())
        self.assertTrue("[FAILURE] failed to set selinux set to permissive" in self.output.getvalue())
        self.assertTrue(fix_mock.called)
        self.assertTrue(isfile_mock.called)
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)

    @mock.patch("moduletests.src.selinuxpermissive.get_config_dict")
    @mock.patch("moduletests.src.selinuxpermissive.detect", side_effect=IOError)
    @mock.patch("moduletests.src.selinuxpermissive.restore", return_value=True)
    def test_run_failure_exception(self, restore_mock, detect_mock, config_mock):
        """run() fails gracefully and restores the backup when detect raises."""
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": {self.config_file_path: "/some/path"},
                                    "REMEDIATE": True}
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.selinuxpermissive.run())
        self.assertTrue(self.output.getvalue().endswith("Review the logs to determine the cause of the issue.\n"))
        self.assertTrue(restore_mock.called)
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)

    @mock.patch("moduletests.src.selinuxpermissive.get_config_dict", side_effect=IOError)
    def test_run_failure_config_exception(self, config_mock):
        """run() fails gracefully when even loading the config dict raises."""
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.selinuxpermissive.run())
        self.assertTrue(self.output.getvalue().endswith("Review the logs to determine the cause of the issue.\n"))
        self.assertTrue(config_mock.called)
| 51.063953
| 114
| 0.683138
| 1,016
| 8,783
| 5.747047
| 0.165354
| 0.095907
| 0.212365
| 0.110293
| 0.773934
| 0.754239
| 0.742422
| 0.737113
| 0.699092
| 0.695667
| 0
| 0.005472
| 0.209382
| 8,783
| 171
| 115
| 51.362573
| 0.835397
| 0.086189
| 0
| 0.621212
| 0
| 0
| 0.23869
| 0.153462
| 0
| 0
| 0.001125
| 0
| 0.325758
| 1
| 0.098485
| false
| 0
| 0.075758
| 0
| 0.189394
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5a1f5bbe05981d0eb994d7e823249c4d51b796e1
| 177
|
py
|
Python
|
app/admin.py
|
Kanogaelias/neighbourhoodwatch
|
0dea00bdf83e5045b3cfaba55e483bdd72343a1d
|
[
"Unlicense"
] | 1
|
2020-09-28T03:51:33.000Z
|
2020-09-28T03:51:33.000Z
|
app/admin.py
|
Kanogaelias/neighbourhoodwatch
|
0dea00bdf83e5045b3cfaba55e483bdd72343a1d
|
[
"Unlicense"
] | null | null | null |
app/admin.py
|
Kanogaelias/neighbourhoodwatch
|
0dea00bdf83e5045b3cfaba55e483bdd72343a1d
|
[
"Unlicense"
] | null | null | null |
from django.contrib import admin
from .models import Neighbourhood,Profile,Business
# Register each model with the default ModelAdmin.
for model in (Neighbourhood, Profile, Business):
    admin.site.register(model)
| 35.4
| 50
| 0.853107
| 23
| 177
| 6.565217
| 0.478261
| 0.178808
| 0.337748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056497
| 177
| 5
| 51
| 35.4
| 0.904192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5a51fbe326c398e262cbdb3bebbaebe4d2376347
| 57
|
py
|
Python
|
gym_forest/envs/__init__.py
|
kmckiern/gym-forest
|
12f7c8c1ab64d18983c8817bec4efe627e298bde
|
[
"Apache-2.0"
] | 2
|
2020-01-14T07:47:29.000Z
|
2020-04-16T13:50:03.000Z
|
gym_forest/envs/__init__.py
|
kmckiern/gym-forest
|
12f7c8c1ab64d18983c8817bec4efe627e298bde
|
[
"Apache-2.0"
] | null | null | null |
gym_forest/envs/__init__.py
|
kmckiern/gym-forest
|
12f7c8c1ab64d18983c8817bec4efe627e298bde
|
[
"Apache-2.0"
] | null | null | null |
from gym_forest.envs.gym_forest import ForestDiscreteEnv
| 28.5
| 56
| 0.894737
| 8
| 57
| 6.125
| 0.75
| 0.367347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070175
| 57
| 1
| 57
| 57
| 0.924528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ce556fd1cb299c8c110043a6231cf66a106e6003
| 168
|
py
|
Python
|
Lib/site-packages/QtModularUiPack/Widgets/VideoExtensions/__init__.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 3
|
2019-11-11T12:09:23.000Z
|
2022-02-17T10:02:55.000Z
|
QtModularUiPack/Widgets/VideoExtensions/__init__.py
|
dowerner/QtModularUiPack
|
de2ce6ba3a1cd52ca00eaea3ea3bb2247fe76ba3
|
[
"Apache-2.0"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
Lib/site-packages/QtModularUiPack/Widgets/VideoExtensions/__init__.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 2
|
2019-11-11T12:09:31.000Z
|
2019-11-11T12:09:42.000Z
|
from .image_render_widget import ImageRenderWidget, ImageCircle, ImageEllipse, ImageLayer, ImageRectangle, ImageShape
from .video_frame_grabber import VideoFrameGrabber
| 84
| 117
| 0.886905
| 17
| 168
| 8.529412
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 168
| 2
| 118
| 84
| 0.929487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ce593babf7fb43d18d84500bd381972dac1e100c
| 3,427
|
py
|
Python
|
models/unetp/layers.py
|
qgking/DASC_COVID19
|
3300516b1d0e9896e2fb2ffda8527e0e1a1fcf2c
|
[
"MIT"
] | 4
|
2021-04-21T05:09:49.000Z
|
2022-01-17T13:02:45.000Z
|
models/unetp/layers.py
|
qgking/DASC_COVID19
|
3300516b1d0e9896e2fb2ffda8527e0e1a1fcf2c
|
[
"MIT"
] | null | null | null |
models/unetp/layers.py
|
qgking/DASC_COVID19
|
3300516b1d0e9896e2fb2ffda8527e0e1a1fcf2c
|
[
"MIT"
] | 1
|
2021-07-08T02:20:43.000Z
|
2021-07-08T02:20:43.000Z
|
# -*- coding: utf-8 -*-
# @Time : 20/7/2 11:08
# @Author : qgking
# @Email : qgking@tju.edu.cn
# @Software: PyCharm
# @Desc : layers.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.unetp.init_weights import init_weights
class unetConv2(nn.Module):
    """A stack of `n` Conv2d(+optional BatchNorm)+ReLU blocks.

    The blocks are stored as attributes conv1..convN and applied in order by
    forward(). Only the first conv maps in_size -> out_size channels; the rest
    map out_size -> out_size.
    """

    def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1):
        super(unetConv2, self).__init__()
        self.n = n
        self.ks = ks
        self.stride = stride
        self.padding = padding
        # Build the conv blocks once; the original duplicated this whole loop
        # in both branches of `if is_batchnorm`, differing only by BatchNorm.
        for i in range(1, n + 1):
            layers = [nn.Conv2d(in_size, out_size, ks, stride, padding)]
            if is_batchnorm:
                layers.append(nn.BatchNorm2d(out_size))
            layers.append(nn.ReLU(inplace=True))
            setattr(self, 'conv%d' % i, nn.Sequential(*layers))
            in_size = out_size
        # initialise the blocks
        for m in self.children():
            init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        """Apply conv1..convN sequentially to `inputs`."""
        x = inputs
        for i in range(1, self.n + 1):
            conv = getattr(self, 'conv%d' % i)
            x = conv(x)
        return x
class unetUp(nn.Module):
    """Decoder up-block: upsample the input, concatenate skip-connection
    tensors along the channel axis, then apply a unetConv2 stack."""

    def __init__(self, in_size, out_size, is_deconv, n_concat=2):
        super(unetUp, self).__init__()
        self.conv = unetConv2(out_size * 2, out_size, False)
        if is_deconv:
            # Learned upsampling via transposed convolution.
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=4, stride=2, padding=1)
        else:
            # Fixed bilinear upsampling.
            self.up = nn.UpsamplingBilinear2d(scale_factor=2)
        # Initialise direct children, skipping nested unetConv2 blocks
        # (they initialise their own weights).
        for child in self.children():
            if 'unetConv2' in type(child).__name__:
                continue
            init_weights(child, init_type='kaiming')

    def forward(self, inputs0, *input):
        """Upsample inputs0, then concatenate each extra tensor on dim 1."""
        merged = self.up(inputs0)
        for skip in input:
            merged = torch.cat([merged, skip], 1)
        return self.conv(merged)
class unetUp_origin(nn.Module):
    """Original-style up-block: upsample, concatenate `n_concat` feature maps,
    then convolve. The conv stack expects in_size + (n_concat - 2) * out_size
    input channels."""

    def __init__(self, in_size, out_size, is_deconv, n_concat=2):
        super(unetUp_origin, self).__init__()
        # The conv stack is identical in both upsampling modes, so build it
        # once (the original duplicated this line in each branch).
        self.conv = unetConv2(in_size + (n_concat - 2) * out_size, out_size, False)
        if is_deconv:
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=4, stride=2, padding=1)
        else:
            self.up = nn.UpsamplingBilinear2d(scale_factor=2)
        # initialise the blocks (unetConv2 initialises its own weights)
        for m in self.children():
            if m.__class__.__name__.find('unetConv2') != -1:
                continue
            init_weights(m, init_type='kaiming')

    def forward(self, inputs0, *input):
        """Upsample inputs0, concatenate the given skip tensors on dim 1,
        then apply the conv stack."""
        outputs0 = self.up(inputs0)
        for i in range(len(input)):
            outputs0 = torch.cat([outputs0, input[i]], 1)
        return self.conv(outputs0)
| 35.329897
| 95
| 0.566093
| 455
| 3,427
| 4.046154
| 0.213187
| 0.076046
| 0.0717
| 0.063552
| 0.768061
| 0.752852
| 0.752852
| 0.752852
| 0.732754
| 0.710483
| 0
| 0.028085
| 0.314269
| 3,427
| 96
| 96
| 35.697917
| 0.755319
| 0.114678
| 0
| 0.61194
| 0
| 0
| 0.018887
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089552
| false
| 0
| 0.059701
| 0
| 0.238806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ce8c5d32aa7deb64d060bb2911a4ad28bae94c9a
| 1,630
|
py
|
Python
|
onap_tests/unit/components/sdnc.py
|
Orange-OpenSource/xtesting-onap-tests
|
ce4237f49089a91c81f5fad552f78fec384fd504
|
[
"Apache-2.0"
] | null | null | null |
onap_tests/unit/components/sdnc.py
|
Orange-OpenSource/xtesting-onap-tests
|
ce4237f49089a91c81f5fad552f78fec384fd504
|
[
"Apache-2.0"
] | null | null | null |
onap_tests/unit/components/sdnc.py
|
Orange-OpenSource/xtesting-onap-tests
|
ce4237f49089a91c81f5fad552f78fec384fd504
|
[
"Apache-2.0"
] | 2
|
2018-06-08T15:49:51.000Z
|
2021-06-22T10:06:30.000Z
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import unittest
class SdncTestingBase(unittest.TestCase):
    """Placeholder base class for SDNC component tests (no cases yet)."""
# {"input": {"sdnc-request-header":
# {"svc-notification-url":
# "http:\\/\\/onap.org:8080\\/adapters\\/rest\\/SDNCNotify",
# "svc-request-id": "test", "svc-action": "reserve"},
# "request-information":
# {"request-action": "PreloadVNFRequest", "order-version": "1",
# "notification-url": "onap.org", "order-number": "1", "request-id": "test"},
# "vnf-topology-information": {"vnf-assignments": {"vnf-vms": [],
# "availability-zones": [], "vnf-networks": []},
# "vnf-parameters":
# [{"vnf-parameter-name": "netconf_user_1",
# "vnf-parameter-value": "netconfuser1"},
# {"vnf-parameter-name": "netconf_password_1",
# "vnf-parameter-value": "ncuser1Pass"},
# {"vnf-parameter-name": "netconf_ssh_public_key_1",
# "vnf-parameter-value": "vmrf_key_pair"}],
# "vnf-topology-identifier":
# {"service-type": "a674f0ce-3f7e-4f75-96f7-39830e9a1b61",
# "generic-vnf-type": "vMRFaaS3/vMRF3 0",
# "vnf-name": "be1e0d5e-4c89-4467-b2ef-c1c3f8a5b136",
# "generic-vnf-name": "vMRFaaS3-service-instance-0DP8AF",
# "vnf-type": "vmrf30..Vmrf3..base_swms..module-0"}}}}
#
# SDNC url: /restconf/operations/VNF-API:preload-vnf-topology-operation
#
# {"output":{"svc-request-id":"test",
# "response-code":"200","ack-final-indicator":"Y"}}
| 36.222222
| 77
| 0.693865
| 205
| 1,630
| 5.463415
| 0.609756
| 0.064286
| 0.034821
| 0.061607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047844
| 0.089571
| 1,630
| 44
| 78
| 37.045455
| 0.706873
| 0.91227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
cea9d3d45019f38b2af20d0ed28e7db843971d6a
| 301
|
py
|
Python
|
dpctl/tests/_helper.py
|
reazulhoque/dpctl
|
27634efff7bcaf2096d3e236d9739e1a25e0d99e
|
[
"Apache-2.0"
] | 1
|
2020-08-09T13:55:34.000Z
|
2020-08-09T13:55:34.000Z
|
dpctl/tests/_helper.py
|
PokhodenkoSA/pydppl
|
9b8644b167a2bc9d1067e5c0d13d3abef0bff82b
|
[
"Apache-2.0"
] | 1
|
2021-07-30T09:01:28.000Z
|
2021-07-30T09:01:28.000Z
|
dpctl/tests/_helper.py
|
PokhodenkoSA/dpctl
|
9b8644b167a2bc9d1067e5c0d13d3abef0bff82b
|
[
"Apache-2.0"
] | null | null | null |
import dpctl
def _has_device(device_type, backend="opencl"):
    """Return True if at least one device of *device_type* exists for *backend*."""
    return bool(dpctl.get_num_devices(backend=backend, device_type=device_type))


def has_gpu(backend="opencl"):
    """Return True if a GPU device is available for *backend*."""
    return _has_device("gpu", backend=backend)


def has_cpu(backend="opencl"):
    """Return True if a CPU device is available for *backend*."""
    return _has_device("cpu", backend=backend)


def has_sycl_platforms():
    """Return True if any SYCL platform is detected."""
    # bool() falls back to __len__ truthiness, so the explicit len() call
    # in the original was redundant.
    return bool(dpctl.get_platforms())
| 21.5
| 74
| 0.747508
| 44
| 301
| 4.863636
| 0.409091
| 0.084112
| 0.17757
| 0.214953
| 0.607477
| 0.607477
| 0.607477
| 0.607477
| 0.607477
| 0.607477
| 0
| 0
| 0.112957
| 301
| 13
| 75
| 23.153846
| 0.801498
| 0
| 0
| 0
| 0
| 0
| 0.059801
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
cebb3410e55a48d88294300e9e43bf648df548a0
| 8,633
|
py
|
Python
|
lotlan_scheduler/parser/LoTLanParserListener.py
|
iml130/lotlan-scheduler
|
b576f853706d614a918dccd9572cc2c2b666bbe4
|
[
"Apache-2.0"
] | null | null | null |
lotlan_scheduler/parser/LoTLanParserListener.py
|
iml130/lotlan-scheduler
|
b576f853706d614a918dccd9572cc2c2b666bbe4
|
[
"Apache-2.0"
] | null | null | null |
lotlan_scheduler/parser/LoTLanParserListener.py
|
iml130/lotlan-scheduler
|
b576f853706d614a918dccd9572cc2c2b666bbe4
|
[
"Apache-2.0"
] | null | null | null |
# Generated from LoTLanParser.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .LoTLanParser import LoTLanParser
else:
from LoTLanParser import LoTLanParser
# This class defines a complete listener for a parse tree produced by LoTLanParser.
class LoTLanParserListener(ParseTreeListener):
# Enter a parse tree produced by LoTLanParser#program.
def enterProgram(self, ctx:LoTLanParser.ProgramContext):
pass
# Exit a parse tree produced by LoTLanParser#program.
def exitProgram(self, ctx:LoTLanParser.ProgramContext):
pass
# Enter a parse tree produced by LoTLanParser#template.
def enterTemplate(self, ctx:LoTLanParser.TemplateContext):
pass
# Exit a parse tree produced by LoTLanParser#template.
def exitTemplate(self, ctx:LoTLanParser.TemplateContext):
pass
# Enter a parse tree produced by LoTLanParser#templateStart.
def enterTemplateStart(self, ctx:LoTLanParser.TemplateStartContext):
pass
# Exit a parse tree produced by LoTLanParser#templateStart.
def exitTemplateStart(self, ctx:LoTLanParser.TemplateStartContext):
pass
# Enter a parse tree produced by LoTLanParser#instance.
def enterInstance(self, ctx:LoTLanParser.InstanceContext):
pass
# Exit a parse tree produced by LoTLanParser#instance.
def exitInstance(self, ctx:LoTLanParser.InstanceContext):
pass
# Enter a parse tree produced by LoTLanParser#instanceStart.
def enterInstanceStart(self, ctx:LoTLanParser.InstanceStartContext):
pass
# Exit a parse tree produced by LoTLanParser#instanceStart.
def exitInstanceStart(self, ctx:LoTLanParser.InstanceStartContext):
pass
# Enter a parse tree produced by LoTLanParser#memberVariable.
def enterMemberVariable(self, ctx:LoTLanParser.MemberVariableContext):
pass
# Exit a parse tree produced by LoTLanParser#memberVariable.
def exitMemberVariable(self, ctx:LoTLanParser.MemberVariableContext):
pass
# Enter a parse tree produced by LoTLanParser#value.
def enterValue(self, ctx:LoTLanParser.ValueContext):
pass
# Exit a parse tree produced by LoTLanParser#value.
def exitValue(self, ctx:LoTLanParser.ValueContext):
pass
# Enter a parse tree produced by LoTLanParser#transportOrderStep.
def enterTransportOrderStep(self, ctx:LoTLanParser.TransportOrderStepContext):
pass
# Exit a parse tree produced by LoTLanParser#transportOrderStep.
def exitTransportOrderStep(self, ctx:LoTLanParser.TransportOrderStepContext):
pass
# Enter a parse tree produced by LoTLanParser#tosStart.
def enterTosStart(self, ctx:LoTLanParser.TosStartContext):
pass
# Exit a parse tree produced by LoTLanParser#tosStart.
def exitTosStart(self, ctx:LoTLanParser.TosStartContext):
pass
# Enter a parse tree produced by LoTLanParser#tosStatement.
def enterTosStatement(self, ctx:LoTLanParser.TosStatementContext):
pass
# Exit a parse tree produced by LoTLanParser#tosStatement.
def exitTosStatement(self, ctx:LoTLanParser.TosStatementContext):
pass
# Enter a parse tree produced by LoTLanParser#locationStatement.
def enterLocationStatement(self, ctx:LoTLanParser.LocationStatementContext):
pass
# Exit a parse tree produced by LoTLanParser#locationStatement.
def exitLocationStatement(self, ctx:LoTLanParser.LocationStatementContext):
pass
# Enter a parse tree produced by LoTLanParser#optTosStatement.
def enterOptTosStatement(self, ctx:LoTLanParser.OptTosStatementContext):
pass
# Exit a parse tree produced by LoTLanParser#optTosStatement.
def exitOptTosStatement(self, ctx:LoTLanParser.OptTosStatementContext):
pass
# Enter a parse tree produced by LoTLanParser#eventStatement.
def enterEventStatement(self, ctx:LoTLanParser.EventStatementContext):
pass
# Exit a parse tree produced by LoTLanParser#eventStatement.
def exitEventStatement(self, ctx:LoTLanParser.EventStatementContext):
pass
# Enter a parse tree produced by LoTLanParser#onDoneStatement.
def enterOnDoneStatement(self, ctx:LoTLanParser.OnDoneStatementContext):
pass
# Exit a parse tree produced by LoTLanParser#onDoneStatement.
def exitOnDoneStatement(self, ctx:LoTLanParser.OnDoneStatementContext):
pass
# Enter a parse tree produced by LoTLanParser#parameterStatement.
def enterParameterStatement(self, ctx:LoTLanParser.ParameterStatementContext):
pass
# Exit a parse tree produced by LoTLanParser#parameterStatement.
def exitParameterStatement(self, ctx:LoTLanParser.ParameterStatementContext):
pass
# Enter a parse tree produced by LoTLanParser#task.
def enterTask(self, ctx:LoTLanParser.TaskContext):
pass
# Exit a parse tree produced by LoTLanParser#task.
def exitTask(self, ctx:LoTLanParser.TaskContext):
pass
# Enter a parse tree produced by LoTLanParser#taskStart.
def enterTaskStart(self, ctx:LoTLanParser.TaskStartContext):
pass
# Exit a parse tree produced by LoTLanParser#taskStart.
def exitTaskStart(self, ctx:LoTLanParser.TaskStartContext):
pass
# Enter a parse tree produced by LoTLanParser#taskStatement.
def enterTaskStatement(self, ctx:LoTLanParser.TaskStatementContext):
pass
# Exit a parse tree produced by LoTLanParser#taskStatement.
def exitTaskStatement(self, ctx:LoTLanParser.TaskStatementContext):
pass
# Enter a parse tree produced by LoTLanParser#constraintsStatement.
def enterConstraintsStatement(self, ctx:LoTLanParser.ConstraintsStatementContext):
pass
# Exit a parse tree produced by LoTLanParser#constraintsStatement.
def exitConstraintsStatement(self, ctx:LoTLanParser.ConstraintsStatementContext):
pass
# Enter a parse tree produced by LoTLanParser#transportOrder.
def enterTransportOrder(self, ctx:LoTLanParser.TransportOrderContext):
pass
# Exit a parse tree produced by LoTLanParser#transportOrder.
def exitTransportOrder(self, ctx:LoTLanParser.TransportOrderContext):
pass
# Enter a parse tree produced by LoTLanParser#fromStatement.
def enterFromStatement(self, ctx:LoTLanParser.FromStatementContext):
pass
# Exit a parse tree produced by LoTLanParser#fromStatement.
def exitFromStatement(self, ctx:LoTLanParser.FromStatementContext):
pass
# Enter a parse tree produced by LoTLanParser#toStatement.
def enterToStatement(self, ctx:LoTLanParser.ToStatementContext):
pass
# Exit a parse tree produced by LoTLanParser#toStatement.
def exitToStatement(self, ctx:LoTLanParser.ToStatementContext):
pass
# Enter a parse tree produced by LoTLanParser#parameters.
def enterParameters(self, ctx:LoTLanParser.ParametersContext):
pass
# Exit a parse tree produced by LoTLanParser#parameters.
def exitParameters(self, ctx:LoTLanParser.ParametersContext):
pass
# Enter a parse tree produced by LoTLanParser#repeatStatement.
def enterRepeatStatement(self, ctx:LoTLanParser.RepeatStatementContext):
pass
# Exit a parse tree produced by LoTLanParser#repeatStatement.
def exitRepeatStatement(self, ctx:LoTLanParser.RepeatStatementContext):
pass
# Enter a parse tree produced by LoTLanParser#expression.
def enterExpression(self, ctx:LoTLanParser.ExpressionContext):
pass
# Exit a parse tree produced by LoTLanParser#expression.
def exitExpression(self, ctx:LoTLanParser.ExpressionContext):
pass
# Enter a parse tree produced by LoTLanParser#binOperation.
def enterBinOperation(self, ctx:LoTLanParser.BinOperationContext):
pass
# Exit a parse tree produced by LoTLanParser#binOperation.
def exitBinOperation(self, ctx:LoTLanParser.BinOperationContext):
pass
# Enter a parse tree produced by LoTLanParser#unOperation.
def enterUnOperation(self, ctx:LoTLanParser.UnOperationContext):
pass
# Exit a parse tree produced by LoTLanParser#unOperation.
def exitUnOperation(self, ctx:LoTLanParser.UnOperationContext):
pass
# Enter a parse tree produced by LoTLanParser#con.
def enterCon(self, ctx:LoTLanParser.ConContext):
pass
# Exit a parse tree produced by LoTLanParser#con.
def exitCon(self, ctx:LoTLanParser.ConContext):
pass
del LoTLanParser
| 32.700758
| 86
| 0.741921
| 886
| 8,633
| 7.22009
| 0.168172
| 0.053463
| 0.089104
| 0.160388
| 0.819759
| 0.488354
| 0.483352
| 0.48257
| 0
| 0
| 0
| 0.000578
| 0.197961
| 8,633
| 264
| 87
| 32.700758
| 0.92331
| 0.37959
| 0
| 0.470588
| 1
| 0
| 0.000191
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.470588
| false
| 0.470588
| 0.02521
| 0
| 0.504202
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
0c9b7e6cbf3bfc9e02bf5d7b89e503bbfdb26688
| 2,119
|
py
|
Python
|
matrix_ops.py
|
rkibria/display3dfile
|
d1ea6d369d4ebee60b33734f94292f18f0754c2d
|
[
"MIT"
] | null | null | null |
matrix_ops.py
|
rkibria/display3dfile
|
d1ea6d369d4ebee60b33734f94292f18f0754c2d
|
[
"MIT"
] | null | null | null |
matrix_ops.py
|
rkibria/display3dfile
|
d1ea6d369d4ebee60b33734f94292f18f0754c2d
|
[
"MIT"
] | null | null | null |
import math
def matrixMult(v, m):
    """Apply the 4x4 row-major matrix *m* to the length-4 vector *v*.

    Returns the transformed vector as a 4-tuple; addition order per
    component matches a plain left-to-right dot product.
    """
    return tuple(
        sum(m[4 * row + col] * v[col] for col in range(4))
        for row in range(4)
    )
def getTranslationMatrix(dx, dy, dz):
    """Row-major 4x4 matrix translating points by (dx, dy, dz)."""
    mat = [0.0] * 16
    # Unit diagonal, then the translation terms in the last column.
    mat[0] = mat[5] = mat[10] = mat[15] = 1.0
    mat[3], mat[7], mat[11] = float(dx), float(dy), float(dz)
    return mat
def getRotateXMatrix(phi):
    """Row-major 4x4 matrix for a rotation of *phi* radians about the X axis."""
    c, s = math.cos(phi), math.sin(phi)
    mat = [0.0] * 16
    mat[0] = mat[15] = 1.0
    # 2x2 rotation block acts on the Y/Z components.
    mat[5], mat[6] = c, -s
    mat[9], mat[10] = s, c
    return mat
def getRotateYMatrix(phi):
    """Row-major 4x4 matrix for a rotation of *phi* radians about the Y axis."""
    c, s = math.cos(phi), math.sin(phi)
    mat = [0.0] * 16
    mat[5] = mat[15] = 1.0
    # 2x2 rotation block acts on the X/Z components.
    mat[0], mat[2] = c, s
    mat[8], mat[10] = -s, c
    return mat
def getRotateZMatrix(phi):
    """Row-major 4x4 matrix for a rotation of *phi* radians about the Z axis."""
    c, s = math.cos(phi), math.sin(phi)
    mat = [0.0] * 16
    mat[10] = mat[15] = 1.0
    # 2x2 rotation block acts on the X/Y components.
    mat[0], mat[1] = c, -s
    mat[4], mat[5] = s, c
    return mat
def getScaleMatrix(sx, sy, sz):
    """Row-major 4x4 matrix scaling the axes by (sx, sy, sz)."""
    mat = [0.0] * 16
    # Scale factors on the diagonal; homogeneous coordinate stays 1.
    for idx, factor in zip((0, 5, 10, 15), (sx, sy, sz, 1.0)):
        mat[idx] = float(factor)
    return mat
| 37.839286
| 74
| 0.283624
| 285
| 2,119
| 2.045614
| 0.133333
| 0.301887
| 0.35506
| 0.363636
| 0.603774
| 0.603774
| 0.54717
| 0.526587
| 0.526587
| 0.463122
| 0
| 0.177437
| 0.569136
| 2,119
| 55
| 75
| 38.527273
| 0.461117
| 0
| 0
| 0.326531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122449
| false
| 0
| 0.020408
| 0.061224
| 0.265306
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0b7432bfb99d5cfbdeba9186f7d72161c81fc535
| 108
|
py
|
Python
|
py/Utility.SetData.py
|
mathematicalmichael/SpringNodes
|
3ff4034b6e57ee6efa55c963e1819f3d30a2c4ab
|
[
"MIT"
] | 51
|
2015-09-25T09:30:57.000Z
|
2022-01-19T14:16:44.000Z
|
py/Utility.SetData.py
|
sabeelcoder/SpringNodes
|
e21a24965474d54369e74d23c06f8c42a7b926b5
|
[
"MIT"
] | 66
|
2015-09-30T02:43:32.000Z
|
2022-03-31T02:26:52.000Z
|
py/Utility.SetData.py
|
sabeelcoder/SpringNodes
|
e21a24965474d54369e74d23c06f8c42a7b926b5
|
[
"MIT"
] | 48
|
2015-11-19T01:34:47.000Z
|
2022-02-25T17:26:48.000Z
|
# NOTE(review): Dynamo/IronPython script node. `System` is the .NET root
# namespace (available only under the Dynamo host, not a PyPI package) and
# `IN` is the node's host-supplied input list -- confirm against the host.
import System
dataKey, data = IN
# Stores the value in the .NET AppDomain under "_Dyn_Wireless_<key>" so
# other script nodes can retrieve it with GetData using the same name.
System.AppDomain.CurrentDomain.SetData("_Dyn_Wireless_%s" % dataKey, data)
| 27
| 74
| 0.796296
| 14
| 108
| 5.928571
| 0.785714
| 0.26506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092593
| 108
| 4
| 74
| 27
| 0.846939
| 0
| 0
| 0
| 0
| 0
| 0.146789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0b8d9b5f54af7a02bfebe6bb369e3b7b52e4fff6
| 259
|
py
|
Python
|
src/airfly/_vendor/airflow/contrib/operators/gcp_text_to_speech_operator.py
|
ryanchao2012/airfly
|
230ddd88885defc67485fa0c51f66c4a67ae98a9
|
[
"MIT"
] | 7
|
2021-09-27T11:38:48.000Z
|
2022-02-01T06:06:24.000Z
|
src/airfly/_vendor/airflow/contrib/operators/gcp_text_to_speech_operator.py
|
ryanchao2012/airfly
|
230ddd88885defc67485fa0c51f66c4a67ae98a9
|
[
"MIT"
] | null | null | null |
src/airfly/_vendor/airflow/contrib/operators/gcp_text_to_speech_operator.py
|
ryanchao2012/airfly
|
230ddd88885defc67485fa0c51f66c4a67ae98a9
|
[
"MIT"
] | null | null | null |
# Auto generated by 'inv collect-airflow'
from airfly._vendor.airflow.providers.google.cloud.operators.text_to_speech import (
    CloudTextToSpeechSynthesizeOperator,
)
# Backwards-compatibility shim: re-exposes the provider-package operator
# under the legacy airflow.contrib class name. Adds no behaviour of its own.
class GcpTextToSpeechSynthesizeOperator(CloudTextToSpeechSynthesizeOperator):
    pass
| 28.777778
| 84
| 0.837838
| 23
| 259
| 9.304348
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096525
| 259
| 8
| 85
| 32.375
| 0.91453
| 0.150579
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.2
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
0b95ea2572caea42cfef156c084df6e66bc2a75b
| 105
|
py
|
Python
|
vnpy/app/realtime_monitor/ui/__init__.py
|
xyh888/vnpy
|
7b51716928ab9574f171a2eda190b37b4f393bb1
|
[
"MIT"
] | 5
|
2019-05-24T05:19:55.000Z
|
2020-07-29T13:21:49.000Z
|
vnpy/app/realtime_monitor/ui/__init__.py
|
xyh888/vnpy
|
7b51716928ab9574f171a2eda190b37b4f393bb1
|
[
"MIT"
] | null | null | null |
vnpy/app/realtime_monitor/ui/__init__.py
|
xyh888/vnpy
|
7b51716928ab9574f171a2eda190b37b4f393bb1
|
[
"MIT"
] | 2
|
2019-07-01T02:14:04.000Z
|
2020-07-29T13:21:53.000Z
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
@author:Hadrianl
"""
from .widget import CandleChartWidget
| 13.125
| 37
| 0.657143
| 12
| 105
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01087
| 0.12381
| 105
| 8
| 37
| 13.125
| 0.73913
| 0.514286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0bc07f714464b22a45e71ba4f3cc3e49b022e34b
| 214
|
py
|
Python
|
yourcfp/proposals/admin.py
|
sujay0399/CFP
|
cc39d4322fa1e1f1867e96c6208fd52ba55b3e8e
|
[
"MIT"
] | 2
|
2019-06-10T11:30:48.000Z
|
2019-08-17T21:19:12.000Z
|
yourcfp/proposals/admin.py
|
sujay0399/CFP
|
cc39d4322fa1e1f1867e96c6208fd52ba55b3e8e
|
[
"MIT"
] | 32
|
2019-05-22T19:38:43.000Z
|
2019-12-12T07:48:18.000Z
|
yourcfp/proposals/admin.py
|
sujay0399/CFP
|
cc39d4322fa1e1f1867e96c6208fd52ba55b3e8e
|
[
"MIT"
] | 1
|
2019-05-19T14:07:50.000Z
|
2019-05-19T14:07:50.000Z
|
from django.contrib import admin
from .models import Proposal, ProposalStatus, Feedback
# Register your models here.
# Each model is exposed in the Django admin with the default ModelAdmin;
# no custom list displays or filters are configured.
admin.site.register(Proposal)
admin.site.register(ProposalStatus)
admin.site.register(Feedback)
| 26.75
| 54
| 0.827103
| 27
| 214
| 6.555556
| 0.481481
| 0.152542
| 0.288136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088785
| 214
| 7
| 55
| 30.571429
| 0.907692
| 0.121495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e7f6c3a12191b3ddcadaf1fb8c25c606407c6263
| 25
|
py
|
Python
|
RUNFILE.py
|
AbhilashPal/IETHackathon18
|
78a8240ff0e5e16156f72956991c9c2510ab7c7f
|
[
"MIT"
] | null | null | null |
RUNFILE.py
|
AbhilashPal/IETHackathon18
|
78a8240ff0e5e16156f72956991c9c2510ab7c7f
|
[
"MIT"
] | null | null | null |
RUNFILE.py
|
AbhilashPal/IETHackathon18
|
78a8240ff0e5e16156f72956991c9c2510ab7c7f
|
[
"MIT"
] | 1
|
2019-03-13T10:24:40.000Z
|
2019-03-13T10:24:40.000Z
|
# Entry-point script: delegates straight to func1 in the sibling module py1.
import py1
py1.func1()
| 8.333333
| 12
| 0.68
| 4
| 25
| 4.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.2
| 25
| 2
| 13
| 12.5
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e7f7a5e47c572cd74e2ca8502f25e6892a5705da
| 149
|
py
|
Python
|
DateTime/TimeExample1.py
|
suprit08/PythonAssignments
|
6cab78660d8c77cf573cbea82e4dada19b0fc08c
|
[
"MIT"
] | null | null | null |
DateTime/TimeExample1.py
|
suprit08/PythonAssignments
|
6cab78660d8c77cf573cbea82e4dada19b0fc08c
|
[
"MIT"
] | null | null | null |
DateTime/TimeExample1.py
|
suprit08/PythonAssignments
|
6cab78660d8c77cf573cbea82e4dada19b0fc08c
|
[
"MIT"
] | null | null | null |
#TimeExample1.py
import time
# time.time() returns seconds (a float) since the Unix epoch,
# 00:00 UTC on 1st January 1970 -- "ticks" here means those seconds.
print("No.of total ticks since 1970 : ",time.time())
| 24.833333
| 61
| 0.724832
| 24
| 149
| 4.5
| 0.708333
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 0.174497
| 149
| 6
| 62
| 24.833333
| 0.780488
| 0.496644
| 0
| 0
| 0
| 0
| 0.449275
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
f00b3ff3621b2aec8525d673155e5394b229c758
| 65
|
py
|
Python
|
src/core/uv_edit/helpers/__init__.py
|
Epihaius/panda3dstudio
|
f5c62ca49617cae1aa5aa5b695200027da99e242
|
[
"BSD-3-Clause"
] | 63
|
2016-01-02T16:28:47.000Z
|
2022-01-19T11:29:51.000Z
|
src/core/uv_edit/helpers/__init__.py
|
Epihaius/panda3dstudio
|
f5c62ca49617cae1aa5aa5b695200027da99e242
|
[
"BSD-3-Clause"
] | 12
|
2016-06-12T14:14:15.000Z
|
2020-12-18T16:11:45.000Z
|
src/core/uv_edit/helpers/__init__.py
|
Epihaius/panda3dstudio
|
f5c62ca49617cae1aa5aa5b695200027da99e242
|
[
"BSD-3-Clause"
] | 17
|
2016-05-23T00:02:27.000Z
|
2021-04-25T17:48:27.000Z
|
from .grid import Grid
from .trnsf_gizmo import UVTransformGizmo
| 21.666667
| 41
| 0.846154
| 9
| 65
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123077
| 65
| 2
| 42
| 32.5
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f02297c941f2ac0c91c1553310250eea23e91813
| 78
|
py
|
Python
|
scripts/field/go50000.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | 2
|
2020-04-15T03:16:07.000Z
|
2020-08-12T23:28:32.000Z
|
scripts/field/go50000.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | null | null | null |
scripts/field/go50000.py
|
Snewmy/swordie
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
[
"MIT"
] | 3
|
2020-08-25T06:55:25.000Z
|
2020-12-01T13:07:43.000Z
|
# Inside Dangerous Forest
# NOTE(review): `sm` is presumably the ScriptManager injected by the game
# server's script host -- confirm against the engine. Plays the map-entry
# screen effect for this field.
sm.showEffect("Map/Effect.img/maplemap/enter/50000")
| 39
| 52
| 0.807692
| 11
| 78
| 5.727273
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067568
| 0.051282
| 78
| 2
| 52
| 39
| 0.783784
| 0.294872
| 0
| 0
| 0
| 0
| 0.648148
| 0.648148
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f02949e0cd24e0b7a8f6a0e5fa240c297e91ba9a
| 151
|
py
|
Python
|
task/admin.py
|
suvajitsarkar/taskManagement
|
0054c20fba8dd8eb3c4c83abdded8fc778a8b62b
|
[
"Apache-2.0"
] | null | null | null |
task/admin.py
|
suvajitsarkar/taskManagement
|
0054c20fba8dd8eb3c4c83abdded8fc778a8b62b
|
[
"Apache-2.0"
] | 1
|
2021-06-10T23:00:14.000Z
|
2021-06-10T23:00:14.000Z
|
task/admin.py
|
suvajitsarkar/taskManagement
|
0054c20fba8dd8eb3c4c83abdded8fc778a8b62b
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import Tasks, Audit
# Both models use the default ModelAdmin; no custom admin options needed.
admin.site.register(Tasks)
admin.site.register(Audit)
| 18.875
| 32
| 0.794702
| 22
| 151
| 5.454545
| 0.545455
| 0.15
| 0.283333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119205
| 151
| 7
| 33
| 21.571429
| 0.902256
| 0.172185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f02ef548c818c988f4a4155f301151d124148863
| 162
|
py
|
Python
|
fmridenoise/interfaces/__init__.py
|
wiheto/fmridenoise
|
cc544264806418618861f0ee93fff71a0fa83eca
|
[
"Apache-2.0"
] | null | null | null |
fmridenoise/interfaces/__init__.py
|
wiheto/fmridenoise
|
cc544264806418618861f0ee93fff71a0fa83eca
|
[
"Apache-2.0"
] | null | null | null |
fmridenoise/interfaces/__init__.py
|
wiheto/fmridenoise
|
cc544264806418618861f0ee93fff71a0fa83eca
|
[
"Apache-2.0"
] | null | null | null |
from .quality_measures import (QualityMeasures,
PipelinesQualityMeasures,
MergeGroupQualityMeasures)
| 54
| 57
| 0.555556
| 7
| 162
| 12.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.41358
| 162
| 3
| 57
| 54
| 0.936842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f059f2d51603c41a48b96eb9cf19790fe7bf6136
| 2,011
|
py
|
Python
|
src/utils/logger.py
|
gdevos010/ml_supervised_learning
|
ffe7ce99b0fdd346df6324a55370548873bc7b72
|
[
"MIT"
] | null | null | null |
src/utils/logger.py
|
gdevos010/ml_supervised_learning
|
ffe7ce99b0fdd346df6324a55370548873bc7b72
|
[
"MIT"
] | null | null | null |
src/utils/logger.py
|
gdevos010/ml_supervised_learning
|
ffe7ce99b0fdd346df6324a55370548873bc7b72
|
[
"MIT"
] | null | null | null |
import inspect
import logging
import os
from datetime import datetime
logger = logging.getLogger(__name__)
def info(msg):
    """Log *msg* at INFO level, prefixed with the caller's source location.

    The prefix carries an HH:MM:SS timestamp plus the calling file name,
    line number and function name, left-justified to a fixed width; the
    message itself is indented to mirror the caller's own source indent.
    """
    level = "INFO"
    # Frame [1] is the caller of info(); frame [0] would be this function.
    frame, filename, line_number, function_name, lines, index = inspect.getouterframes(
        inspect.currentframe())[1]
    line = lines[0]
    # Column of the first non-whitespace character on the calling line.
    indentation_level = line.find(line.lstrip())
    now = datetime.now().time().strftime("%H:%M:%S")
    prefix = f'[{now} {os.path.basename(filename)}:{line_number} - {function_name}'.ljust(45) + f'] {level}:'
    # 8 columns of the caller's indent are swallowed by the prefix padding.
    logger.info('{prefix}{i}\t{m}'.format(
        prefix=prefix,
        i=' ' * max(0, indentation_level - 8),
        m=msg
    ))
def debug(msg):
    """Log *msg* at DEBUG level, prefixed with the caller's source location.

    The prefix carries an HH:MM:SS timestamp plus the calling file name,
    line number and function name, left-justified to a fixed width; the
    message itself is indented to mirror the caller's own source indent.
    """
    level = "DEBUG"
    # Frame [1] is the caller of debug(); frame [0] would be this function.
    frame, filename, line_number, function_name, lines, index = inspect.getouterframes(
        inspect.currentframe())[1]
    line = lines[0]
    # Column of the first non-whitespace character on the calling line.
    indentation_level = line.find(line.lstrip())
    now = datetime.now().time().strftime("%H:%M:%S")
    prefix = f'[{now} {os.path.basename(filename)}:{line_number} - {function_name}'.ljust(45) + f'] {level}:'
    # Bug fix: emit via logger.debug (was logger.info), so the record's real
    # severity matches the "DEBUG" label and level filtering works.
    logger.debug('{prefix}{i}\t{m}'.format(
        prefix=prefix,
        i=' ' * max(0, indentation_level - 8),
        m=msg
    ))
def error(msg):
    """Log *msg* at ERROR level, prefixed with the caller's source location.

    The prefix carries an HH:MM:SS timestamp plus the calling file name,
    line number and function name, left-justified to a fixed width; the
    message itself is indented to mirror the caller's own source indent.
    """
    level = "ERROR"
    # Frame [1] is the caller of error(); frame [0] would be this function.
    frame, filename, line_number, function_name, lines, index = inspect.getouterframes(
        inspect.currentframe())[1]
    line = lines[0]
    # Column of the first non-whitespace character on the calling line.
    indentation_level = line.find(line.lstrip())
    now = datetime.now().time().strftime("%H:%M:%S")
    prefix = f'[{now} {os.path.basename(filename)}:{line_number} - {function_name}'.ljust(45) + f'] {level}:'
    # Bug fix: emit via logger.error (was logger.info), so the record's real
    # severity matches the "ERROR" label and level filtering works.
    logger.error('{prefix}{i}\t{m}'.format(
        prefix=prefix,
        i=' ' * max(0, indentation_level - 8),
        m=msg
    ))
def init_logger():
    """Configure logging: append to ml_supervised.log and echo to the console.

    Side effect: creates/opens ml_supervised.log in the working directory.
    """
    # NOTE(review): this local `logger` shadows the module-level one, but
    # both resolve to the same getLogger(__name__) object.
    logger = logging.getLogger(__name__)
    # Empty format string: handlers emit the pre-formatted message as-is.
    logging.basicConfig(format="",
                        handlers=[
                            logging.FileHandler("ml_supervised.log", 'a'),
                            logging.StreamHandler()
                        ])
    logger.setLevel(logging.DEBUG)
| 30.938462
| 109
| 0.593237
| 238
| 2,011
| 4.894958
| 0.235294
| 0.061803
| 0.092704
| 0.133906
| 0.751931
| 0.751931
| 0.751931
| 0.751931
| 0.751931
| 0.751931
| 0
| 0.011711
| 0.235704
| 2,011
| 64
| 110
| 31.421875
| 0.746259
| 0
| 0
| 0.673077
| 0
| 0
| 0.168076
| 0.062655
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f06131eff75ddc6fde7cf77ba3295bb9165315a1
| 93
|
py
|
Python
|
exercicios-turtle/.history/conversor_temp_20210624131757.py
|
Aleff13/poo-ufsc
|
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
|
[
"MIT"
] | 1
|
2021-11-28T18:49:21.000Z
|
2021-11-28T18:49:21.000Z
|
exercicios-turtle/.history/conversor_temp_20210624131757.py
|
Aleff13/poo-ufsc
|
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
|
[
"MIT"
] | null | null | null |
exercicios-turtle/.history/conversor_temp_20210624131757.py
|
Aleff13/poo-ufsc
|
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
|
[
"MIT"
] | null | null | null |
# Prompt (Portuguese) asking the user to type a temperature in degrees to
# see its Fahrenheit equivalent. The spelling "farhent" is part of the
# user-facing runtime string and is deliberately left untouched.
print("Abaixo digite o valor da temperatura em graus para saber sua equivalencia em farhent")
| 93
| 93
| 0.817204
| 15
| 93
| 5.066667
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139785
| 93
| 1
| 93
| 93
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0.893617
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
f066d159f049879af57017c36c170c23e929fbc9
| 84
|
py
|
Python
|
vidsz/opencv/writer/__init__.py
|
BlueMirrors/vidsz
|
c47f09a6b8cb8da9a0b6c97caf99bc2baab6fee7
|
[
"Apache-2.0"
] | 10
|
2021-06-13T07:09:42.000Z
|
2022-02-03T16:29:13.000Z
|
vidsz/opencv/writer/__init__.py
|
BlueMirrors/vidsz
|
c47f09a6b8cb8da9a0b6c97caf99bc2baab6fee7
|
[
"Apache-2.0"
] | 3
|
2021-09-30T18:40:57.000Z
|
2022-01-31T08:09:31.000Z
|
vidsz/opencv/writer/__init__.py
|
BlueMirrors/vidsz
|
c47f09a6b8cb8da9a0b6c97caf99bc2baab6fee7
|
[
"Apache-2.0"
] | 1
|
2021-09-30T21:02:55.000Z
|
2021-09-30T21:02:55.000Z
|
"""Implements vidsz's Writer for Opencv Backend
"""
from .base_writer import Writer
| 21
| 47
| 0.77381
| 12
| 84
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130952
| 84
| 3
| 48
| 28
| 0.876712
| 0.52381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6525057b487c01026e2ea1ca287e70baa93193fa
| 105
|
py
|
Python
|
office365/sharepoint/social/socialRestActor.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | 544
|
2016-08-04T17:10:16.000Z
|
2022-03-31T07:17:20.000Z
|
office365/sharepoint/social/socialRestActor.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | 438
|
2016-10-11T12:24:22.000Z
|
2022-03-31T19:30:35.000Z
|
office365/sharepoint/social/socialRestActor.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | 202
|
2016-08-22T19:29:40.000Z
|
2022-03-30T20:26:15.000Z
|
from office365.runtime.client_object import ClientObject
# Marker type for the SharePoint SP.Social actor REST entity; it inherits
# all behaviour from ClientObject and intentionally adds none of its own.
class SocialRestActor(ClientObject):
    pass
| 17.5
| 56
| 0.828571
| 11
| 105
| 7.818182
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032609
| 0.12381
| 105
| 5
| 57
| 21
| 0.902174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
652f17d43938dcf31e21ec8792cc0d07ba906cb6
| 111
|
py
|
Python
|
module.py
|
damnkk/cycle-GAN
|
fbf84eb67ef0a1ba909e95c6862f72419c6f4185
|
[
"MIT"
] | null | null | null |
module.py
|
damnkk/cycle-GAN
|
fbf84eb67ef0a1ba909e95c6862f72419c6f4185
|
[
"MIT"
] | null | null | null |
module.py
|
damnkk/cycle-GAN
|
fbf84eb67ef0a1ba909e95c6862f72419c6f4185
|
[
"MIT"
] | null | null | null |
import tensorflow.compat.v1 as tf
import ops
import utils
from reader import Reader
from gen import Generator
| 15.857143
| 33
| 0.828829
| 18
| 111
| 5.111111
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010638
| 0.153153
| 111
| 6
| 34
| 18.5
| 0.968085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e8f1217d476c4e333500c56daec82417a2932258
| 58
|
py
|
Python
|
src/petronia/defimpl/configuration/__init__.py
|
groboclown/petronia
|
486338023d19cee989e92f0c5692680f1a37811f
|
[
"MIT"
] | 19
|
2017-06-21T10:28:24.000Z
|
2021-12-31T11:49:28.000Z
|
src/petronia/defimpl/configuration/__init__.py
|
groboclown/petronia
|
486338023d19cee989e92f0c5692680f1a37811f
|
[
"MIT"
] | 10
|
2016-11-11T18:57:57.000Z
|
2021-02-01T15:33:43.000Z
|
src/petronia/defimpl/configuration/__init__.py
|
groboclown/petronia
|
486338023d19cee989e92f0c5692680f1a37811f
|
[
"MIT"
] | 3
|
2017-09-17T03:29:35.000Z
|
2019-06-03T10:43:08.000Z
|
"""
Initial extension configuration implementations.
"""
| 11.6
| 48
| 0.758621
| 4
| 58
| 11
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 58
| 4
| 49
| 14.5
| 0.862745
| 0.827586
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3310c438f5ddc574530b96b1e372b8b133d33d06
| 7,284
|
py
|
Python
|
tests/algorithms/test_pbr.py
|
UCL/scikit-surgeryfredwebapp
|
3e22fc8b9d0898502a5f8a6c8cc813dc62fc3fd5
|
[
"BSD-3-Clause"
] | 5
|
2020-10-22T01:41:33.000Z
|
2022-01-07T08:55:39.000Z
|
tests/algorithms/test_pbr.py
|
UCL/scikit-surgeryfred
|
3e60a67fced1ca38f54920ccf37043588bdb1401
|
[
"BSD-3-Clause"
] | 62
|
2020-06-05T10:54:04.000Z
|
2021-05-18T19:31:27.000Z
|
tests/algorithms/test_pbr.py
|
UCL/scikit-surgeryfredwebapp
|
3e22fc8b9d0898502a5f8a6c8cc813dc62fc3fd5
|
[
"BSD-3-Clause"
] | 1
|
2020-06-25T09:59:53.000Z
|
2020-06-25T09:59:53.000Z
|
# coding=utf-8
"""Fiducial Registration Educational Demonstration tests"""
import math
import numpy as np
from scipy.stats import linregress
import pytest
from sksurgeryfred.algorithms.errors import expected_absolute_value
import sksurgeryfred.algorithms.point_based_reg as pbreg
def _make_circle_fiducials(no_fids, centre, radius,
fixed_stddevs, moving_stddevs):
fixed_fids = np.zeros(shape=(no_fids, 3), dtype=np.float64)
moving_fids = np.zeros(shape=(no_fids, 3), dtype=np.float64)
angle_inc = math.pi * 2.0 / float(no_fids)
for fid in range(no_fids):
fixed_fids[fid] = ([radius * math.cos(angle_inc*fid),
radius * math.sin(angle_inc*fid),
0.0] +
np.random.normal(scale=fixed_stddevs) +
centre)
moving_fids[fid] = ([radius * math.cos(angle_inc*fid),
radius * math.sin(angle_inc*fid),
0.0] +
np.random.normal(scale=moving_stddevs) +
centre)
return fixed_fids, moving_fids
def _run_registrations (pbr, no_fids, centre, radius, fixed_stddevs,
                        moving_stddevs, repeats):
    """Run `repeats` registrations on freshly sampled circular fiducials.

    Returns (mean squared TRE, mean squared FRE, expected TRE^2,
    expected FRE, p-value of a linear regression of FRE on TRE).
    """
    tres=np.empty(repeats, dtype=np.float64)
    fres=np.empty(repeats, dtype=np.float64)
    # Fixed seed so repeated test runs see identical fiducial noise.
    np.random.seed(0)
    for i in range(repeats):
        fixed_fids, moving_fids = _make_circle_fiducials(no_fids, centre,
                                                         radius,
                                                         fixed_stddevs,
                                                         moving_stddevs)
        [_success, fres[i], _mean_fle, expected_tre_squared, expected_fre,
         _transformed_target_2d, tres[i], _no_fids] = pbr.register(
            fixed_fids, moving_fids)
    # NOTE(review): expected_tre_squared / expected_fre carry the values
    # from the *last* loop iteration; repeats must be >= 1 or they are
    # unbound here.
    ave_tre = np.average(tres * tres)
    ave_fre = np.average(fres * fres)
    # A high p-value (no significant slope) indicates TRE is uncorrelated
    # with FRE, as registration theory predicts.
    _slope, _intercept, _r_value, p_value, _std_err = linregress(tres, fres)
    return ave_tre, ave_fre, expected_tre_squared, expected_fre, p_value
def test_init_with_moving_fle():
    """
    Constructing PointBasedRegistration with a non-zero moving FLE
    should raise NotImplementedError.
    """
    fixed_fle_std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64)
    # Non-zero moving fiducial localisation error is not supported.
    moving_fle_std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64)
    fixed_fle_easv = expected_absolute_value(fixed_fle_std_dev)
    moving_fle_easv = expected_absolute_value(moving_fle_std_dev)
    target = np.array([[0.0, 0.0, 0.0]], dtype=np.float64)
    with pytest.raises(NotImplementedError):
        pbreg.PointBasedRegistration(target, fixed_fle_easv, moving_fle_easv)
def test_pbr_3_fids():
    """
    Registration with 3 fiducials on a radius-20 circle: measured mean
    squared TRE/FRE should match the model's expected values, and TRE
    should be uncorrelated with FRE.
    """
    fixed_fle_std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64)
    moving_fle_std_dev = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    fixed_fle_easv = expected_absolute_value(fixed_fle_std_dev)
    moving_fle_easv = expected_absolute_value(moving_fle_std_dev)
    target = np.array([[0.0, 0.0, 0.0]], dtype=np.float64)
    pbr = pbreg.PointBasedRegistration(target, fixed_fle_easv, moving_fle_easv)
    centre = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    radius = 20.0
    # NOTE(review): these two zeros are dead assignments -- both names are
    # immediately overwritten by the _run_registrations return below.
    expected_tre_squared = 0
    expected_fre = 0
    repeats = 100
    no_fids = 3
    ave_tresq, ave_fresq, expected_tre_squared, expected_fre, p_value = \
        _run_registrations(pbr, no_fids, centre, radius,
                           fixed_fle_std_dev,
                           moving_fle_std_dev, repeats)
    assert np.isclose(ave_tresq, expected_tre_squared, atol=0.0, rtol=0.10)
    assert np.isclose(ave_fresq, expected_fre, atol=0.0, rtol=0.05)
    # No significant TRE-vs-FRE correlation expected.
    assert p_value > 0.05
def test_pbr_10_fids():
    """
    Registration with 10 fiducials on a radius-2 circle: measured mean
    squared TRE/FRE should match the model's expected values, and TRE
    should be uncorrelated with FRE.
    """
    fixed_fle_std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64)
    moving_fle_std_dev = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    fixed_fle_easv = expected_absolute_value(fixed_fle_std_dev)
    moving_fle_easv = expected_absolute_value(moving_fle_std_dev)
    target = np.array([[0.0, 0.0, 0.0]], dtype=np.float64)
    pbr = pbreg.PointBasedRegistration(target, fixed_fle_easv, moving_fle_easv)
    centre = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    radius = 2.0
    repeats = 200
    no_fids = 10
    ave_tresq, ave_fresq, expected_tre_squared, expected_fre, p_value = \
        _run_registrations(pbr, no_fids, centre, radius,
                           fixed_fle_std_dev,
                           moving_fle_std_dev, repeats)
    assert np.isclose(ave_tresq, expected_tre_squared, atol=0.0, rtol=0.10)
    assert np.isclose(ave_fresq, expected_fre, atol=0.0, rtol=0.05)
    # No significant TRE-vs-FRE correlation expected.
    assert p_value > 0.05
def test_pbr_10_fids_offset_target():
    """
    Same as test_pbr_10_fids but with the target offset from the circle
    centre, so the TRE model's distance-from-centroid term is exercised.
    """
    fixed_fle_std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64)
    moving_fle_std_dev = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    fixed_fle_easv = expected_absolute_value(fixed_fle_std_dev)
    moving_fle_easv = expected_absolute_value(moving_fle_std_dev)
    # Target displaced from the fiducial centroid at the origin.
    target = np.array([[2.0, 1.0, 0.0]], dtype=np.float64)
    pbr = pbreg.PointBasedRegistration(target, fixed_fle_easv, moving_fle_easv)
    centre = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    radius = 2.0
    repeats = 200
    no_fids = 10
    ave_tresq, ave_fresq, expected_tre_squared, expected_fre, p_value = \
        _run_registrations(pbr, no_fids, centre, radius,
                           fixed_fle_std_dev,
                           moving_fle_std_dev, repeats)
    assert np.isclose(ave_tresq, expected_tre_squared, atol=0.0, rtol=0.10)
    assert np.isclose(ave_fresq, expected_fre, atol=0.0, rtol=0.05)
    # No significant TRE-vs-FRE correlation expected.
    assert p_value > 0.05
def test_pbr_20_fids_offset_target():
    """
    20 fiducials on a radius-20 circle with an offset target; also checks
    get_transformed_target before (no result) and after (target recovered)
    registration.
    """
    fixed_fle_std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64)
    moving_fle_std_dev = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    fixed_fle_easv = expected_absolute_value(fixed_fle_std_dev)
    moving_fle_easv = expected_absolute_value(moving_fle_std_dev)
    # Target displaced from the fiducial centroid at the origin.
    target = np.array([[2.0, 1.0, 0.0]], dtype=np.float64)
    pbr = pbreg.PointBasedRegistration(target, fixed_fle_easv, moving_fle_easv)
    centre = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    radius = 20.0
    repeats = 200
    no_fids = 20
    #test get transformed target before registration
    status, transformed_target = pbr.get_transformed_target()
    assert not status
    assert transformed_target is None
    ave_tresq, ave_fresq, expected_tre_squared, expected_fre, p_value = \
        _run_registrations(pbr, no_fids, centre, radius,
                           fixed_fle_std_dev,
                           moving_fle_std_dev, repeats)
    assert np.isclose(ave_tresq, expected_tre_squared, atol=0.0, rtol=0.10)
    assert np.isclose(ave_fresq, expected_fre, atol=0.0, rtol=0.05)
    # No significant TRE-vs-FRE correlation expected.
    assert p_value > 0.05
    #test get transformed target after registration
    status, transformed_target = pbr.get_transformed_target()
    assert status
    # Transformed target should land close to the true target (1 mm here).
    assert np.allclose(np.transpose(transformed_target), target, atol=1.0)
| 35.359223
| 79
| 0.636052
| 1,045
| 7,284
| 4.12823
| 0.117703
| 0.031989
| 0.031989
| 0.030598
| 0.777469
| 0.767038
| 0.754057
| 0.745943
| 0.731572
| 0.688456
| 0
| 0.049564
| 0.260434
| 7,284
| 205
| 80
| 35.531707
| 0.751253
| 0.044481
| 0
| 0.601563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.054688
| false
| 0
| 0.046875
| 0
| 0.117188
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3315cb7e01e834143b1e6aeae4bdc43863b4c1d1
| 2,339
|
py
|
Python
|
examples/official/trial/fashion_mnist_tf_keras/data.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | 3
|
2020-04-30T03:56:15.000Z
|
2020-04-30T04:01:24.000Z
|
examples/official/trial/fashion_mnist_tf_keras/data.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | 1
|
2022-02-10T07:31:44.000Z
|
2022-02-10T07:31:44.000Z
|
examples/official/trial/fashion_mnist_tf_keras/data.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | 2
|
2020-07-10T23:08:23.000Z
|
2021-01-13T10:01:59.000Z
|
"""
This files mimics keras.dataset download's function.
For parallel and distributed training, we need to account
for multiple processes (one per GPU) per agent.
For more information on data in Determined, read our data-access tutorial.
"""
import gzip
import tempfile
import numpy as np
from tensorflow.python.keras.utils.data_utils import get_file
def load_training_data():
    """Download and decode the Fashion-MNIST training split.

    Returns:
        Tuple of Numpy arrays: `(x_train, y_train)`.

    License:
        The copyright for Fashion-MNIST is held by Zalando SE.
        Fashion-MNIST is licensed under the [MIT license](
        https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE).
    """
    base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    # A fresh temp dir per process avoids download races when several
    # workers (one per GPU) fetch the data concurrently.
    cache_dir = tempfile.mkdtemp()
    labels_path = get_file(
        "train-labels-idx1-ubyte.gz",
        origin=base + "train-labels-idx1-ubyte.gz",
        cache_subdir=cache_dir)
    images_path = get_file(
        "train-images-idx3-ubyte.gz",
        origin=base + "train-images-idx3-ubyte.gz",
        cache_subdir=cache_dir)
    # IDX format: labels carry an 8-byte header, images a 16-byte header.
    with gzip.open(labels_path, "rb") as label_file:
        y_train = np.frombuffer(label_file.read(), np.uint8, offset=8)
    with gzip.open(images_path, "rb") as image_file:
        x_train = np.frombuffer(
            image_file.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
    return x_train, y_train
def load_validation_data():
    """Download and decode the Fashion-MNIST test split.

    Returns:
        Tuple of Numpy arrays: `(x_test, y_test)`.

    License:
        The copyright for Fashion-MNIST is held by Zalando SE.
        Fashion-MNIST is licensed under the [MIT license](
        https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE).
    """
    base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    # A fresh temp dir per process avoids download races when several
    # workers (one per GPU) fetch the data concurrently.
    cache_dir = tempfile.mkdtemp()
    labels_path = get_file(
        "t10k-labels-idx1-ubyte.gz",
        origin=base + "t10k-labels-idx1-ubyte.gz",
        cache_subdir=cache_dir)
    images_path = get_file(
        "t10k-images-idx3-ubyte.gz",
        origin=base + "t10k-images-idx3-ubyte.gz",
        cache_subdir=cache_dir)
    # IDX format: labels carry an 8-byte header, images a 16-byte header.
    with gzip.open(labels_path, "rb") as label_file:
        y_test = np.frombuffer(label_file.read(), np.uint8, offset=8)
    with gzip.open(images_path, "rb") as image_file:
        x_test = np.frombuffer(
            image_file.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)
    return x_test, y_test
| 29.2375
| 98
| 0.671227
| 323
| 2,339
| 4.773994
| 0.346749
| 0.062257
| 0.036316
| 0.044099
| 0.705577
| 0.705577
| 0.705577
| 0.705577
| 0.705577
| 0.705577
| 0
| 0.016146
| 0.205643
| 2,339
| 79
| 99
| 29.607595
| 0.813778
| 0.34844
| 0
| 0.470588
| 0
| 0
| 0.159059
| 0.070539
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3333fbb20232b85b8f1280cce3be1b65d0f68751
| 143
|
py
|
Python
|
ee/api/chalicelib/blueprints/bp_ee_crons.py
|
nogamenofun98/openreplay
|
543384496fbfd5bd95482bd51b15865acba78bda
|
[
"MIT"
] | 3,614
|
2021-05-22T08:23:31.000Z
|
2022-03-31T19:46:01.000Z
|
ee/api/chalicelib/blueprints/bp_ee_crons.py
|
aayushgautam/openreplay
|
3298230c3a04fe537794bf396bdaf695c81301c6
|
[
"MIT"
] | 245
|
2021-05-25T14:49:35.000Z
|
2022-03-30T22:15:28.000Z
|
ee/api/chalicelib/blueprints/bp_ee_crons.py
|
aayushgautam/openreplay
|
3298230c3a04fe537794bf396bdaf695c81301c6
|
[
"MIT"
] | 151
|
2021-05-22T07:57:17.000Z
|
2022-03-29T00:37:31.000Z
|
from chalice import Blueprint
from chalice import Cron  # NOTE(review): unused here — presumably re-exported for cron schedules; confirm before removing
from chalicelib import _overrides

# Blueprint for this module; its handlers are attached by the shared
# overrides hook below rather than being defined in this file.
app = Blueprint(__name__)
_overrides.chalice_app(app)
| 23.833333
| 33
| 0.846154
| 19
| 143
| 6
| 0.473684
| 0.192982
| 0.298246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111888
| 143
| 6
| 34
| 23.833333
| 0.897638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0.4
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
68397ebfbc1a70702bfe2ec9942407cc4111e82c
| 73
|
py
|
Python
|
phonotactics/codas/__init__.py
|
shlomo-Kallner/coventreiya
|
aa0773693220025f8d2c23644a2c5d9d884773e9
|
[
"Apache-2.0"
] | null | null | null |
phonotactics/codas/__init__.py
|
shlomo-Kallner/coventreiya
|
aa0773693220025f8d2c23644a2c5d9d884773e9
|
[
"Apache-2.0"
] | null | null | null |
phonotactics/codas/__init__.py
|
shlomo-Kallner/coventreiya
|
aa0773693220025f8d2c23644a2c5d9d884773e9
|
[
"Apache-2.0"
] | null | null | null |
# Explicit package name for this subpackage.
__package__ = "codas"
# Public API: the module itself plus the versioned submodules.
__all__ = [ "codas" , "ver_1_5_1" , "ver_1_5_7" ]
| 18.25
| 49
| 0.630137
| 12
| 73
| 2.666667
| 0.583333
| 0.25
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0.178082
| 73
| 3
| 50
| 24.333333
| 0.433333
| 0
| 0
| 0
| 0
| 0
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6851ed96358cf7394f0adf782e5cb09b7ad29a20
| 170
|
py
|
Python
|
test/test_data/recursive_test_extension/__init__.py
|
CuteFwan/dango.py
|
315d74ab32a512a5e54043ebbd1ff8559e592c52
|
[
"MIT"
] | 30
|
2017-07-12T11:40:58.000Z
|
2021-09-05T21:15:44.000Z
|
test/test_data/recursive_test_extension/__init__.py
|
CuteFwan/dango.py
|
315d74ab32a512a5e54043ebbd1ff8559e592c52
|
[
"MIT"
] | 11
|
2017-12-25T00:08:49.000Z
|
2020-10-29T05:30:14.000Z
|
test/test_data/recursive_test_extension/__init__.py
|
CuteFwan/dango.py
|
315d74ab32a512a5e54043ebbd1ff8559e592c52
|
[
"MIT"
] | 9
|
2017-09-15T14:58:52.000Z
|
2021-03-17T08:32:18.000Z
|
from dango import dcog, Cog
from .cmds import SubModule # noqa pylint: disable=unused-import
@dcog()
class InModule(Cog):
    """Minimal cog registered via @dcog(); body is intentionally empty."""

    def __init__(self, config):
        # Accepts the config argument to satisfy the cog construction
        # interface; no state is needed.
        pass
| 18.888889
| 65
| 0.7
| 23
| 170
| 5
| 0.782609
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 170
| 8
| 66
| 21.25
| 0.851852
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.166667
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
6865a909ca1c90f16c02afa21cd79d1d6521d5fb
| 127,108
|
py
|
Python
|
tests/test_bql.py
|
almartin82/bayeslite
|
a27f243b5f16cc6a01e84336a829e5b65d665b7b
|
[
"Apache-2.0"
] | 964
|
2015-09-24T15:02:05.000Z
|
2022-03-29T21:41:21.000Z
|
tests/test_bql.py
|
almartin82/bayeslite
|
a27f243b5f16cc6a01e84336a829e5b65d665b7b
|
[
"Apache-2.0"
] | 435
|
2015-09-23T16:46:58.000Z
|
2020-04-19T12:32:03.000Z
|
tests/test_bql.py
|
almartin82/bayeslite
|
a27f243b5f16cc6a01e84336a829e5b65d665b7b
|
[
"Apache-2.0"
] | 86
|
2015-10-24T20:08:30.000Z
|
2021-08-09T13:53:00.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
import apsw
import pytest
import struct
import bayeslite
import bayeslite.ast as ast
import bayeslite.compiler as compiler
import bayeslite.core as core
import bayeslite.guess as guess
import bayeslite.backends.troll_rng as troll
import bayeslite.parse as parse
from bayeslite.exception import BQLError
from bayeslite.math_util import relerr
from bayeslite.backends.cgpm_backend import CGPM_Backend
from bayeslite.util import cursor_value
import test_core
import test_csv
from stochastic import stochastic
def bql2sql(string, setup=None):
    """Compile a BQL string and return the generated SQL text.

    Opens a throwaway in-memory bayesdb, loads the t1 test table and a
    p1 population over it, optionally runs `setup(bdb)` for extra
    fixtures, then compiles each parsed phrase, joining them with ';'.
    Only query phrases are accepted (asserted below).
    """
    with bayeslite.bayesdb_open(':memory:') as bdb:
        test_core.t1_schema(bdb)
        test_core.t1_data(bdb)
        bdb.execute('''
            create population p1 for t1 (
                id ignore;
                label nominal;
                age numerical;
                weight numerical
            )
        ''')
        if setup is not None:
            setup(bdb)
        phrases = parse.parse_bql_string(string)
        # Output with no numbered/named parameters: plain compilation.
        out = compiler.Output(0, {}, ())
        for phrase in phrases:
            assert ast.is_query(phrase)
            compiler.compile_query(bdb, phrase, out)
            out.write(';')
        return out.getvalue()
# XXX Kludgey mess. Please reorganize.
def bql2sqlparam(string):
    """Compile a (possibly parametrized) BQL string to SQL.

    Like bql2sql, but handles ast.Parametrized phrases by binding every
    numbered parameter to NULL so compilation can proceed without
    caller-supplied values.
    """
    with bayeslite.bayesdb_open(':memory:') as bdb:
        test_core.t1_schema(bdb)
        test_core.t1_data(bdb)
        bdb.execute('''
            create population p1 for t1 (
                id ignore;
                label nominal;
                age numerical;
                weight numerical
            )
        ''')
        phrases = parse.parse_bql_string(string)
        out0 = StringIO.StringIO()
        for phrase in phrases:
            out = None
            if isinstance(phrase, ast.Parametrized):
                # Dummy NULL bindings, one per numbered parameter.
                bindings = (None,) * phrase.n_numpar
                out = compiler.Output(phrase.n_numpar, phrase.nampar_map,
                    bindings)
                phrase = phrase.phrase
            else:
                out = StringIO.StringIO()
            assert ast.is_query(phrase)
            compiler.compile_query(bdb, phrase, out)
            # XXX Do something about the parameters.
            out0.write(out.getvalue())
            out0.write(';')
        return out0.getvalue()
def bql_execute(bdb, string, bindings=()):
    """Execute `string` on `bdb` and return its rows as a list of tuples.

    The explicit list() preserves the eager Python 2 map() semantics if
    this file is ever run under Python 3, where a bare map() would
    return a lazy iterator instead of a list.
    """
    return list(map(tuple, bdb.execute(string, bindings)))
def empty(cursor):
    """Assert that `cursor` is a result cursor with zero columns and no rows.

    Raises AssertionError if the cursor has columns or pytest.Failed if
    advancing it does not raise StopIteration.
    """
    assert cursor is not None
    assert cursor.description is not None
    assert len(cursor.description) == 0
    with pytest.raises(StopIteration):
        # Use the next() builtin: it works on both Python 2 (>= 2.6)
        # and Python 3, unlike the Python-2-only cursor.next() method.
        next(cursor)
def test_trivial_population():
    """Create and then drop a minimal population over the csv test table."""
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        # XXX if (not) exists
        bdb.execute('''
            create population p for t (
                guess stattypes of (*);
                age numerical
            )
        ''')
        bdb.execute('drop population p')
def test_population_invalid_numerical():
    """Declaring a non-numeric column as numerical must raise BQLError."""
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        with pytest.raises(BQLError):
            bdb.execute('''
                create population p for t (
                    guess stattypes of (*);
                    gender numerical
                )
            ''')
def test_population_invalid_numerical_alterpop_addvar():
    """ALTER POPULATION ADD VARIABLE with a bad stattype must raise."""
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        # gender is ignored at creation so it can be added back later.
        bdb.execute('''
            create population p for t (
                guess stattypes of (*);
                ignore gender
            )
        ''')
        with pytest.raises(BQLError):
            bdb.execute('alter population p add variable gender numerical')
        bdb.execute('drop population p')
def test_population_invalid_numerical_alterpop_stattype():
    """ALTER POPULATION SET STATTYPE to an invalid type must raise."""
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        # gender starts out nominal, which is valid.
        bdb.execute('''
            create population p for t (
                guess stattypes of (*);
                gender nominal
            )
        ''')
        with pytest.raises(BQLError):
            bdb.execute('''
                alter population p set stattype of gender to numerical
            ''')
        bdb.execute('drop population p')
def test_similarity_identity():
    """Similarity of every row to itself must be exactly 1."""
    with test_core.t1() as (bdb, population_id, _generator_id):
        bdb.execute('initialize 6 models for p1_cc;')
        rowids = bdb.sql_execute('select rowid from t1')
        for rowid in rowids:
            # Same rowid on both sides of the similarity query.
            c = bdb.execute('''
                estimate similarity of (rowid=?) to (rowid=?)
                in the context of age by p1
            ''', (rowid[0], rowid[0])).fetchall()
            assert len(c) == 1
            assert c[0][0] == 1
def test_predictive_relevance():
    """Check SQL compilation of PREDICTIVE RELEVANCE queries.

    Covers BY/FROM forms, existing vs hypothetical rows, use in WHERE
    and ORDER BY, and the OF-clause requirements (mandatory with BY,
    forbidden with FROM).
    """
    assert bql2sql('''
        estimate predictive relevance
        of (label = 'Uganda')
        to existing rows (rowid < 4)
        and hypothetical rows with values (
            ("age" = 82, "weight" = 14),
            ("age" = 74, label = 'Europe', "weight" = 7)
        )
        in the context of "weight"
        by p1
    ''') == \
        'SELECT bql_row_predictive_relevance(1, NULL, NULL, ' \
        '(SELECT _rowid_ FROM "t1" WHERE ("label" = \'Uganda\')), '\
        '\'[1, 2, 3]\', 3, '\
        '2, 82, 3, 14, NULL, 2, 74, 1, \'Europe\', 3, 7, NULL);'
    assert bql2sql('''
        estimate predictive relevance
        of (label = 'mumble')
        to existing rows (label = 'frotz' or age <= 4)
        in the context of "label"
        by p1
    ''') == \
        'SELECT bql_row_predictive_relevance(1, NULL, NULL, ' \
        '(SELECT _rowid_ FROM "t1" WHERE ("label" = \'mumble\')), '\
        '\'[5, 8]\', 1);'
    assert bql2sql('''
        estimate label,
        predictive relevance
        to hypothetical rows with values (
            ("age" = 82, "weight" = 14),
            ("age" = 74, label = 'hunf', "weight" = 7)
        )
        in the context of "age",
        _rowid_ + 1
        from p1
    ''') == \
        'SELECT "label", bql_row_predictive_relevance(1, NULL, NULL, _rowid_, '\
        '\'[]\', 2, 2, 82, 3, 14, NULL, 2, 74, 1, \'hunf\', 3, 7, NULL), '\
        '("_rowid_" + 1) FROM "t1";'
    # No matching rows should still compile.
    assert bql2sql('''
        estimate label,
        predictive relevance to existing rows (rowid < 0)
        in the context of "age"
        from p1
    ''') == \
        'SELECT "label", bql_row_predictive_relevance(1, NULL, NULL, _rowid_, '\
        '\'[]\', 2) FROM "t1";'
    # When using `BY`, require OF to be specified.
    with pytest.raises(BQLError):
        bql2sql('''
            estimate predictive relevance
            to hypothetical rows with values (
                ("age" = 82, "weight" = 14),
                ("age" = 74, label = 'Europe', "weight" = 7)
            )
            in the context of "age"
            by p1
        ''')
    # When using `FROM`, require OF to be unspecified.
    with pytest.raises(BQLError):
        bql2sql('''
            estimate predictive relevance
            of (name = 'mansour')
            to hypothetical rows with values (
                ("age" = 82, "weight" = 14)
            )
            in the context of "age"
            from p1
        ''')
    # Predictive relevance is also usable in WHERE and ORDER BY.
    assert bql2sql('''
        estimate label from p1
        where
            (predictive relevance to existing rows (label = 'quux' and age < 5)
            in the context of "weight") > 1
        order by
            predictive relevance
            to hypothetical rows with values ((label='zot'))
            in the context of "age"
    ''') == \
        'SELECT "label" FROM "t1" WHERE '\
        '(bql_row_predictive_relevance(1, NULL, NULL, '\
        '_rowid_, \'[5]\', 3) > 1) '\
        'ORDER BY bql_row_predictive_relevance(1, NULL, NULL, '\
        '_rowid_, \'[]\', 2, 1, \'zot\', NULL);'
@stochastic(max_runs=2, min_passes=1)
def test_conditional_probability(seed):
    """Conditioning on a dependent variable should raise the density.

    Rebuilds p1 with only age and weight (forced dependent), then checks
    that p(age=8 | weight=16) > p(age=8), and that the column-wise form
    agrees with the scalar form.
    """
    with test_core.t1(seed=seed) as (bdb, _population_id, _generator_id):
        # Replace the default population with one restricted to
        # numerical age and weight.
        bdb.execute('drop generator p1_cc')
        bdb.execute('drop population p1')
        bdb.execute('''
            create population p1 for t1 (
                ignore id, label;
                set stattype of age to numerical;
                set stattype of weight to numerical
            )
        ''')
        bdb.execute('''
            create generator p1_cond_prob_cc for p1;
        ''')
        bdb.execute('initialize 1 model for p1_cond_prob_cc')
        # Force all variables dependent so conditioning has an effect.
        bdb.execute('alter generator p1_cond_prob_cc '
            'ensure variables * dependent')
        bdb.execute('analyze p1_cond_prob_cc for 1 iteration')
        q0 = 'estimate probability density of age = 8 by p1'
        q1 = 'estimate probability density of age = 8 given () by p1'
        age_is_8 = bdb.execute(q0).fetchvalue()
        # Empty conditioning set must not change the density.
        assert age_is_8 == bdb.execute(q1).fetchvalue()
        q2 = 'estimate probability density of age = 8 given (weight = 16)' \
            ' by p1'
        age_is_8_given_weight_is_16 = bdb.execute(q2).fetchvalue()
        assert age_is_8 < age_is_8_given_weight_is_16
        probs = bdb.execute(
            'estimate probability density of value 8 given (weight = 16)'
            ' from columns of p1 where v.name != \'weight\'').fetchall()
        assert [(age_is_8_given_weight_is_16,)] == probs
@stochastic(max_runs=2, min_passes=1)
def test_joint_probability(seed):
    """Sanity-check joint vs marginal vs conditional density orderings."""
    with test_core.t1(seed=seed) as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 10 models for p1_cc')
        bdb.execute('analyze p1_cc for 10 iterations')
        # Scalar and parenthesized single-variable forms agree.
        q0 = 'estimate probability density of age = 8 by p1'
        q1 = 'estimate probability density of (age = 8) by p1'
        assert bdb.execute(q0).fetchvalue() == bdb.execute(q1).fetchvalue()
        q1 = 'estimate probability density of (age = 8) given () by p1'
        assert bdb.execute(q0).fetchvalue() == bdb.execute(q1).fetchvalue()
        # Conditioning on correlated weight raises the density.
        q2 = 'estimate probability density of age = 8 given (weight = 16)' \
            ' by p1'
        assert bdb.execute(q0).fetchvalue() < bdb.execute(q2).fetchvalue()
        # Joint density is below the marginal.
        q0 = 'estimate probability density of age = 8 by p1'
        q1 = 'estimate probability density of (age = 8, weight = 16) by p1'
        assert bdb.execute(q1).fetchvalue() < bdb.execute(q0).fetchvalue()
        q2 = 'estimate probability density of (age = 8, weight = 16)' \
            " given (label = 'mumble') by p1"
        assert bdb.execute(q1).fetchvalue() < bdb.execute(q2).fetchvalue()
def test_badbql():
    """Empty, bare-semicolon, and multi-statement strings are rejected."""
    bad_strings = ['', ';', 'select 0; select 1']
    with test_core.t1() as (bdb, _population_id, _generator_id):
        for bad in bad_strings:
            with pytest.raises(ValueError):
                bdb.execute(bad)
def test_select_trivial():
    """Check SQL compilation of plain (non-BQL) SELECT syntax.

    Exercises literals, quoting, SELECT clauses (DISTINCT, AS, FROM,
    WHERE, GROUP BY, HAVING, ORDER BY, LIMIT), and the full operator
    grammar including precedence and ESCAPE handling.
    """
    # Literals and quoting.
    assert bql2sql('select null;') == 'SELECT NULL;'
    assert bql2sql("select 'x';") == "SELECT 'x';"
    assert bql2sql("select 'x''y';") == "SELECT 'x''y';"
    assert bql2sql('select "x";') == 'SELECT "x";'
    assert bql2sql('select "x""y";') == 'SELECT "x""y";'
    assert bql2sql('select 0;') == 'SELECT 0;'
    assert bql2sql('select 0.;') == 'SELECT 0.0;'
    assert bql2sql('select .0;') == 'SELECT 0.0;'
    assert bql2sql('select 0.0;') == 'SELECT 0.0;'
    assert bql2sql('select 1e0;') == 'SELECT 1.0;'
    assert bql2sql('select 1e+1;') == 'SELECT 10.0;'
    assert bql2sql('select 1e-1;') == 'SELECT 0.1;'
    assert bql2sql('select -1e+1;') == 'SELECT (- 10.0);'
    assert bql2sql('select +1e-1;') == 'SELECT (+ 0.1);'
    assert bql2sql('select SQRT(1-EXP(-2*value)) FROM bm_mi;') == \
        'SELECT "SQRT"((1 - "EXP"(((- 2) * "value")))) FROM "bm_mi";'
    assert bql2sql('select .1e0;') == 'SELECT 0.1;'
    assert bql2sql('select 1.e10;') == 'SELECT 10000000000.0;'
    # Select clauses.
    assert bql2sql('select all 0;') == 'SELECT 0;'
    assert bql2sql('select distinct 0;') == 'SELECT DISTINCT 0;'
    assert bql2sql('select 0 as z;') == 'SELECT 0 AS "z";'
    assert bql2sql('select * from t;') == 'SELECT * FROM "t";'
    assert bql2sql('select t.* from t;') == 'SELECT "t".* FROM "t";'
    assert bql2sql('select c from t;') == 'SELECT "c" FROM "t";'
    assert bql2sql('select c as d from t;') == 'SELECT "c" AS "d" FROM "t";'
    assert bql2sql('select t.c as d from t;') == \
        'SELECT "t"."c" AS "d" FROM "t";'
    assert bql2sql('select t.c as d, p as q, x from t;') == \
        'SELECT "t"."c" AS "d", "p" AS "q", "x" FROM "t";'
    assert bql2sql('select * from t, u;') == 'SELECT * FROM "t", "u";'
    assert bql2sql('select * from t as u;') == 'SELECT * FROM "t" AS "u";'
    assert bql2sql('select * from (select 0);') == 'SELECT * FROM (SELECT 0);'
    assert bql2sql('select t.c from (select d as c from u) as t;') == \
        'SELECT "t"."c" FROM (SELECT "d" AS "c" FROM "u") AS "t";'
    assert bql2sql('select * where x;') == 'SELECT * WHERE "x";'
    assert bql2sql('select * from t where x;') == \
        'SELECT * FROM "t" WHERE "x";'
    assert bql2sql('select * group by x;') == 'SELECT * GROUP BY "x";'
    assert bql2sql('select * from t where x group by y;') == \
        'SELECT * FROM "t" WHERE "x" GROUP BY "y";'
    assert bql2sql('select * from t where x group by y, z;') == \
        'SELECT * FROM "t" WHERE "x" GROUP BY "y", "z";'
    assert bql2sql('select * from t where x group by y having sum(z) < 1') == \
        'SELECT * FROM "t" WHERE "x" GROUP BY "y" HAVING ("sum"("z") < 1);'
    assert bql2sql('select * order by x;') == 'SELECT * ORDER BY "x";'
    assert bql2sql('select * order by x asc;') == 'SELECT * ORDER BY "x";'
    assert bql2sql('select * order by x desc;') == \
        'SELECT * ORDER BY "x" DESC;'
    assert bql2sql('select * order by x, y;') == 'SELECT * ORDER BY "x", "y";'
    assert bql2sql('select * order by x desc, y;') == \
        'SELECT * ORDER BY "x" DESC, "y";'
    assert bql2sql('select * order by x, y asc;') == \
        'SELECT * ORDER BY "x", "y";'
    assert bql2sql('select * limit 32;') == 'SELECT * LIMIT 32;'
    assert bql2sql('select * limit 32 offset 16;') == \
        'SELECT * LIMIT 32 OFFSET 16;'
    assert bql2sql('select * limit 16, 32;') == 'SELECT * LIMIT 32 OFFSET 16;'
    assert bql2sql('select (select0);') == 'SELECT "select0";'
    assert bql2sql('select (select 0);') == 'SELECT (SELECT 0);'
    assert bql2sql('select f(f(), f(x), y);') == \
        'SELECT "f"("f"(), "f"("x"), "y");'
    # Operator grammar and precedence.
    assert bql2sql('select a and b or c or not d is e is not f like j;') == \
        'SELECT ((("a" AND "b") OR "c") OR' \
        + ' (NOT ((("d" IS "e") IS NOT "f") LIKE "j")));'
    assert bql2sql('select a like b not like c like d escape e;') == \
        'SELECT ((("a" LIKE "b") NOT LIKE "c") LIKE "d" ESCAPE "e");'
    assert bql2sql('select a like b escape c glob d not glob e;') == \
        'SELECT ((("a" LIKE "b" ESCAPE "c") GLOB "d") NOT GLOB "e");'
    assert bql2sql('select a not glob b glob c escape d;') == \
        'SELECT (("a" NOT GLOB "b") GLOB "c" ESCAPE "d");'
    assert bql2sql('select a glob b escape c regexp e not regexp f;') == \
        'SELECT ((("a" GLOB "b" ESCAPE "c") REGEXP "e") NOT REGEXP "f");'
    assert bql2sql('select a not regexp b regexp c escape d;') == \
        'SELECT (("a" NOT REGEXP "b") REGEXP "c" ESCAPE "d");'
    assert bql2sql('select a regexp b escape c not regexp d escape e;') == \
        'SELECT (("a" REGEXP "b" ESCAPE "c") NOT REGEXP "d" ESCAPE "e");'
    assert bql2sql('select a not regexp b escape c match e not match f;') == \
        'SELECT ((("a" NOT REGEXP "b" ESCAPE "c") MATCH "e") NOT MATCH "f");'
    assert bql2sql('select a not match b match c escape d;') == \
        'SELECT (("a" NOT MATCH "b") MATCH "c" ESCAPE "d");'
    assert bql2sql('select a match b escape c not match d escape e;') == \
        'SELECT (("a" MATCH "b" ESCAPE "c") NOT MATCH "d" ESCAPE "e");'
    assert bql2sql('select a not match b escape c between d and e;') == \
        'SELECT (("a" NOT MATCH "b" ESCAPE "c") BETWEEN "d" AND "e");'
    assert bql2sql('select a between b and c and d;') == \
        'SELECT (("a" BETWEEN "b" AND "c") AND "d");'
    assert bql2sql('select a like b like c escape d between e and f;') == \
        'SELECT ((("a" LIKE "b") LIKE "c" ESCAPE "d") BETWEEN "e" AND "f");'
    assert bql2sql('select a between b and c not between d and e;') == \
        'SELECT (("a" BETWEEN "b" AND "c") NOT BETWEEN "d" AND "e");'
    assert bql2sql('select a not between b and c in (select f);') == \
        'SELECT (("a" NOT BETWEEN "b" AND "c") IN (SELECT "f"));'
    assert bql2sql('select a in (select b) and c not in (select d);') == \
        'SELECT (("a" IN (SELECT "b")) AND ("c" NOT IN (SELECT "d")));'
    assert bql2sql("select a in (1 + 2, '3') and b not in (select c);") == \
        'SELECT (("a" IN ((1 + 2), \'3\')) AND ("b" NOT IN (SELECT "c")));'
    assert bql2sql('select a in (select b) isnull notnull!=c<>d<e<=f>g;') == \
        'SELECT ((((("a" IN (SELECT "b")) ISNULL) NOTNULL) != "c") !=' \
        + ' ((("d" < "e") <= "f") > "g"));'
    assert bql2sql('select a>b>=c<<d>>e&f|g+h-i*j/k;') == \
        'SELECT (("a" > "b") >= (((("c" << "d") >> "e") & "f") |' \
        + ' (("g" + "h") - (("i" * "j") / "k"))));'
    assert bql2sql('select a/b%c||~~d collate e collate\'f\'||1;') == \
        'SELECT (("a" / "b") % (("c" || (((~ (~ "d")) COLLATE "e")' \
        + ' COLLATE "f")) || 1));'
    assert bql2sql('select cast(f(x) as binary blob);') == \
        'SELECT CAST("f"("x") AS "binary" "blob");'
    assert bql2sql('select cast(42 as varint(73));') == \
        'SELECT CAST(42 AS "varint"(73));'
    assert bql2sql('select cast(f(x, y, z) as varchar(12 ,34));') == \
        'SELECT CAST("f"("x", "y", "z") AS "varchar"(12, 34));'
    assert bql2sql('select exists (select a) and not exists (select b);') == \
        'SELECT (EXISTS (SELECT "a") AND (NOT EXISTS (SELECT "b")));'
    assert bql2sql('select case when a - b then c else d end from t;') == \
        'SELECT CASE WHEN ("a" - "b") THEN "c" ELSE "d" END FROM "t";'
    assert bql2sql('select case f(a) when b + c then d else e end from t;') \
        == \
        'SELECT CASE "f"("a") WHEN ("b" + "c") THEN "d" ELSE "e" END FROM "t";'
def test_estimate_bql():
    """Check SQL compilation of ESTIMATE with BQL functions.

    Covers PREDICTIVE PROBABILITY (including GIVEN and (*) forms and
    their errors), PROBABILITY DENSITY, SIMILARITY, DEPENDENCE
    PROBABILITY, MUTUAL INFORMATION, and CORRELATION.
    """
    # PREDICTIVE PROBABILITY
    assert bql2sql('estimate predictive probability of weight from p1;') == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[3]\', \'[]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of (age, weight) '
            'from p1;') == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[2, 3]\', \'[]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of (age, weight) given '
            '(label) from p1;') == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[2, 3]\', \'[1]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of (*) from p1;') == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[1, 2, 3]\', \'[]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of (*) given (age, weight) '
            'from p1;') == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[1]\', \'[2, 3]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of age given (*) '
            'from p1;') == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[2]\', \'[1, 3]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate label, predictive probability of weight'
            ' from p1;') \
        == \
        'SELECT "label", ' \
        'bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[3]\', \'[]\')' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of weight, label'
            ' from p1;') \
        == \
        'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
        '\'[3]\', \'[]\'),' \
        ' "label"' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of weight + 1'
            ' from p1;') == \
        'SELECT (bql_row_column_predictive_probability(1, NULL, NULL, '\
        '_rowid_, \'[3]\', \'[]\') + 1)' \
        ' FROM "t1";'
    assert bql2sql('estimate predictive probability of weight given (*) + 1'
            ' from p1;') == \
        'SELECT (bql_row_column_predictive_probability(1, NULL, NULL, '\
        '_rowid_, \'[3]\', \'[1, 2]\') + 1)' \
        ' FROM "t1";'
    # PREDICTIVE PROBABILITY parse and compilation errors.
    with pytest.raises(parse.BQLParseError):
        # Need a table.
        bql2sql('estimate predictive probability of weight;')
    with pytest.raises(parse.BQLParseError):
        # Need at most one generator.
        bql2sql('estimate predictive probability of weight'
            ' from p1, p1;')
    with pytest.raises(parse.BQLParseError):
        # Need a generator name, not a subquery.
        bql2sql('estimate predictive probability of weight'
            ' from (select 0);')
    with pytest.raises(parse.BQLParseError):
        # Need a column.
        bql2sql('estimate predictive probability from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Using (*) in both targets and constraints.
        bql2sql('estimate predictive probability of (*) given (*) from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Using (weight, *) in targets.
        bql2sql('estimate predictive probability of (weight, *) given (age) '
            'from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Using (age, *) in constraints.
        bql2sql('estimate predictive probability of weight given (*, age) '
            'from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Using duplicate column age.
        bql2sql('estimate predictive probability of age given (weight, age) '
            'from p1;')
    # PROBABILITY DENISTY.
    assert bql2sql('estimate probability density of weight = 20 from p1;') == \
        'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20) FROM "t1";'
    assert bql2sql('estimate probability density of weight = 20'
            ' given (age = 8)'
            ' from p1;') == \
        'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, NULL, 2, 8) FROM "t1";'
    assert bql2sql('estimate probability density of (weight = 20, age = 8)'
            ' from p1;') == \
        'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, 2, 8) FROM "t1";'
    assert bql2sql('estimate probability density of (weight = 20, age = 8)'
            " given (label = 'mumble') from p1;") == \
        "SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, 2, 8, NULL, 1, 'mumble')" \
        ' FROM "t1";'
    assert bql2sql('estimate probability density of weight = (c + 1)'
            ' from p1;') == \
        'SELECT bql_pdf_joint(1, NULL, NULL, 3, ("c" + 1)) FROM "t1";'
    assert bql2sql('estimate probability density of weight = f(c)'
            ' from p1;') == \
        'SELECT bql_pdf_joint(1, NULL, NULL, 3, "f"("c")) FROM "t1";'
    # SIMILARITY.
    assert bql2sql('estimate similarity to (rowid = 5) '
            'in the context of weight from p1;') == \
        'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' \
        ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 3) FROM "t1";'
    assert bql2sql(
            'estimate similarity of (rowid = 12) to (rowid = 5) '
            'in the context of weight from p1;') == \
        'SELECT bql_row_similarity(1, NULL, NULL,' \
        ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 12)),' \
        ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 3) FROM "t1";'
    assert bql2sql('estimate similarity to (rowid = 5) in the context of age'
            ' from p1') == \
        'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' \
        ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 2) FROM "t1";'
    assert bql2sql(
            'estimate similarity of (rowid = 5) to (height = 7 and age < 10)'
            ' in the context of weight from p1;') == \
        'SELECT bql_row_similarity(1, NULL, NULL,' \
        ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)),' \
        ' (SELECT _rowid_ FROM "t1" WHERE (("height" = 7) AND ("age" < 10))),' \
        ' 3) FROM "t1";'
    with pytest.raises(bayeslite.BQLError):
        # Cannot use all variables for similarity.
        bql2sql(
            'estimate similarity to (rowid = 5) in the context of * from p1;')
    assert bql2sql('estimate similarity to (rowid = 5)'
            ' in the context of age from p1;') == \
        'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' \
        ' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 2) FROM "t1";'
    # DEPENDENCE PROBABILITY.
    assert bql2sql('estimate dependence probability of age with weight'
            ' from p1;') == \
        'SELECT bql_column_dependence_probability(1, NULL, NULL, 2, 3) '\
        'FROM "t1";'
    with pytest.raises(bayeslite.BQLError):
        # Need both rows fixed.
        bql2sql('estimate similarity to (rowid=2) in the context of r by p1')
    with pytest.raises(bayeslite.BQLError):
        # Need both rows fixed.
        bql2sql('estimate similarity in the context of r within p1')
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate dependence probability with age from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate dependence probability from p1;')
    # MUTUAL INFORMATION.
    assert bql2sql('estimate mutual information of age with weight' +
            ' from p1;') == \
        'SELECT bql_column_mutual_information('\
        '1, NULL, NULL, \'[2]\', \'[3]\', NULL)'\
        ' FROM "t1";'
    assert bql2sql('estimate mutual information of age with weight' +
            ' using 42 samples from p1;') == \
        'SELECT bql_column_mutual_information('\
        '1, NULL, NULL, \'[2]\', \'[3]\', 42)'\
        ' FROM "t1";'
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate mutual information with age from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate mutual information from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate mutual information with age using 42 samples'
            ' from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate mutual information using 42 samples from p1;')
    # XXX Should be SELECT, not ESTIMATE, here?
    assert bql2sql('estimate correlation of age with weight from p1;') == \
        'SELECT bql_column_correlation(1, NULL, NULL, 2, 3) FROM "t1";'
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate correlation with age from p1;')
    with pytest.raises(bayeslite.BQLError):
        # Need both columns fixed.
        bql2sql('estimate correlation from p1;')
    with pytest.raises(BQLError):
        # Variable must exist.
        bql2sql('estimate correlation with agee from variables of p1')
def test_predict_outside_infer():
    """PREDICT is only legal inside INFER; ESTIMATE must reject it."""
    query = 'estimate predict age with confidence 0.9 from p1;'
    with pytest.raises(bayeslite.BQLError):
        bql2sql(query)
def test_infer_explicit_predict_confidence():
    """INFER EXPLICIT PREDICT ... WITH CONFIDENCE compiles to bql_predict
    with the default (NULL) sample count."""
    expected = ''.join([
        'SELECT bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL)',
        ' FROM "t1";',
    ])
    assert bql2sql(
        'infer explicit predict age with confidence 0.9 from p1;') == expected
def test_infer_explicit_predict_confidence_nsamples():
    """USING 42 SAMPLES threads the sample count into bql_predict."""
    expected = ''.join([
        'SELECT bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42)',
        ' FROM "t1";',
    ])
    assert bql2sql(
        'infer explicit predict age with confidence 0.9 using 42 samples'
        ' from p1;') == expected
def test_infer_explicit_verbatim_and_predict_confidence():
    """Verbatim columns plus PREDICT ... CONFIDENCE compile to a subquery
    whose outputs are renamed c0, c1, ... and unpacked with bql_json_get."""
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age\",",
        " bql_json_get(c2, 'confidence') AS \"age_conf\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql('infer explicit rowid, age,'
        ' predict age confidence age_conf from p1') == expected
def test_infer_explicit_verbatim_and_predict_noconfidence():
    """Without CONFIDENCE, only the 'value' field of the prediction is
    extracted from the subquery column."""
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql('infer explicit rowid, age,'
        ' predict age from p1') == expected
def test_infer_explicit_verbatim_and_predict_confidence_nsamples():
    """Same as the confidence case, but with an explicit sample count."""
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age\",",
        " bql_json_get(c2, 'confidence') AS \"age_conf\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 42)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql('infer explicit rowid, age,'
        ' predict age confidence age_conf using 42 samples from p1') \
        == expected
def test_infer_explicit_verbatim_and_predict_noconfidence_nsamples():
    """No CONFIDENCE output, explicit sample count of 42."""
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 42)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql('infer explicit rowid, age,'
        ' predict age using 42 samples from p1') == expected
def test_infer_explicit_verbatim_and_predict_confidence_as():
    """AS renames the predicted value column (age_inf) independently of the
    confidence column (age_conf)."""
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age_inf\",",
        " bql_json_get(c2, 'confidence') AS \"age_conf\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql('infer explicit rowid, age,'
        ' predict age as age_inf confidence age_conf from p1') == expected
def test_infer_explicit_verbatim_and_predict_noconfidence_as():
    """AS renames the predicted value column; no confidence output."""
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age_inf\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql('infer explicit rowid, age,'
        ' predict age as age_inf from p1') == expected
def test_infer_explicit_verbatim_and_predict_confidence_as_nsamples():
    """AS renames plus an explicit sample count of 87."""
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age_inf\",",
        " bql_json_get(c2, 'confidence') AS \"age_conf\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 87)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql('infer explicit rowid, age,'
        ' predict age as age_inf confidence age_conf using 87 samples'
        ' from p1') == expected
def test_infer_explicit_verbatim_and_predict_noconfidence_as_nsamples():
    """AS rename, no confidence output, explicit sample count of 87."""
    expected = ''.join([
        'SELECT c0 AS "rowid", c1 AS "age",',
        " bql_json_get(c2, 'value') AS \"age_inf\"",
        ' FROM (SELECT "rowid" AS c0, "age" AS c1,',
        ' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 87)',
        ' AS c2 FROM "t1");',
    ])
    assert bql2sql('infer explicit rowid, age,'
        ' predict age as age_inf using 87 samples'
        ' from p1') == expected
def test_infer_auto():
    """Implicit INFER wraps each modeled column in IFNULL(col, bql_predict)
    with confidence 0 and the default (NULL) sample count."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, NULL))',
        ' AS "age",',
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, NULL))',
        ' AS "weight"',
        ' FROM "t1";',
    ])
    assert bql2sql('infer rowid, age, weight from p1') == expected
def test_infer_auto_nsamples():
    """USING (1+2) SAMPLES: the sample-count expression is compiled, not
    evaluated, into each bql_predict call."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, (1 + 2)))',
        ' AS "age",',
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0,'
            ' (1 + 2)))',
        ' AS "weight"',
        ' FROM "t1";',
    ])
    assert bql2sql('infer rowid, age, weight using (1+2) samples from p1') \
        == expected
def test_infer_auto_with_confidence():
    """WITH CONFIDENCE 0.9 replaces the default confidence of 0."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))',
        ' AS "age",',
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,'
            ' NULL))',
        ' AS "weight"',
        ' FROM "t1";',
    ])
    assert bql2sql('infer rowid, age, weight with confidence 0.9 from p1') \
        == expected
def test_infer_auto_with_confidence_nsamples():
    """The sample count may be an arbitrary expression, e.g. sqrt(2)."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9,'
            ' "sqrt"(2)))',
        ' AS "age",',
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,'
            ' "sqrt"(2)))',
        ' AS "weight"',
        ' FROM "t1";',
    ])
    assert bql2sql('infer rowid, age, weight with confidence 0.9'
        ' using sqrt(2) samples'
        ' from p1') == expected
def test_infer_auto_with_confidence_where():
    """A WHERE clause on the underlying table passes through to the SQL."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))',
        ' AS "age",',
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,'
            ' NULL))',
        ' AS "weight"',
        ' FROM "t1"',
        " WHERE (\"label\" = 'foo');",
    ])
    assert bql2sql('infer rowid, age, weight with confidence 0.9 from p1'
        " where label = 'foo'") == expected
def test_infer_auto_with_confidence_nsamples_where():
    """Confidence, an explicit sample count, and a WHERE clause combined."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42))',
        ' AS "age",',
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, 42))',
        ' AS "weight"',
        ' FROM "t1"',
        " WHERE (\"label\" = 'foo');",
    ])
    assert bql2sql('infer rowid, age, weight with confidence 0.9'
        ' using 42 samples'
        ' from p1'
        " where label = 'foo'") == expected
def test_infer_auto_with_confidence_nsamples_where_predict():
    """PREDICT inside the WHERE clause compiles to its own bql_predict call,
    independent of the inferred output columns."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))',
        ' AS "age",',
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,'
            ' NULL))',
        ' AS "weight"',
        ' FROM "t1"',
        ' WHERE ("ifnull"("label",',
        ' bql_predict(1, NULL, NULL, _rowid_, 1, 0.7, NULL))',
        " = 'foo');",
    ])
    assert bql2sql('infer rowid, age, weight with confidence 0.9 from p1'
        ' where ifnull(label, predict label with confidence 0.7)'
        " = 'foo'") == expected
def test_infer_auto_with_confidence_nsamples_where_predict_nsamples():
    """Outer and nested PREDICT keep their own sample counts (42 vs 73)."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42))',
        ' AS "age",',
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, 42))',
        ' AS "weight"',
        ' FROM "t1"',
        ' WHERE ("ifnull"("label",',
        ' bql_predict(1, NULL, NULL, _rowid_, 1, 0.7, 73))',
        " = 'foo');",
    ])
    assert bql2sql('infer rowid, age, weight with confidence 0.9'
        ' using 42 samples'
        ' from p1'
        ' where ifnull(label, predict label with confidence 0.7'
        ' using 73 samples)'
        " = 'foo'") == expected
def test_infer_auto_star():
    """INFER rowid, * expands * to every modeled column plus the key."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid", "id" AS "id",',
        ' "IFNULL"("label", bql_predict(1, NULL, NULL, _rowid_, 1, 0, NULL))',
        ' AS "label",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, NULL))',
        ' AS "age",',
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, NULL))',
        ' AS "weight"',
        ' FROM "t1";',
    ])
    assert bql2sql('infer rowid, * from p1') == expected
def test_infer_auto_star_nsamples():
    """Star expansion with an explicit sample count of 1."""
    expected = ''.join([
        'SELECT "rowid" AS "rowid", "id" AS "id",',
        ' "IFNULL"("label", bql_predict(1, NULL, NULL, _rowid_, 1, 0, 1))',
        ' AS "label",',
        ' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, 1))',
        ' AS "age",',
        ' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, 1))',
        ' AS "weight"',
        ' FROM "t1";',
    ])
    assert bql2sql('infer rowid, * using 1 samples from p1') == expected
def test_estimate_columns_trivial():
    """Check compilation of ESTIMATE ... FROM COLUMNS OF p1 (one-column
    estimators over bayesdb_variable), including the error cases: row
    functions in a column context, and estimators given too many or too
    few columns."""
    # Shared golden-SQL fragments: column list and FROM/WHERE scaffold.
    prefix0 = 'SELECT v.name AS name'
    prefix1 = ' FROM bayesdb_variable AS v' \
        ' WHERE v.population_id = 1' \
        ' AND v.generator_id IS NULL'
    prefix = prefix0 + prefix1
    assert bql2sql('estimate * from columns of p1;') == \
        prefix + ';'
    assert bql2sql('estimate * from columns of p1 where' +
            ' (probability density of value 42) > 0.5') == \
        prefix + \
        ' AND (bql_column_value_probability(1, NULL, NULL, v.colno, 42) > 0.5);'
    assert bql2sql('estimate * from columns of p1'
            ' where (probability density of value 8)'
            ' > (probability density of age = 16)') == \
        prefix + \
        ' AND (bql_column_value_probability(1, NULL, NULL, v.colno, 8) >' \
        ' bql_pdf_joint(1, NULL, NULL, 2, 16));'
    assert bql2sql('estimate *, probability density of value 8 given (age = 8)'
            ' from columns of p1;') == \
        prefix0 + \
        ', bql_column_value_probability(1, NULL, NULL, v.colno, 8, 2, 8)' + \
        prefix1 + ';'
    with pytest.raises(bayeslite.BQLError):
        # 'agee' is not a variable of p1.
        bql2sql('estimate probability density of value 8 given (agee = 8)'
            ' from columns of p1')
    with pytest.raises(bayeslite.BQLError):
        # PREDICTIVE PROBABILITY makes no sense without row.
        bql2sql('estimate * from columns of p1 where' +
            ' predictive probability of x > 0;')
    with pytest.raises(bayeslite.BQLError):
        # SIMILARITY makes no sense without row.
        bql2sql('estimate * from columns of p1 where' +
            ' similarity to (rowid = x) in the context of c > 0;')
    assert bql2sql('estimate * from columns of p1 where' +
            ' dependence probability with age > 0.5;') == \
        prefix + \
        ' AND (bql_column_dependence_probability(1, NULL, NULL, 2, v.colno)' \
        ' > 0.5);'
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1 where' +
            ' dependence probability of age with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1'
            ' where dependence probability > 0.5;')
    assert bql2sql('estimate * from columns of p1 order by' +
            ' mutual information with age;') == \
        prefix + \
        ' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2]\','\
        ' \'[\' || v.colno || \']\', NULL);'
    assert bql2sql('estimate * from columns of p1 order by' +
            ' mutual information with (age, label) using 42 samples;') == \
        prefix + \
        ' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2, 1]\','\
        ' \'[\' || v.colno || \']\', 42);'
    assert bql2sql('estimate * from columns of p1 order by' +
            ' mutual information with (age, label)'
            ' given (weight=12) using 42 samples;') == \
        prefix + \
        ' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2, 1]\','\
        ' \'[\' || v.colno || \']\', 42, 3, 12);'
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1 order by' +
            ' mutual information of age with weight;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1'
            ' where mutual information > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1 order by' +
            ' mutual information of age with weight using 42 samples;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1 where' +
            ' mutual information using 42 samples > 0.5;')
    assert bql2sql('estimate * from columns of p1 order by' +
            ' correlation with age desc;') == \
        prefix + ' ORDER BY bql_column_correlation(1, NULL, NULL, 2, v.colno)' \
        ' DESC;'
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1 order by' +
            ' correlation of age with weight;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit exactly one column.
        bql2sql('estimate * from columns of p1 where correlation > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Makes no sense.
        bql2sql('estimate * from columns of p1'
            ' where predict age with confidence 0.9 > 30;')
    assert bql2sql('estimate'
            ' *, dependence probability with weight as depprob,'
            ' mutual information with weight as mutinf'
            ' from columns of p1'
            ' where depprob > 0.5 order by mutinf desc') == \
        prefix0 + \
        ', bql_column_dependence_probability(1, NULL, NULL, 3, v.colno)' \
        ' AS "depprob"' \
        ', bql_column_mutual_information(1, NULL, NULL, \'[3]\',' \
        ' \'[\' || v.colno || \']\', NULL) AS "mutinf"' \
        + prefix1 + \
        ' AND ("depprob" > 0.5)' \
        ' ORDER BY "mutinf" DESC;'
    assert bql2sql('estimate'
            ' *, dependence probability with weight as depprob,'
            ' mutual information with (age, weight) as mutinf'
            ' from columns of p1'
            ' where depprob > 0.5 order by mutinf desc') == \
        prefix0 + \
        ', bql_column_dependence_probability(1, NULL, NULL, 3, v.colno)' \
        ' AS "depprob"' \
        ', bql_column_mutual_information(1, NULL, NULL, \'[2, 3]\',' \
        ' \'[\' || v.colno || \']\', NULL) AS "mutinf"' \
        + prefix1 + \
        ' AND ("depprob" > 0.5)' \
        ' ORDER BY "mutinf" DESC;'
    # XXX This mixes up target and reference variables, which is OK,
    # because MI is symmetric, but...oops.
    assert bql2sql('estimate * from variables of p1'
            ' where probability of (mutual information with age < 0.1)'
            ' > 0.8') == \
        prefix + \
        ' AND ((SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"' \
        ' FROM (SELECT mi AS "v0" FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[' || v.colno || ']'))) > 0.8);"
    assert bql2sql('estimate * from variables of p1'
            ' order by probability of (mutual information with age < 0.1)') ==\
        prefix + \
        ' ORDER BY (SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"' \
        ' FROM (SELECT mi AS "v0" FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[' || v.colno || ']')));"
def test_estimate_pairwise_trivial():
    """Check compilation of ESTIMATE ... FROM PAIRWISE COLUMNS OF p1
    (two-column estimators over the v0 x v1 self-join of bayesdb_variable),
    including the error cases where both columns must be omitted or a
    row/one-column function is misused."""
    # Shared golden-SQL fragments for the pairwise self-join scaffold.
    prefix = 'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1, '
    infix = ' AS value'
    infix0 = ' FROM bayesdb_population AS p,'
    infix0 += ' bayesdb_variable AS v0,'
    infix0 += ' bayesdb_variable AS v1'
    infix0 += ' WHERE p.id = 1'
    infix0 += ' AND v0.population_id = p.id AND v1.population_id = p.id'
    infix0 += ' AND v0.generator_id IS NULL'
    infix0 += ' AND v1.generator_id IS NULL'
    infix += infix0
    assert bql2sql('estimate dependence probability'
            ' from pairwise columns of p1;') == \
        prefix + \
        'bql_column_dependence_probability(1, NULL, NULL, v0.colno,'\
        ' v1.colno)' + \
        infix + ';'
    assert bql2sql('estimate mutual information'
            ' from pairwise columns of p1 where'
            ' (probability density of age = 0) > 0.5;') == \
        prefix + \
        'bql_column_mutual_information(1, NULL, NULL, '\
        '\'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL)' + \
        infix + \
        ' AND (bql_pdf_joint(1, NULL, NULL, 2, 0) > 0.5);'
    assert bql2sql('estimate mutual information given (label=\'go\', weight)'
            ' from pairwise columns of p1 where'
            ' (probability density of age = 0) > 0.5;') == \
        prefix + \
        'bql_column_mutual_information(1, NULL, NULL,'\
        ' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL,'\
        ' 1, \'go\', 3, NULL)' + \
        infix + \
        ' AND (bql_pdf_joint(1, NULL, NULL, 2, 0) > 0.5);'
    with pytest.raises(bayeslite.BQLError):
        # PROBABILITY DENSITY OF VALUE is 1-column.
        bql2sql('estimate correlation from pairwise columns of p1 where' +
            ' (probability density of value 0) > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # PREDICTIVE PROBABILITY OF is a row function.
        bql2sql('estimate dependence probability'
            ' from pairwise columns of p1' +
            ' where predictive probability of x > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate dependence probability'
            ' from pairwise columns of p1'
            ' where dependence probability of age with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate mutual information from pairwise columns of p1'
            ' where dependence probability with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate mutual information using 42 samples'
            ' from pairwise columns of p1'
            ' where dependence probability with weight > 0.5;')
    assert bql2sql('estimate correlation from pairwise columns of p1'
            ' where dependence probability > 0.5;') == \
        prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \
        infix + ' AND' \
        ' (bql_column_dependence_probability(1, NULL, NULL, v0.colno,' \
        ' v1.colno)' \
        ' > 0.5);'
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate dependence probability'
            ' from pairwise columns of p1'
            ' where mutual information of age with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate dependence probability'
            ' from pairwise columns of p1'
            ' where mutual information of age with weight using 42 samples'
            ' > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate mutual information from pairwise columns of p1'
            ' where mutual information with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate mutual information using 42 samples'
            ' from pairwise columns of p1'
            ' where mutual information with weight using 42 samples > 0.5;')
    assert bql2sql('estimate correlation from pairwise columns of p1' +
            ' where mutual information > 0.5;') == \
        prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \
        infix + ' AND' + \
        ' (bql_column_mutual_information(1, NULL, NULL,'\
        ' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL) > 0.5);'
    assert bql2sql('estimate correlation from pairwise columns of p1' +
            ' where mutual information using 42 samples > 0.5;') == \
        prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \
        infix + ' AND' + \
        ' (bql_column_mutual_information(1, NULL, NULL,'\
        ' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', 42) > 0.5);'
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate dependence probability'
            ' from pairwise columns of p1'
            ' where correlation of age with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate mutual information from pairwise columns of p1'
            ' where correlation with weight > 0.5;')
    with pytest.raises(bayeslite.BQLError):
        # Must omit both columns.
        bql2sql('estimate mutual information using 42 samples'
            ' from pairwise columns of p1'
            ' where correlation with weight > 0.5;')
    assert bql2sql('estimate correlation from pairwise columns of p1'
            ' where correlation > 0.5;') == \
        prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \
        infix + ' AND' + \
        ' (bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno) > 0.5);'
    with pytest.raises(bayeslite.BQLError):
        # Makes no sense.
        bql2sql('estimate dependence probability'
            ' from pairwise columns of p1'
            ' where predict age with confidence 0.9 > 30;')
    assert bql2sql('estimate dependence probability as depprob,'
            ' mutual information as mutinf'
            ' from pairwise columns of p1'
            ' where depprob > 0.5 order by mutinf desc') == \
        prefix + \
        'bql_column_dependence_probability(1, NULL, NULL, v0.colno, v1.colno)' \
        ' AS "depprob",' \
        ' bql_column_mutual_information(1, NULL, NULL,'\
        ' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL)'\
        ' AS "mutinf"' \
        + infix0 + \
        ' AND ("depprob" > 0.5)' \
        ' ORDER BY "mutinf" DESC;'
def test_estimate_pairwise_row():
    """Pairwise-row ESTIMATE self-joins the table as r0/r1."""
    head = 'SELECT r0._rowid_ AS rowid0, r1._rowid_ AS rowid1'
    tail = ' AS value FROM "t1" AS r0, "t1" AS r1'
    similarity = (', bql_row_similarity(1, NULL, NULL,'
        ' r0._rowid_, r1._rowid_, 2)')
    assert bql2sql(
        'estimate similarity in the context of age from pairwise p1;') \
        == head + similarity + tail + ';'
    # PREDICT is a 1-row function; it is meaningless over pairs of rows.
    with pytest.raises(bayeslite.BQLError):
        bql2sql('estimate predict age with confidence 0.9 from pairwise t1;')
def test_estimate_pairwise_selected_columns():
    """FOR <names> and FOR (<column subquery>) restrict the pairwise
    estimate to the listed columns' numbers on both sides of the pair."""
    assert bql2sql('estimate dependence probability'
            ' from pairwise columns of p1 for label, age') == \
        'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,' \
        ' bql_column_dependence_probability(1, NULL, NULL,' \
        ' v0.colno, v1.colno)' \
        ' AS value' \
        ' FROM bayesdb_population AS p,' \
        ' bayesdb_variable AS v0,' \
        ' bayesdb_variable AS v1' \
        ' WHERE p.id = 1' \
        ' AND v0.population_id = p.id AND v1.population_id = p.id' \
        ' AND v0.generator_id IS NULL AND v1.generator_id IS NULL' \
        ' AND v0.colno IN (1, 2) AND v1.colno IN (1, 2);'
    # The FOR subquery is evaluated at compile time to a list of colnos.
    assert bql2sql('estimate dependence probability'
            ' from pairwise columns of p1'
            ' for (ESTIMATE * FROM COLUMNS OF p1'
                ' ORDER BY name DESC LIMIT 2)') == \
        'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,' \
        ' bql_column_dependence_probability(1, NULL, NULL, v0.colno,' \
        ' v1.colno)' \
        ' AS value' \
        ' FROM bayesdb_population AS p,' \
        ' bayesdb_variable AS v0,' \
        ' bayesdb_variable AS v1' \
        ' WHERE p.id = 1' \
        ' AND v0.population_id = p.id AND v1.population_id = p.id' \
        ' AND v0.generator_id IS NULL AND v1.generator_id IS NULL' \
        ' AND v0.colno IN (3, 1) AND v1.colno IN (3, 1);'
def test_select_columns_subquery():
    """A t1.(ESTIMATE ... FROM COLUMNS OF ...) subquery expands in place to
    the selected columns of the table."""
    expanded = 'SELECT "id", "t1"."age", "t1"."label" FROM "t1";'
    assert bql2sql('select id, t1.(estimate * from columns of p1'
        ' order by name asc limit 2) from t1') == expanded
@pytest.mark.xfail(strict=True, reason='no simulate vars from models of')
def test_simulate_models_columns_subquery():
    """Would exercise column subqueries under SIMULATE ... FROM MODELS OF;
    that combination is not implemented, hence the strict xfail."""
    assert bql2sql('simulate weight, t1.(estimate * from columns of p1'
            ' order by name asc limit 2) from models of p1') == \
        'SELECT * FROM "bayesdb_temp_0";'
    assert bql2sql('simulate 0, weight, t1.(estimate * from columns of p1'
            ' order by name asc limit 2) from models of p1') == \
        'SELECT 0, "v0" AS "weight", "v1" AS "age", "v2" AS "label" FROM' \
        ' (SELECT * FROM "bayesdb_temp_0");'
    assert bql2sql('simulate weight + 1, t1.(estimate * from columns of p1'
            ' order by name asc limit 2) from models of p1') == \
        'SELECT ("v0" + 1), "v1" AS "age", "v2" AS "label" FROM' \
        ' (SELECT * FROM "bayesdb_temp_0");'
    assert bql2sql('simulate weight + 1 AS wp1,'
            ' t1.(estimate * from columns of p1'
            ' order by name asc limit 2) from models of p1') == \
        'SELECT ("v0" + 1) AS "wp1", "v1" AS "age", "v2" AS "label" FROM' \
        ' (SELECT * FROM "bayesdb_temp_0");'
def test_simulate_columns_subquery():
    # XXX A little unsatisfactory -- we do not get to see what the
    # variables in the result are named...
    query = ('simulate weight, t1.(estimate * from columns of p1'
        ' order by name asc limit 2) from p1 limit 10')
    assert bql2sql(query) == 'SELECT * FROM "bayesdb_temp_0";'
    # Compound columns not yet implemented for SIMULATE.
    with pytest.raises(parse.BQLParseError):
        bql2sql('simulate weight + 1, t1.(estimate * from columns of p1'
            ' order by name asc limit 2) from p1 limit 10')
def test_simulate_models():
    """SIMULATE ... FROM MODELS OF compiles to SELECTs over the bql_mutinf
    virtual table, keyed by JSON lists of target/reference colnos."""
    # Base case.
    assert bql2sql('simulate mutual information of age with weight'
            ' from models of p1') == \
        'SELECT mi FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[3]';"
    # Multiple target variables.
    assert bql2sql('simulate mutual information of (label, age) with weight'
            ' from models of p1') == \
        'SELECT mi FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[1, 2]'" \
        " AND reference_vars = '[3]';"
    # Multiple reference variables.
    assert bql2sql('simulate mutual information of age with (label, weight)'
            ' from models of p1') == \
        'SELECT mi FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[1, 3]';"
    # Specified number of samples.
    assert bql2sql('simulate mutual information of age with weight'
            ' using 42 samples from models of p1') == \
        'SELECT mi FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[3]'" \
        ' AND nsamples = 42;'
    # Conditional.
    assert bql2sql('simulate mutual information of age with weight'
            " given (label = 'foo') from models of p1") == \
        'SELECT mi FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[3]'" \
        " AND conditions = '{\"1\": \"foo\"}';"
    # Modeled by a specific generator.
    assert bql2sql('simulate mutual information of age with weight'
            ' from models of p1 modeled by g1',
            lambda bdb: bdb.execute('create generator g1 for p1')) == \
        'SELECT mi FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        ' AND generator_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[3]';"
    # Two mutual informations.
    assert bql2sql('simulate mutual information of age with weight AS "mi(aw)",'
            ' mutual information of label with weight AS "mi(lw)"'
            ' from models of p1') == \
        'SELECT t0."mi(aw)" AS "mi(aw)", t1."mi(lw)" AS "mi(lw)"' \
        ' FROM (SELECT _rowid_, mi AS "mi(aw)" FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[2]'" \
        " AND reference_vars = '[3]') AS t0," \
        ' (SELECT _rowid_, mi AS "mi(lw)" FROM bql_mutinf' \
        ' WHERE population_id = 1' \
        " AND target_vars = '[1]'" \
        " AND reference_vars = '[3]') AS t1" \
        ' WHERE t0._rowid_ = t1._rowid_;'
def test_probability_of_mutinf():
    """PROBABILITY OF (MI < t) averages an indicator variable over samples
    drawn from the bql_mutinf virtual table."""
    expected = ''.join([
        'SELECT ((SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"',
        ' FROM (SELECT mi AS "v0" FROM bql_mutinf',
        ' WHERE population_id = 1',
        " AND target_vars = '[2]'",
        " AND reference_vars = '[3]'))) > 0.5);",
    ])
    assert bql2sql(
        'estimate probability of'
        ' (mutual information of age with weight < 0.1) > 0.5'
        ' within p1') == expected
def test_modeledby_usingmodels_trival():
    """MODELED BY <generator> and USING MODELS <list> thread the generator
    id and a JSON model-number list into the bql_* calls.
    ('trival' in the name is sic; renaming would churn test selection.)"""
    def setup(bdb):
        # Give p1 a named generator so MODELED BY m1 resolves.
        bdb.execute('create generator m1 for p1 using cgpm;')
    assert bql2sql('estimate predictive probability of weight + 1'
            ' from p1 modeled by m1 using models 1-3, 5;', setup=setup) == \
        'SELECT (bql_row_column_predictive_probability(1, 1, \'[1, 2, 3, 5]\','\
        ' _rowid_, \'[3]\', \'[]\') + 1)' \
        ' FROM "t1";'
    assert bql2sql(
        'infer rowid, age, weight from p1 modeled by m1 using model 7',
        setup=setup) == \
        'SELECT "rowid" AS "rowid",' \
        ' "IFNULL"("age", bql_predict(1, 1, \'[7]\', _rowid_, 2, 0, NULL))' \
        ' AS "age",' \
        ' "IFNULL"("weight", bql_predict(1, 1, \'[7]\', _rowid_, 3, 0, NULL))' \
        ' AS "weight"' \
        ' FROM "t1";'
    assert bql2sql('infer explicit predict age with confidence 0.9'
            ' from p1 using models 0, 3-5;',
            setup=setup) == \
        'SELECT bql_predict(1, NULL, \'[0, 3, 4, 5]\', _rowid_, 2, 0.9, NULL)'\
        ' FROM "t1";'
    assert bql2sql('''
        estimate predictive relevance
            of (label = 'Uganda')
            to existing rows (rowid < 4)
            and hypothetical rows with values (
                ("age" = 82, "weight" = 14),
                ("age" = 74, label = 'Europe', "weight" = 7)
            )
            in the context of "weight"
        by p1 modeled by m1 using models 8, 10-12
    ''', setup=setup) == \
        'SELECT bql_row_predictive_relevance(1, 1, \'[8, 10, 11, 12]\', ' \
        '(SELECT _rowid_ FROM "t1" WHERE ("label" = \'Uganda\')), '\
        '\'[1, 2, 3]\', 3, '\
        '2, 82, 3, 14, NULL, 2, 74, 1, \'Europe\', 3, 7, NULL);'
    assert bql2sql('''
        estimate dependence probability
            from pairwise columns of p1
            for label, age
            modeled by m1
            using models 1, 4, 12
    ''', setup=setup) == \
        'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,' \
        ' bql_column_dependence_probability(1, 1, \'[1, 4, 12]\',' \
        ' v0.colno, v1.colno)' \
        ' AS value' \
        ' FROM bayesdb_population AS p,' \
        ' bayesdb_variable AS v0,' \
        ' bayesdb_variable AS v1' \
        ' WHERE p.id = 1' \
        ' AND v0.population_id = p.id AND v1.population_id = p.id' \
        ' AND (v0.generator_id IS NULL OR v0.generator_id = 1)' \
        ' AND (v1.generator_id IS NULL OR v1.generator_id = 1)' \
        ' AND v0.colno IN (1, 2) AND v1.colno IN (1, 2);'
    assert bql2sql('''
        estimate mutual information of age with weight
        from p1 modeled by m1 using model 1;
    ''', setup=setup) == \
        'SELECT bql_column_mutual_information('\
        '1, 1, \'[1]\', \'[2]\', \'[3]\', NULL)'\
        ' FROM "t1";'
def test_simulate_columns_all():
    # SIMULATE requires an explicit variable list; `*` is a parse error.
    with pytest.raises(parse.BQLParseError):
        bql2sql('simulate * from p1 limit 1')
def test_trivial_commands():
    """Smoke-test the whole command surface against a live bdb: CSV load,
    population/generator lifecycle, model init/drop/analyze, table and
    generator renames (including case-insensitivity), and basic queries.
    Statements are order-dependent; each step relies on state left by the
    previous ones."""
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        # XXX Query parameters!
        # NOTE(review): 'rU' is Python-2 universal-newline mode (removed in
        # Python 3.11) -- this file appears to target Python 2.
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        with open(fname, 'rU') as f:
            # Re-creating an existing table without ifnotexists is an error.
            with pytest.raises(ValueError):
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True)
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True,
                ifnotexists=True)
        guess.bayesdb_guess_population(bdb, 'p', 't')
        with pytest.raises(ValueError):
            guess.bayesdb_guess_population(bdb, 'p', 't')
        guess.bayesdb_guess_population(bdb, 'p', 't', ifnotexists=True)
        bdb.execute('create generator p_cc for p;')
        bdb.execute('initialize 2 models for p_cc')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('initialize 2 models for p_cc')
        # Dropping all models is idempotent.
        bdb.execute('drop models from p_cc')
        bdb.execute('drop models from p_cc')
        bdb.execute('initialize 2 models for p_cc')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('initialize 2 models for p_cc')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop models 0-2 from p_cc')
        bdb.execute('drop models 0-1 from p_cc')
        with bdb.savepoint():
            bdb.execute('initialize 2 models for p_cc')
            bdb.execute('drop models 0-1 from p_cc')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop models 0-1 from p_cc')
        bdb.execute('initialize 2 models for p_cc')
        bdb.execute('initialize 1 model if not exists for p_cc')
        bdb.execute('initialize 2 models if not exists for p_cc')
        population_id = core.bayesdb_get_population(bdb, 'p')
        generator_id = core.bayesdb_get_generator(bdb, population_id, 'p_cc')
        assert core.bayesdb_generator_table(bdb, generator_id) == 't'
        # Renames (including no-op and case-only renames) must track through
        # to the generator's notion of its table.
        bdb.execute('alter table t rename to t')
        assert core.bayesdb_generator_table(bdb, generator_id) == 't'
        bdb.execute('alter table t rename to T')
        assert core.bayesdb_generator_table(bdb, generator_id) == 'T'
        bdb.execute('alter population p rename to p')
        assert core.bayesdb_population_name(bdb, population_id) == 'p'
        bdb.execute('alter population p rename to p2')
        assert core.bayesdb_population_name(bdb, population_id) == 'p2'
        bdb.execute('alter population p2 rename to p')
        assert core.bayesdb_population_name(bdb, population_id) == 'p'
        bdb.execute('estimate count(*) from p').fetchall()
        bdb.execute('alter table t rename to t')
        assert core.bayesdb_generator_table(bdb, generator_id) == 't'
        bdb.execute('alter generator p_cc rename to p0_cc')
        assert core.bayesdb_generator_name(bdb, generator_id) == 'p0_cc'
        bdb.execute('alter generator p0_cc rename to zot, rename to P0_CC')
        assert core.bayesdb_generator_name(bdb, generator_id) == 'P0_CC'
        bdb.execute('alter generator P0_cc rename to P0_cc')
        assert core.bayesdb_generator_name(bdb, generator_id) == 'P0_cc'
        bdb.execute('alter generator p0_CC rename to p0_cc')
        assert core.bayesdb_generator_name(bdb, generator_id) == 'p0_cc'
        bdb.execute('estimate count(*) from p').fetchall()
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate count(*) from p_cc')
        bdb.execute('alter generator p0_cc rename to P0_cc')
        bdb.execute('analyze p0_cc for 1 iteration')
        colno = core.bayesdb_variable_number(bdb, population_id, generator_id,
            'gender')
        with pytest.raises(parse.BQLParseError):
            # Rename the table's columns, not the generator's columns.
            bdb.execute('alter generator p0_cc rename gender to sex')
        with pytest.raises(NotImplementedError): # XXX
            bdb.execute('alter table t rename to t0, rename gender to sex')
            assert core.bayesdb_variable_number(
                    bdb, population_id, generator_id, 'sex') \
                == colno
        bdb.execute('analyze p0_cc model 0 for 1 iteration')
        bdb.execute('alter generator p0_cc rename to p_cc')
        assert core.bayesdb_variable_number(
                bdb, population_id, generator_id, 'sex') \
            == colno
        bdb.execute('select sex from t0').fetchall()
        with pytest.raises(AssertionError): # XXX
            bdb.execute('select gender from t0')
            assert False, 'Need to fix quoting of unknown columns!'
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate predict sex with confidence 0.9'
                ' from p').fetchall()
        bdb.execute('infer explicit predict sex with confidence 0.9'
            ' from p').fetchall()
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate predict gender with confidence 0.9'
                ' from p')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('infer explicit predict gender with confidence 0.9'
                ' from p')
        bdb.execute('alter table t0 rename sex to gender')
        assert core.bayesdb_variable_number(
                bdb, population_id, generator_id, 'gender') \
            == colno
        bdb.execute('alter generator p0_cc rename to p_cc') # XXX
        bdb.execute('alter table t rename to T0') # XXX
        bdb.sql_execute('create table t0_temp(x)')
        bdb.execute('alter table T0 rename to t0')
        assert bdb.execute('select count(*) from t0_temp').fetchvalue() == 0
        assert bdb.execute('select count(*) from t0').fetchvalue() > 0
        with pytest.raises(bayeslite.BQLError):
            # Cannot specify models with rename.
            bdb.execute('alter generator p_cc models (1) rename to p_cc_fail')
        bdb.execute('drop table T0_TEMP')
        # Every syntactic form of model selection for ANALYZE.
        bdb.execute('analyze p_cc model 0 for 1 iteration')
        bdb.execute('analyze p_cc model 1 for 1 iteration')
        bdb.execute('analyze p_cc models 0-1 for 1 iteration')
        bdb.execute('analyze p_cc models 0,1 for 1 iteration')
        bdb.execute('analyze p_cc for 1 iteration')
        bdb.execute('select * from t0').fetchall()
        bdb.execute('select * from T0').fetchall()
        bdb.execute('estimate * from p').fetchall()
        bdb.execute('estimate * from P').fetchall()
        # SIMILARITY IN THE CONTEXT OF requires exactly 1 variable.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate similarity in the context of * '
                'from pairwise p').fetchall()
        bdb.execute('estimate similarity in the context of age '
            'from pairwise p').fetchall()
        bdb.execute('alter population p rename to p2')
        assert core.bayesdb_population_name(bdb, population_id) == 'p2'
        bdb.execute('estimate similarity to (rowid=1) in the context of rank '
            'from p2').fetchall()
        bdb.execute('select value from'
            ' (estimate correlation from pairwise columns of p2)').fetchall()
        bdb.execute('infer explicit predict age with confidence 0.9'
            ' from p2').fetchall()
        bdb.execute('infer explicit predict AGE with confidence 0.9'
            ' from P2').fetchall()
        bdb.execute('infer explicit predict aGe with confidence 0.9'
            ' from P2').fetchall()
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate predict agee with confidence 0.9 from p2')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('infer explicit predict agee with confidence 0.9'
                ' from p2')
        guess.bayesdb_guess_population(bdb, 'pe', 't0',
            overrides=[
                ('age', 'numerical'),
                ('rank', 'numerical'),
            ])
        bdb.execute('create generator pe_cc for pe;')
        with pytest.raises(bayeslite.BQLError):
            # No models to analyze.
            bdb.execute('analyze pe_cc for 1 iteration')
        bdb.execute('initialize 1 model if not exists for pe_cc')
        bdb.execute('analyze pe_cc for 1 iteration')
        bdb.execute('estimate correlation'
            ' from pairwise columns of pe').fetchall()
        # BQL model/estimate commands addressed at a raw table (not a
        # population) must fail.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('initialize 4 models if not exists for t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('analyze t0 for 1 iteration')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate * from t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate * from columns of t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate correlation from pairwise columns of t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate similarity in the context of age '
                'from pairwise t')
        bdb.execute('initialize 6 models if not exists for p_cc')
        bdb.execute('analyze p_cc for 1 iteration')
def test_trivial_deadline():
    """Smoke test: time-bounded analysis (`FOR 1 SECOND`) completes cleanly.

    Only checks that the deadline form of ANALYZE is accepted and runs;
    no assertion is made about how much analysis actually happened.
    """
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        bdb.execute('analyze p1_cc for 1 second')
def test_parametrized():
    """Test parameter binding in BQL queries and the exact SQL they compile to.

    Three phases:
      1. bql2sqlparam: all parameter spellings (?, :name, $name, @name, ?NNN)
         are renumbered into sequential SQLite ?N placeholders.
      2. Executing parametrized queries with positional and named bindings
         (named bindings are case-insensitive and prefix-insensitive).
      3. Tracing: traced_execute/sqltraced_execute capture the BQL and the
         underlying SQL, and the expected SQL statement sequences are
         asserted verbatim, including temp-table creation for SIMULATE.
    """
    # Phase 1: compilation of each parameter style to numbered placeholders.
    assert bql2sqlparam('select * from t where id = ?') == \
        'SELECT * FROM "t" WHERE ("id" = ?1);'
    assert bql2sqlparam('select * from t where id = :foo') == \
        'SELECT * FROM "t" WHERE ("id" = ?1);'
    assert bql2sqlparam('select * from t where id = $foo') == \
        'SELECT * FROM "t" WHERE ("id" = ?1);'
    assert bql2sqlparam('select * from t where id = @foo') == \
        'SELECT * FROM "t" WHERE ("id" = ?1);'
    assert bql2sqlparam('select * from t where id = ?123') == \
        'SELECT * FROM "t" WHERE ("id" = ?1);'
    # Repeated references to the same parameter reuse one placeholder.
    assert bql2sqlparam('select * from t where a = $foo and b = ?1;') == \
        'SELECT * FROM "t" WHERE (("a" = ?1) AND ("b" = ?1));'
    assert bql2sqlparam('select * from t' +
            ' where a = ?123 and b = :foo and c = ?124') == \
        'SELECT * FROM "t" WHERE' + \
        ' ((("a" = ?1) AND ("b" = ?2)) AND ("c" = ?2));'
    # Phase 2: execution with actual bindings against a CSV-loaded table.
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        assert bql_execute(bdb, 'select count(*) from t') == [(7,)]
        assert bql_execute(bdb, 'select count(distinct division) from t') == \
            [(6,)]
        assert bql_execute(bdb, 'select * from t where height > ?', (70,)) == \
            [
                (41, 'M', 65600, 72, 'marketing', 4),
                (30, 'M', 70000, 73, 'sales', 4),
                (30, 'F', 81000, 73, 'engineering', 3),
            ]
        # ?123 refers to the 123rd positional binding; pad with 122 dummies.
        assert bql_execute(bdb, 'select * from t where height > ?123',
                (0,)*122 + (70,)) == \
            [
                (41, 'M', 65600, 72, 'marketing', 4),
                (30, 'M', 70000, 73, 'sales', 4),
                (30, 'F', 81000, 73, 'engineering', 3),
            ]
        assert bql_execute(bdb, 'select age from t where division = :division',
                {':division': 'sales'}) == \
            [(34,), (30,)]
        assert bql_execute(bdb, 'select division from t' +
                    ' where age < @age and rank > ?;',
                (40, 4)) == \
            [('accounting',)]
        # Named bindings match case-insensitively and regardless of sigil.
        assert bql_execute(bdb, 'select division from t' +
                    ' where age < @age and rank > :rank;',
                {':RANK': 4, '@aGe': 40}) == \
            [('accounting',)]
        # A missing named binding is an error.
        with pytest.raises(ValueError):
            bdb.execute('select * from t where age < ? and rank > :r',
                {':r': 4})
        # Phase 3: tracing helpers.  Each collects whitespace-normalized
        # statements observed during a single execute inside a savepoint.
        def traced_execute(query, *args):
            bql = []
            def trace(string, _bindings):
                bql.append(' '.join(string.split()))
            bdb.trace(trace)
            with bdb.savepoint():
                bdb.execute(query, *args)
            bdb.untrace(trace)
            return bql
        def sqltraced_execute(query, *args):
            sql = []
            def trace(string, _bindings):
                sql.append(' '.join(string.split()))
            bdb.sql_trace(trace)
            with bdb.savepoint():
                bdb.execute(query, *args)
            bdb.sql_untrace(trace)
            return sql
        guess.bayesdb_guess_population(bdb, 'p', 't')
        bdb.execute('create generator p_cc for p;')
        bdb.execute('initialize 1 model for p_cc;')
        # The BQL trace records the BQL statement itself, unchanged.
        assert traced_execute('estimate similarity to (rowid = 1)'
                ' in the context of (estimate * from columns of p limit 1)'
                ' from p;') == [
            'estimate similarity to (rowid = 1)' \
                ' in the context of (estimate * from columns of p limit 1)' \
                ' from p;',
        ]
        # The SQL trace records the exact sequence of compiled statements.
        assert sqltraced_execute('estimate similarity to (rowid = 1)'
                ' in the context of (estimate * from columns of p limit 1)'
                ' from p;') == [
            'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?',
            'SELECT id FROM bayesdb_population WHERE name = ?',
            'SELECT tabname FROM bayesdb_population WHERE id = ?',
            'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?',
            'SELECT id FROM bayesdb_population WHERE name = ?',
            'SELECT v.name AS name FROM bayesdb_variable AS v'
                ' WHERE v.population_id = 1'
                ' AND v.generator_id IS NULL'
                ' LIMIT 1',
            'SELECT colno FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT tabname FROM bayesdb_population'
                ' WHERE id = ?',
            'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,'
                ' (SELECT _rowid_ FROM "t" WHERE ("rowid" = 1)), 0) FROM "t"',
            'SELECT id FROM bayesdb_generator WHERE population_id = ?',
            'SELECT backend FROM bayesdb_generator WHERE id = ?',
            'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
                ' WHERE generator_id = ? AND table_rowid = ?',
            'SELECT cgpm_rowid FROM bayesdb_cgpm_individual '
                'WHERE generator_id = ? AND table_rowid = ?',
            'SELECT engine_stamp FROM bayesdb_cgpm_generator '
                'WHERE generator_id = ?'
        ]
        # Same query but with a bound LIMIT: the placeholder survives into
        # the compiled subquery as ?1.
        assert sqltraced_execute('estimate similarity to (rowid = 1)'
                ' in the context of (estimate * from columns of p limit ?)'
                ' from p;',
                (1,)) == [
            'SELECT COUNT(*) FROM bayesdb_population'
                ' WHERE name = ?',
            'SELECT id FROM bayesdb_population'
                ' WHERE name = ?',
            'SELECT tabname FROM bayesdb_population WHERE id = ?',
            'SELECT COUNT(*) FROM bayesdb_population'
                ' WHERE name = ?',
            'SELECT id FROM bayesdb_population'
                ' WHERE name = ?',
            # ESTIMATE * FROM COLUMNS OF:
            'SELECT v.name AS name'
                ' FROM bayesdb_variable AS v'
                ' WHERE v.population_id = 1'
                ' AND v.generator_id IS NULL'
                ' LIMIT ?1',
            'SELECT colno FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT tabname FROM bayesdb_population WHERE id = ?',
            # ESTIMATE SIMILARITY TO (rowid=1):
            'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,'
                ' (SELECT _rowid_ FROM "t" WHERE ("rowid" = 1)), 0) FROM "t"',
            'SELECT id FROM bayesdb_generator WHERE population_id = ?',
            'SELECT backend FROM bayesdb_generator WHERE id = ?',
            'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
                ' WHERE generator_id = ? AND table_rowid = ?',
            'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
                ' WHERE generator_id = ? AND table_rowid = ?',
            'SELECT engine_stamp FROM bayesdb_cgpm_generator'
                ' WHERE generator_id = ?'
        ]
        # CREATE TEMP TABLE ... AS SIMULATE: results are materialized into
        # an intermediate bayesdb_temp_0 table, then copied and dropped.
        assert sqltraced_execute(
                'create temp table if not exists sim as '
                'simulate age, RANK, division '
                'from p given gender = \'F\' limit 4') == [
            'PRAGMA table_info("sim")',
            'PRAGMA table_info("bayesdb_temp_0")',
            'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?',
            'SELECT id FROM bayesdb_population WHERE name = ?',
            'SELECT COUNT(*) FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT COUNT(*) FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT COUNT(*) FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT COUNT(*) FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT CAST(4 AS INTEGER), \'F\'',
            'SELECT token FROM bayesdb_rowid_tokens',
            'SELECT COUNT(*) FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT colno FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT token FROM bayesdb_rowid_tokens',
            'SELECT COUNT(*) FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT colno FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT token FROM bayesdb_rowid_tokens',
            'SELECT COUNT(*) FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT colno FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT token FROM bayesdb_rowid_tokens',
            'SELECT COUNT(*) FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT colno FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT tabname FROM bayesdb_population WHERE id = ?',
            'SELECT MAX(_rowid_) FROM "t"',
            'SELECT token FROM bayesdb_rowid_tokens',
            'SELECT token FROM bayesdb_rowid_tokens',
            'SELECT id FROM bayesdb_generator'
                ' WHERE population_id = ?',
            'SELECT backend FROM bayesdb_generator WHERE id = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT tabname FROM bayesdb_population WHERE id = ?',
            'SELECT 1 FROM "t" WHERE oid = ?',
            'SELECT 1 FROM bayesdb_cgpm_individual'
                ' WHERE generator_id = ? AND table_rowid = ? LIMIT 1',
            'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
                ' WHERE generator_id = ? AND table_rowid = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT code FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND value = ?',
            'SELECT engine_stamp FROM bayesdb_cgpm_generator'
                ' WHERE generator_id = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'CREATE TEMP TABLE "bayesdb_temp_0"'
                ' ("age","RANK","division")',
            'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'
                ' VALUES (?,?,?)',
            'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'
                ' VALUES (?,?,?)',
            'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'
                ' VALUES (?,?,?)',
            'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'
                ' VALUES (?,?,?)',
            'CREATE TEMP TABLE IF NOT EXISTS "sim" AS'
                ' SELECT * FROM "bayesdb_temp_0"',
            'DROP TABLE "bayesdb_temp_0"'
        ]
        # SIMULATE in a subquery: same materialization, via bayesdb_temp_1.
        assert sqltraced_execute(
                'select * from (simulate age from p '
                'given gender = \'F\' limit 4)') == [
            'PRAGMA table_info("bayesdb_temp_1")',
            'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?',
            'SELECT id FROM bayesdb_population WHERE name = ?',
            'SELECT COUNT(*) FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT COUNT(*) FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT CAST(4 AS INTEGER), \'F\'',
            'SELECT token FROM bayesdb_rowid_tokens',
            'SELECT COUNT(*) FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT colno FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT token FROM bayesdb_rowid_tokens',
            'SELECT COUNT(*) FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT colno FROM bayesdb_variable'
                ' WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?)'
                ' AND name = ?',
            'SELECT tabname FROM bayesdb_population WHERE id = ?',
            'SELECT MAX(_rowid_) FROM "t"',
            'SELECT token FROM bayesdb_rowid_tokens',
            'SELECT token FROM bayesdb_rowid_tokens',
            'SELECT id FROM bayesdb_generator WHERE population_id = ?',
            'SELECT backend FROM bayesdb_generator WHERE id = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT tabname FROM bayesdb_population WHERE id = ?',
            'SELECT 1 FROM "t" WHERE oid = ?',
            'SELECT 1 FROM bayesdb_cgpm_individual'
                ' WHERE generator_id = ? AND table_rowid = ? LIMIT 1',
            'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
                ' WHERE generator_id = ? AND table_rowid = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT code FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND value = ?',
            'SELECT engine_stamp FROM bayesdb_cgpm_generator'
                ' WHERE generator_id = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
                ' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
            'SELECT value FROM bayesdb_cgpm_category'
                ' WHERE generator_id = ? AND colno = ? AND code = ?',
            'CREATE TEMP TABLE "bayesdb_temp_1" ("age")',
            'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)',
            'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)',
            'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)',
            'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)',
            'SELECT * FROM (SELECT * FROM "bayesdb_temp_1")',
            'DROP TABLE "bayesdb_temp_1"',
        ]
        # Trace ANALYZE on a second population: the engine is loaded,
        # updated, and written back with a fresh stamp.
        bdb.execute('''
            create population q for t (
                age NUMERICAL;
                gender NOMINAL;      -- Not binary!
                salary NUMERICAL;
                height NUMERICAL;
                division NOMINAL;
                rank NOMINAL;
            )
        ''')
        bdb.execute('create generator q_cc for q;')
        bdb.execute('initialize 1 model for q_cc;')
        assert sqltraced_execute('analyze q_cc for 1 iteration;') == [
            'SELECT COUNT(*) FROM bayesdb_generator WHERE name = ?',
            'SELECT id FROM bayesdb_generator WHERE name = ?',
            'SELECT backend FROM bayesdb_generator WHERE id = ?',
            'SELECT engine_json, engine_stamp FROM bayesdb_cgpm_generator'
                ' WHERE generator_id = ?',
            'SELECT population_id FROM bayesdb_generator WHERE id = ?',
            'SELECT engine_stamp FROM bayesdb_cgpm_generator'
                ' WHERE generator_id = ?',
            'UPDATE bayesdb_cgpm_generator'
                ' SET engine_json = :engine_json, engine_stamp = :engine_stamp'
                ' WHERE generator_id = :generator_id']
def test_create_table_ifnotexists_as_simulate():
    """CREATE TABLE IF NOT EXISTS ... AS SIMULATE semantics.

    When the target table does not exist, the simulate results are
    materialized; when it already exists, the statement is a no-op and
    the existing contents are left untouched (checked via row count).
    """
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        # If not exists table tests
        guess.bayesdb_guess_population(bdb, 'p', 't',
            overrides=[('age', 'numerical')])
        bdb.execute('create generator p_cc for p;')
        bdb.execute('initialize 1 model for p_cc')
        bdb.execute('analyze p_cc for 1 iteration')
        # Table absent: both the plain and GIVEN forms create it.
        bdb.execute('''
            create table if not exists u as
                simulate age from p limit 10
        ''')
        bdb.execute("drop table u")
        bdb.execute('''
            create table if not exists w as simulate age from p
                given division='sales' limit 10
        ''')
        bdb.execute("drop table w")
        # Table present: IF NOT EXISTS must leave it unchanged.
        bdb.execute("create table u as simulate age from p limit 10")
        x = bdb.execute("select count (*) from u").fetchvalue()
        bdb.execute('''
            create table if not exists u as simulate age from p limit 10
        ''')
        bdb.execute('''
            create table if not exists u as simulate age from p
                given division='sales' limit 10
        ''')
        assert x == bdb.execute("select count (*) from u").fetchvalue()
def test_createtab():
    """Lifecycle of tables, populations, and generators.

    Exercises DROP (with and without IF EXISTS) on missing objects,
    redefinition errors, dependency ordering (a table cannot be dropped
    while a population depends on it, nor a population while a generator
    with models depends on it), and CREATE TABLE ... AS SELECT.
    """
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        # Dropping nonexistent objects: plain DROP errors, IF EXISTS doesn't.
        with pytest.raises(apsw.SQLError):
            bdb.execute('drop table t')
        bdb.execute('drop table if exists t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop population p')
        bdb.execute('drop population if exists p')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop generator p_cc')
        bdb.execute('drop generator if exists p_cc')
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        with bdb.savepoint():
            # Savepoint because we don't actually want the new data to
            # be inserted.
            with open(fname, 'rU') as f:
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True, ifnotexists=True)
        guess.bayesdb_guess_population(bdb, 'p', 't',
            overrides=[('age', 'numerical')])
        bdb.execute('create generator p_cc for p;')
        with pytest.raises(bayeslite.BQLError):
            # Redefining population.
            bdb.execute('create population p for t (age numerical)')
        with pytest.raises(bayeslite.BQLError):
            # Redefining generator.
            bdb.execute('create generator p_cc for p;')
        # Make sure ignore columns work.
        #
        # XXX Also check key columns.
        guess.bayesdb_guess_population(bdb, 'p0', 't',
            overrides=[('age', 'ignore')])
        bdb.execute('drop population p0')
        population_id = core.bayesdb_get_population(bdb, 'p')
        colno = core.bayesdb_variable_number(bdb, population_id, None, 'age')
        assert core.bayesdb_variable_stattype(
            bdb, population_id, None, colno) == 'numerical'
        bdb.execute('initialize 1 model for p_cc')
        # Dependency ordering: table/population cannot be dropped while
        # objects still depend on them.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop table t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop population p')
        bdb.execute('drop generator p_cc')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop generator p_cc')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('drop table t')
        bdb.execute('drop generator if exists p_cc')
        bdb.execute('drop population p')
        bdb.execute('drop population if exists p')
        bdb.execute('drop table t')
        bdb.execute('drop table if exists t')
        with open(fname, 'rU') as f:
            bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
        guess.bayesdb_guess_population(bdb, 'p', 't')
        # CREATE TABLE AS SELECT materializes the query results.
        bdb.execute("create table u as select * from t where gender = 'F'")
        assert bql_execute(bdb, 'select * from u') == [
            (23, 'F', 81000, 67, 'data science', 3),
            (36, 'F', 96000, 70, 'management', 2),
            (30, 'F', 81000, 73, 'engineering', 3),
        ]
        with pytest.raises(bayeslite.BQLError):
            bdb.execute("create table u as select * from t where gender = 'F'")
        bdb.execute('drop table u')
        with pytest.raises(apsw.SQLError):
            bql_execute(bdb, 'select * from u')
        bdb.execute("create temp table u as"
            " select * from t where gender = 'F'")
        assert bql_execute(bdb, 'select * from u') == [
            (23, 'F', 81000, 67, 'data science', 3),
            (36, 'F', 96000, 70, 'management', 2),
            (30, 'F', 81000, 73, 'engineering', 3),
        ]
        # XXX Test to make sure TEMP is passed through, and the table
        # doesn't persist on disk.
def test_alterpop_addvar():
    """ALTER POPULATION ... ADD VARIABLE.

    Covers: failure for nonexistent/duplicate variables and invalid
    stattypes; adding a variable with an explicit stattype; adding
    multiple variables without stattypes in one statement; and adding a
    variable for a newly created base-table column, which fails while
    the column is all-NULL and succeeds once it has at least one value.
    """
    with bayeslite.bayesdb_open() as bdb:
        bayeslite.bayesdb_read_csv(
            bdb, 't', StringIO.StringIO(test_csv.csv_data),
            header=True, create=True)
        bdb.execute('''
            create population p for t with schema(
                age numerical;
                gender nominal;
                salary numerical;
                height ignore;
                division ignore;
                rank ignore;
            )
        ''')
        population_id = core.bayesdb_get_population(bdb, 'p')
        bdb.execute('create generator m for p;')
        # Fail when variable does not exist in base table.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('alter population p add variable quux;')
        # Fail when variable already in population.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('alter population p add variable age numerical;')
        # Fail when given invalid statistical type.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('alter population p add variable heigh numr;')
        # Alter pop with stattype.
        assert not core.bayesdb_has_variable(bdb, population_id, None, 'height')
        bdb.execute('alter population p add variable height numerical;')
        assert core.bayesdb_has_variable(bdb, population_id, None, 'height')
        # Alter pop multiple without stattype.
        assert not core.bayesdb_has_variable(bdb, population_id, None, 'rank')
        assert not core.bayesdb_has_variable(
            bdb, population_id, None, 'division')
        bdb.execute('''
            alter population p
                add variable rank,
                add variable division;
        ''')
        assert core.bayesdb_has_variable(bdb, population_id, None, 'rank')
        assert core.bayesdb_has_variable(bdb, population_id, None, 'division')
        # Add a new column weight to the base table.
        bdb.sql_execute('alter table t add column weight real;')
        # Fail when no values in new column.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('alter population p add variable weight numerical;')
        assert not core.bayesdb_has_variable(bdb, population_id, None, 'weight')
        # Update a single value and update the population.
        bdb.sql_execute('update t set weight = 1 where oid = 1;')
        bdb.execute('alter population p add variable weight numerical;')
        assert core.bayesdb_has_variable(bdb, population_id, None, 'weight')
def test_txn():
    """BEGIN/COMMIT/ROLLBACK transaction semantics in BQL.

    Checks: COMMIT/ROLLBACK fail outside a transaction; transactions do
    not nest (use savepoints instead); ROLLBACK undoes table/population
    creation; CREATE and DROP both take effect within a transaction and
    persist (or not) according to COMMIT/ROLLBACK; and bdb.transaction()
    rolls back on exception and refuses to nest inside a savepoint.
    """
    with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
        # Make sure rollback and commit fail outside a transaction.
        with pytest.raises(bayeslite.BayesDBTxnError):
            bdb.execute('ROLLBACK')
        with pytest.raises(bayeslite.BayesDBTxnError):
            bdb.execute('COMMIT')
        # Open a transaction which we'll roll back.
        bdb.execute('BEGIN')
        try:
            # Make sure transactions don't nest.  (Use savepoints.)
            with pytest.raises(bayeslite.BayesDBTxnError):
                bdb.execute('BEGIN')
        finally:
            bdb.execute('ROLLBACK')
        # Make sure rollback and commit still fail outside a transaction.
        with pytest.raises(bayeslite.BayesDBTxnError):
            bdb.execute('ROLLBACK')
        with pytest.raises(bayeslite.BayesDBTxnError):
            bdb.execute('COMMIT')
        # Open a transaction which we'll commit.
        bdb.execute('BEGIN')
        try:
            with pytest.raises(bayeslite.BayesDBTxnError):
                bdb.execute('BEGIN')
        finally:
            bdb.execute('COMMIT')
        with pytest.raises(bayeslite.BayesDBTxnError):
            bdb.execute('ROLLBACK')
        with pytest.raises(bayeslite.BayesDBTxnError):
            bdb.execute('COMMIT')
        # Make sure ROLLBACK undoes the effects of the transaction.
        bdb.execute('BEGIN')
        try:
            with open(fname, 'rU') as f:
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True)
            bdb.execute('SELECT * FROM t').fetchall()
            guess.bayesdb_guess_population(bdb, 'p', 't')
            bdb.execute('ESTIMATE * FROM p').fetchall()
        finally:
            bdb.execute('ROLLBACK')
        with pytest.raises(apsw.SQLError):
            bdb.execute('SELECT * FROM t')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('ESTIMATE * FROM p')
        # Make sure CREATE and DROP both work in the transaction.
        bdb.execute('BEGIN')
        try:
            with open(fname, 'rU') as f:
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True)
            bdb.execute('SELECT * FROM t').fetchall()
            guess.bayesdb_guess_population(bdb, 'p', 't')
            bdb.execute('ESTIMATE * FROM p').fetchall()
            with pytest.raises(bayeslite.BQLError):
                bdb.execute('DROP TABLE t')
            bdb.execute('DROP POPULATION p')
            with pytest.raises(bayeslite.BQLError):
                bdb.execute('ESTIMATE * FROM p')
            bdb.execute('DROP TABLE t')
            with pytest.raises(apsw.SQLError):
                bdb.execute('SELECT * FROM t')
        finally:
            bdb.execute('ROLLBACK')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('ESTIMATE * FROM p')
        with pytest.raises(apsw.SQLError):
            bdb.execute('SELECT * FROM t')
        # Make sure CREATE and DROP work even if we commit.
        bdb.execute('BEGIN')
        try:
            with open(fname, 'rU') as f:
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True)
            bdb.execute('SELECT * FROM t').fetchall()
            guess.bayesdb_guess_population(bdb, 'p', 't')
            bdb.execute('ESTIMATE * FROM p').fetchall()
            with pytest.raises(bayeslite.BQLError):
                bdb.execute('DROP TABLE t')
            bdb.execute('DROP POPULATION p')
            with pytest.raises(bayeslite.BQLError):
                bdb.execute('ESTIMATE * FROM p')
            bdb.execute('DROP TABLE t')
            with pytest.raises(apsw.SQLError):
                bdb.execute('SELECT * FROM t')
        finally:
            bdb.execute('COMMIT')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('ESTIMATE * FROM p')
        with pytest.raises(apsw.SQLError):
            bdb.execute('SELECT * FROM t')
        # Make sure CREATE persists if we commit.
        bdb.execute('BEGIN')
        try:
            with open(fname, 'rU') as f:
                bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
                    create=True)
            bdb.execute('SELECT * FROM t').fetchall()
            guess.bayesdb_guess_population(bdb, 'p', 't')
            bdb.execute('ESTIMATE * FROM p').fetchall()
        finally:
            bdb.execute('COMMIT')
        bdb.execute('SELECT * FROM t').fetchall()
        bdb.execute('ESTIMATE * FROM p').fetchall()
        # Make sure bdb.transaction works, rolls back on exception,
        # and handles nesting correctly in the context of savepoints.
        try:
            with bdb.transaction():
                bdb.sql_execute('create table quagga(x)')
                raise StopIteration
        except StopIteration:
            pass
        with pytest.raises(apsw.SQLError):
            bdb.execute('select * from quagga')
        with bdb.transaction():
            with bdb.savepoint():
                with bdb.savepoint():
                    pass
        with bdb.savepoint():
            with pytest.raises(bayeslite.BayesDBTxnError):
                with bdb.transaction():
                    pass
        # XXX To do: Make sure other effects (e.g., analysis) get
        # rolled back by ROLLBACK.
def test_predprob_null():
    """Predictive probability of NULL targets.

    A NULL target value yields a NULL predictive probability; a non-NULL
    target yields a numeric one.  For multi-column queries the result is
    NULL only when every requested value is NULL.  When every constraint
    in a GIVEN clause is NULL, the result matches the unconstrained query.
    """
    backend = CGPM_Backend({}, multiprocess=False)
    with test_core.bayesdb(backend=backend) as bdb:
        bdb.sql_execute('''
            create table foo (
                id integer primary key not null,
                x numeric,
                y numeric,
                z numeric
            )
        ''')
        # Rows 4 and 12 have NULL x; rows 7 and 8 have NULL z; row 8 also
        # has NULL y.
        for insert_stmt in [
            "insert into foo values (1, 1, 'strange', 3)",
            "insert into foo values (2, 1.2, 'strange', 1)",
            "insert into foo values (3, 0.8, 'strange', 3)",
            "insert into foo values (4, NULL, 'strange', 9)",
            "insert into foo values (5, 73, 'up', 11)",
            "insert into foo values (6, 80, 'up', -1)",
            "insert into foo values (7, 60, 'up', NULL)",
            "insert into foo values (8, 67, NULL, NULL)",
            "insert into foo values (9, 3.1415926, 'down', 1)",
            "insert into foo values (10, 1.4142135, 'down', 0)",
            "insert into foo values (11, 2.7182818, 'down', -1)",
            "insert into foo values (12, NULL, 'down', 10)",
        ]:
            bdb.sql_execute(insert_stmt)
        bdb.execute('''
            create population pfoo for foo (
                id ignore;
                x numerical;
                y nominal;
                z numerical;
            )
        ''')
        bdb.execute('create generator pfoo_cc for pfoo using cgpm;')
        bdb.execute('initialize 1 model for pfoo_cc')
        bdb.execute('analyze pfoo_cc for 1 iteration')

        def lone_value(cursor):
            # Expect exactly one row with exactly one column; return it.
            rows = cursor.fetchall()
            assert len(rows) == 1
            assert len(rows[0]) == 1
            return rows[0][0]

        # Null value => null predictive probability.
        assert bdb.execute('estimate predictive probability of x'
                ' from pfoo where id = 4;').fetchall() == \
            [(None,)]
        # Nonnull value => nonnull predictive probability.
        assert isinstance(
            lone_value(bdb.execute('estimate predictive probability of x'
                ' from pfoo where id = 5')),
            (int, float))
        # All null values => null predictive probability.
        assert bdb.execute('estimate predictive probability of (y, z)'
                ' from pfoo where id = 8;').fetchall() == \
            [(None,)]
        # Some nonnull values => nonnull predictive probability.
        assert isinstance(
            lone_value(bdb.execute('estimate predictive probability of (x, z)'
                ' from pfoo where id = 8;')),
            (int, float))
        # All NULL constraints => same result regardless of given clause.
        c0 = bdb.execute('estimate predictive probability of x'
            ' from pfoo where id = 8;')
        v0 = cursor_value(c0)
        assert v0 is not None
        c1 = bdb.execute('estimate predictive probability of x given (y, z)'
            ' from pfoo where id = 8;')
        v1 = cursor_value(c1)
        assert relerr(v0, v1) < 0.0001
def test_guess_all():
    """Guessing stattypes over every column of a small table succeeds."""
    with test_core.bayesdb() as bdb:
        bdb.sql_execute('create table foo (x numeric, y numeric, z numeric)')
        for row_sql in ('insert into foo values (1, 2, 3)',
                        'insert into foo values (4, 5, 6)'):
            bdb.sql_execute(row_sql)
        # XXX GUESS(*)
        guess.bayesdb_guess_population(bdb, 'pfoo', 'foo')
def test_misc_errors():
    """Grab-bag of error conditions that must raise the right exception.

    Covers name collisions (tables/populations/generators), unknown
    populations/variables/backends/stattypes, unimplemented operations,
    parameter-binding mismatches, and error-message content for an
    unknown similarity context column.  Savepoints isolate the probes
    that would otherwise mutate state.
    """
    with test_core.t1() as (bdb, _population_id, _generator_id):
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('create table t1 as SELECT 1 FROM t1'
                # t1 already exists as a table.
                ' limit 1')
        with pytest.raises(bayeslite.BQLError):
            # t1 already exists as a table.
            bdb.execute('create table t1 as simulate weight from p1'
                ' limit 1')
        with pytest.raises(bayeslite.BQLError):
            # t1x does not exist as a population.
            bdb.execute('create table t1_sim as simulate weight from t1x'
                ' limit 1')
        with pytest.raises(bayeslite.BQLError):
            # p1 does not have a variable waught.
            bdb.execute('create table t1_sim as simulate waught from p1'
                ' limit 1')
        with pytest.raises(bayeslite.BQLError):
            # p1 does not have a variable agee.
            bdb.execute('create table t1_sim as simulate weight from p1'
                ' given agee = 42 limit 1')
        with bdb.savepoint():
            bdb.sql_execute('create table t2(x)')
            with pytest.raises(bayeslite.BQLError):
                # t1 already exists as a table.
                bdb.execute('alter table t2 rename to t1')
        with pytest.raises(NotImplementedError):
            # Renaming columns is not yet implemented.
            bdb.execute('alter table t1 rename weight to mass')
        with pytest.raises(bayeslite.BQLError):
            # xcat does not exist as a backend.
            bdb.execute('create generator p1_xc for p1 using xcat()')
        with pytest.raises(bayeslite.BQLError):
            # p1 already exists as a population.
            bdb.execute('create generator p1_cc for p1;')
        with pytest.raises(bayeslite.BQLError):
            # multinomial is not a known statistical type.
            bdb.execute('''
                create population q1 for t1(
                    ignore id, label, weight;
                    weight multinomial
                )
            ''')
        with pytest.raises(bayeslite.BQLError):
            # p1_xc does not exist as a generator.
            bdb.execute('alter generator p1_xc rename to p1_xcat')
        with bdb.savepoint():
            bdb.execute('create generator p1_xc for p1;')
            with pytest.raises(bayeslite.BQLError):
                # p1_xc already exists as a generator.
                bdb.execute('alter generator p1_cc rename to p1_xc')
        with pytest.raises(bayeslite.BQLParseError):
            # WAIT is not allowed.
            bdb.execute('analyze p1_cc for 1 iteration wait')
        with bdb.savepoint():
            bdb.execute('initialize 1 model for p1_cc')
            bdb.execute('analyze p1_cc for 1 iteration')
            bdb.execute('initialize 1 model for p1_xc')
            bdb.execute('analyze p1_xc for 1 iteration')
            with pytest.raises(apsw.SQLError):
                bdb.execute('select'
                    ' nonexistent((simulate age from p1 limit 1));')
        # Parameter-binding mismatches: unknown names, extra names,
        # too few or too many positional bindings, wrong bindings type.
        with pytest.raises(ValueError):
            bdb.execute('select :x', {'y': 42})
        with pytest.raises(ValueError):
            bdb.execute('select :x', {'x': 53, 'y': 42})
        with pytest.raises(ValueError):
            bdb.execute('select ?, ?', (1,))
        with pytest.raises(ValueError):
            bdb.execute('select ?', (1, 2))
        with pytest.raises(TypeError):
            bdb.execute('select ?', 42)
        with pytest.raises(NotImplementedError):
            bdb.execute('infer explicit predict age confidence ac, *'
                ' from p1')
        with pytest.raises(NotImplementedError):
            bdb.execute('infer explicit predict age confidence ac,'
                ' t1.(select age from t1 limit 1) from p1')
        with pytest.raises(bayeslite.BQLError):
            try:
                bdb.execute('estimate similarity to (rowid=1)'
                    ' in the context of agee from p1')
            except bayeslite.BQLError as e:
                # Check the error message, then re-raise for pytest.raises.
                assert 'No such columns in population:' in str(e)
                raise
def test_nested_simulate():
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        bdb.execute('analyze p1_cc for 1 iteration')
        bdb.execute('select (simulate age from p1 limit 1),'
            ' (simulate weight from p1 limit 1)').fetchall()
        # The two nested SIMULATE subqueries consumed temp tables 0 and 1,
        # which must both be gone now; the next name handed out is number 2.
        assert bdb.temp_table_name() == 'bayesdb_temp_2'
        assert not core.bayesdb_has_table(bdb, 'bayesdb_temp_0')
        assert not core.bayesdb_has_table(bdb, 'bayesdb_temp_1')
        nested_query = ('simulate weight from p1'
            ' given age = (simulate age from p1 limit 1) limit 1')
        bdb.execute(nested_query).fetchall()
        # Check that cursor unwinding does not raise.  We call __del__
        # directly instead of using del: del suppresses exceptions raised
        # inside the finalizer, and propagating them is exactly what this
        # test is for.  A side effect is that a later implicit del of the
        # half-finalized cursor may print to stderr; as long as the test
        # itself passes, that message is harmless.
        bdb.execute(nested_query).__del__()
def test_checkpoint__ci_slow():
    with test_core.t1() as (bdb, population_id, generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        bdb.execute('analyze p1_cc for 10 iterations checkpoint 1 iteration')
        # Checkpointing by seconds is not supported, neither for a
        # seconds-bounded nor for an iterations-bounded analysis.  After
        # each failed attempt, start over with a fresh model.
        for unsupported_query in (
                'analyze p1_cc for 5 seconds checkpoint 1 second',
                'analyze p1_cc for 5 iterations checkpoint 1 second'):
            with pytest.raises(NotImplementedError):
                bdb.execute(unsupported_query)
            bdb.execute('drop models from p1_cc')
            bdb.execute('initialize 1 model for p1_cc')
        # A checkpoint interval longer than the analysis is fine.
        bdb.execute('analyze p1_cc for 1 iteration checkpoint 2 iterations')
def test_infer_confidence__ci_slow():
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        bdb.execute('analyze p1_cc for 1 iteration')
        # One INFER EXPLICIT projection mixing plain columns, aliases, a
        # literal, and a PREDICT with an explicit confidence column.
        bdb.execute(
            'infer explicit rowid, rowid as another_rowid, 4, age,'
            ' predict age as age_inf confidence age_conf from p1').fetchall()
def test_infer_as_estimate():
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        bdb.execute('analyze p1_cc for 1 iteration')
        # An ESTIMATE-style expression is also accepted in INFER EXPLICIT.
        cursor = bdb.execute(
            'infer explicit predictive probability of age from p1')
        cursor.fetchall()
def test_infer_error():
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        # A valid variable works even without prior ANALYZE.
        bdb.execute(
            'infer explicit predict age confidence age_conf from p1'
        ).fetchall()
        # "agee" is not a variable of the population.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute(
                'infer explicit predict agee confidence age_conf from p1'
            ).fetchall()
def test_estimate_by():
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bdb.execute('initialize 1 model for p1_cc')
        bdb.execute('analyze p1_cc for 1 iteration')
        # Row-dependent estimands are not allowed with BY.
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('estimate predictive probability of age by p1')
        with pytest.raises(bayeslite.BQLError):
            bdb.execute(
                'estimate similarity to (rowid=1) in the context of age by p1')
        def single_row(query, bindings=None):
            # Every BY query should produce exactly one result row.
            return len(bdb.execute(query, bindings=bindings).fetchall()) == 1
        for query in (
                'estimate probability density of age = 42 by p1',
                'estimate dependence probability of age with weight by p1',
                'estimate mutual information of age with weight by p1',
                'estimate correlation of age with weight by p1',
                'estimate correlation pvalue of age with weight by p1'):
            assert single_row(query)
        rowid = bdb.execute('select min(rowid) from t1').fetchall()[0][0]
        assert single_row('''
            estimate similarity of (rowid=?) to (rowid=?)
                in the context of weight by p1
        ''', (rowid, rowid,))
def test_empty_cursor():
    with bayeslite.bayesdb_open() as bdb:
        assert bdb.execute('SELECT 0').connection == bdb
        # All of the following statements return no rows, so each should
        # yield an empty cursor.
        empty(bdb.execute('BEGIN'))
        empty(bdb.execute('COMMIT'))
        empty(bdb.sql_execute('CREATE TABLE t(x, y, z)'))
        for values in ('(1,2,3)', '(4,5,6)', '(7,8,9)'):
            empty(bdb.sql_execute('INSERT INTO t VALUES' + values))
        for bql in (
                'CREATE POPULATION p FOR t (IGNORE z,y; x NOMINAL)',
                'CREATE GENERATOR p_cc FOR p;',
                'INITIALIZE 1 MODEL FOR p_cc',
                'DROP GENERATOR p_cc',
                'DROP POPULATION p',
                'DROP TABLE t'):
            empty(bdb.execute(bql))
def test_create_generator_ifnotexists():
    """CREATE GENERATOR IF NOT EXISTS is idempotent; plain CREATE is not."""
    # XXX Test other backends too, because they have a role in ensuring that
    # this works.  Their create_generator will still be called.
    #
    # [TRC 20160627: The above comment appears to be no longer true --
    # if it was ever true.]
    for using_clause in ('cgpm()',):
        with bayeslite.bayesdb_open() as bdb:
            bdb.sql_execute('CREATE TABLE t(x, y, z)')
            bdb.sql_execute('INSERT INTO t VALUES(1,2,3)')
            bdb.execute('''
                CREATE POPULATION p FOR t (
                    x NUMERICAL;
                    y NUMERICAL;
                    z NOMINAL;
                )
            ''')
            # Creating twice with IF NOT EXISTS must succeed both times.
            for _i in (0, 1):
                bdb.execute('CREATE GENERATOR IF NOT EXISTS p_cc FOR p USING '
                    + using_clause)
            # Without IF NOT EXISTS, re-creating the generator must fail.
            # (pytest.raises instead of try/assert False: an assert is
            # silently stripped under `python -O`.)
            with pytest.raises(bayeslite.BQLError):
                bdb.execute('CREATE GENERATOR p_cc FOR p USING ' + using_clause)
def test_bql_rand():
    with bayeslite.bayesdb_open() as bdb:
        bdb.sql_execute('CREATE TABLE frobotz(x)')
        for _ in range(10):
            bdb.sql_execute('INSERT INTO frobotz VALUES(2)')
        observed = bdb.execute(
            'SELECT bql_rand() FROM frobotz LIMIT 10;').fetchall()
        # "The" expected random numbers: the internal PRNG is seeded to 0,
        # so the stream is fully deterministic.
        expected = [
            (0.28348770982811367,), (0.4789774612650598,),
            (0.07824908989551316,), (0.6091223239372148,),
            (0.03906608409906187,), (0.3690599096081546,),
            (0.8223420512129717,), (0.7777771914916722,),
            (0.061856771629497986,), (0.6492586781908201,)]
        assert observed == expected
def test_bql_rand2():
    # Seed the PRNG explicitly (four little-endian 64-bit words) and check
    # that bql_rand() reproduces the corresponding deterministic stream.
    seed = struct.pack('<QQQQ', 0, 0, 0, 3)
    with bayeslite.bayesdb_open(seed=seed) as bdb:
        bdb.sql_execute('CREATE TABLE frobotz(x)')
        for _ in range(10):
            bdb.sql_execute('INSERT INTO frobotz VALUES(2)')
        observed = bdb.execute(
            'SELECT bql_rand() FROM frobotz LIMIT 10;').fetchall()
        expected = [
            (0.8351877951287725,), (0.9735099617243271,),
            (0.026142315910925418,), (0.09380653289687524,),
            (0.1097050387582088,), (0.33154896906379605,),
            (0.4579314980719317,), (0.09072802203491703,),
            (0.5276180968829105,), (0.9993280772797679,)]
        assert observed == expected
class MockTracerOneQuery(bayeslite.IBayesDBTracer):
    """Tracer double that counts lifecycle callbacks for one known query.

    Each callback checks that it refers to the expected query id and
    increments the corresponding `*_calls` counter, which tests then
    inspect directly.
    """

    def __init__(self, q, qid):
        self.q = q
        self.qid = qid
        self.start_calls = 0
        self.ready_calls = 0
        self.error_calls = 0
        self.finished_calls = 0
        self.abandoned_calls = 0

    def _check_qid(self, qid):
        # All callbacks must be about the single query we are tracking.
        assert qid == self.qid

    def start(self, qid, query, bindings):
        self._check_qid(qid)
        assert query == self.q
        assert bindings == ()
        self.start_calls += 1

    def ready(self, qid, _cursor):
        self._check_qid(qid)
        self.ready_calls += 1

    def error(self, qid, _e):
        self._check_qid(qid)
        self.error_calls += 1

    def finished(self, qid):
        self._check_qid(qid)
        self.finished_calls += 1

    def abandoned(self, qid):
        self._check_qid(qid)
        self.abandoned_calls += 1
def test_tracing_smoke():
    with test_core.t1() as (bdb, _population_id, _generator_id):
        def expect_counts(tracer, start, ready, error, finished, abandoned):
            # Snapshot of all five tracer counters at once.
            assert tracer.start_calls == start
            assert tracer.ready_calls == ready
            assert tracer.error_calls == error
            assert tracer.finished_calls == finished
            assert tracer.abandoned_calls == abandoned
        q = 'SELECT * FROM t1'
        tracer = MockTracerOneQuery(q, 1)
        bdb.trace(tracer)
        cursor = bdb.execute(q)
        # Query started and a cursor is ready, but nothing consumed yet.
        expect_counts(tracer, 1, 1, 0, 0, 0)
        cursor.fetchall()
        # Draining the cursor finishes the query.
        expect_counts(tracer, 1, 1, 0, 1, 0)
        del cursor
        # Dropping the cursor abandons it.
        expect_counts(tracer, 1, 1, 0, 1, 1)
        bdb.untrace(tracer)
        # XXX Make sure the whole cursor API works.
        q = 'SELECT 42'
        tracer = MockTracerOneQuery(q, 2)
        bdb.trace(tracer)
        cursor = bdb.execute(q)
        expect_counts(tracer, 1, 1, 0, 0, 0)
        assert cursor.fetchvalue() == 42
        expect_counts(tracer, 1, 1, 0, 1, 0)
        del cursor
        expect_counts(tracer, 1, 1, 0, 1, 1)
def test_tracing_error_smoke():
    with test_core.t1() as (bdb, _population_id, _generator_id):
        q = 'SELECT * FROM wrong'
        tracer = MockTracerOneQuery(q, 1)
        bdb.trace(tracer)
        # The table does not exist, so execution fails before any cursor
        # becomes ready: only start and error should have fired.
        with pytest.raises(apsw.SQLError):
            bdb.execute(q)
        assert (tracer.start_calls, tracer.ready_calls, tracer.error_calls,
                tracer.finished_calls, tracer.abandoned_calls) \
            == (1, 0, 1, 0, 0)
class Boom(Exception):
    """Distinctive exception raised by ErroneousBackend in tracing tests."""
    pass
class ErroneousBackend(troll.TrollBackend):
    """Backend whose logpdf_joint starts raising after several calls."""

    def __init__(self):
        self.call_ct = 0

    def name(self):
        return 'erroneous'

    def logpdf_joint(self, *_args, **_kwargs):
        # Succeed for the first several calls so the failure is not hit
        # during sqlite's prefetch, then blow up on every later call.
        if self.call_ct > 10:
            raise Boom()
        self.call_ct += 1
        return 0
def test_tracing_execution_error_smoke():
    with test_core.t1() as (bdb, _population_id, _generator_id):
        bayeslite.bayesdb_register_backend(bdb, ErroneousBackend())
        bdb.execute('DROP GENERATOR p1_cc')
        bdb.execute('CREATE GENERATOR p1_err FOR p1 USING erroneous()')
        q = 'ESTIMATE PREDICTIVE PROBABILITY OF age FROM p1'
        tracer = MockTracerOneQuery(q, 1)
        bdb.trace(tracer)
        cursor = bdb.execute(q)
        # Query started and a cursor is ready; the backend has not failed yet.
        assert (tracer.start_calls, tracer.ready_calls, tracer.error_calls,
                tracer.finished_calls, tracer.abandoned_calls) \
            == (1, 1, 0, 0, 0)
        # The backend raises mid-fetch; the tracer must observe the error
        # and the query must never be reported as finished.
        with pytest.raises(Boom):
            cursor.fetchall()
        assert (tracer.start_calls, tracer.ready_calls, tracer.error_calls,
                tracer.finished_calls, tracer.abandoned_calls) \
            == (1, 1, 1, 0, 0)
def test_pdf_var():
    with test_core.t1() as (bdb, population_id, _generator_id):
        bdb.execute('initialize 6 models for p1_cc;')
        # The same variable may appear as both the target and the value.
        query = 'estimate probability density of label = label from p1'
        bdb.execute(query).fetchall()
        assert bql2sql(query) == \
            'SELECT bql_pdf_joint(1, NULL, NULL, 1, "label") FROM "t1";'
| 47.713213
| 137
| 0.566762
| 15,353
| 127,108
| 4.572982
| 0.047483
| 0.044581
| 0.033956
| 0.037744
| 0.831631
| 0.799584
| 0.763335
| 0.723511
| 0.672264
| 0.634434
| 0
| 0.031179
| 0.309123
| 127,108
| 2,663
| 138
| 47.73113
| 0.768322
| 0.046763
| 0
| 0.578008
| 0
| 0.025726
| 0.485851
| 0.025282
| 0
| 0
| 0
| 0
| 0.127386
| 1
| 0.035685
| false
| 0.002905
| 0.007469
| 0.00083
| 0.047303
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
68a71839020ce7ad671d78acb47c03813f6d2f0f
| 177
|
py
|
Python
|
django_private_beta/urls.py
|
andytwoods/Django-Private-Beta
|
aafebab96bc5126c78f76de68780fa03f9825191
|
[
"MIT"
] | null | null | null |
django_private_beta/urls.py
|
andytwoods/Django-Private-Beta
|
aafebab96bc5126c78f76de68780fa03f9825191
|
[
"MIT"
] | null | null | null |
django_private_beta/urls.py
|
andytwoods/Django-Private-Beta
|
aafebab96bc5126c78f76de68780fa03f9825191
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from . import views
# URL namespace, e.g. reverse('private_beta:private_beta').
app_name = 'private_beta'
urlpatterns = [
    # Single entry point for the private-beta landing view.
    url(r'^private_beta/', views.PrivateBeta.as_view(), name='private_beta'),
]
| 19.666667
| 77
| 0.723164
| 25
| 177
| 4.92
| 0.64
| 0.268293
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 177
| 8
| 78
| 22.125
| 0.803922
| 0
| 0
| 0
| 0
| 0
| 0.214689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
68b37594fa5da7062f2322cb14055f192960a67e
| 56
|
py
|
Python
|
tests/schema/data/__init__.py
|
datapio/klander
|
d862bb1640a6cf4c0010246e1d53316103321a4d
|
[
"Apache-2.0"
] | 2
|
2021-05-14T22:00:55.000Z
|
2021-09-17T20:09:17.000Z
|
tests/schema/data/__init__.py
|
datapio/klander
|
d862bb1640a6cf4c0010246e1d53316103321a4d
|
[
"Apache-2.0"
] | null | null | null |
tests/schema/data/__init__.py
|
datapio/klander
|
d862bb1640a6cf4c0010246e1d53316103321a4d
|
[
"Apache-2.0"
] | 1
|
2021-07-16T08:35:43.000Z
|
2021-07-16T08:35:43.000Z
|
from .state_reconciler import *
from .response import *
| 18.666667
| 31
| 0.785714
| 7
| 56
| 6.142857
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 56
| 2
| 32
| 28
| 0.895833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d7a23bc8b1c197bcfdfd5c1b5353d51faa932b74
| 64
|
py
|
Python
|
videotracker/functions/__init__.py
|
lysogeny/videotracker
|
581e7e461525ed83c47fcbf7ff94749e6185691a
|
[
"MIT"
] | 1
|
2021-02-24T00:02:53.000Z
|
2021-02-24T00:02:53.000Z
|
videotracker/functions/__init__.py
|
lysogeny/videotracker
|
581e7e461525ed83c47fcbf7ff94749e6185691a
|
[
"MIT"
] | null | null | null |
videotracker/functions/__init__.py
|
lysogeny/videotracker
|
581e7e461525ed83c47fcbf7ff94749e6185691a
|
[
"MIT"
] | null | null | null |
from .functions import *
from . import abc
from . import params
| 16
| 24
| 0.75
| 9
| 64
| 5.333333
| 0.555556
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 64
| 3
| 25
| 21.333333
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d7e84f699f8edcb6ab82c3fe4a4ac65f9041434b
| 81
|
py
|
Python
|
crudlib/__init__.py
|
zxyle/TinyCRUD
|
1cdf858f435f0bab89b0ce423c0259d073cc371b
|
[
"MIT"
] | 2
|
2019-07-25T23:35:10.000Z
|
2019-08-14T13:09:41.000Z
|
crudlib/__init__.py
|
zxyle/TinyCRUD
|
1cdf858f435f0bab89b0ce423c0259d073cc371b
|
[
"MIT"
] | 8
|
2019-12-16T07:28:06.000Z
|
2020-09-13T10:29:06.000Z
|
crudlib/__init__.py
|
zxyle/TinyCRUD
|
1cdf858f435f0bab89b0ce423c0259d073cc371b
|
[
"MIT"
] | null | null | null |
from .mysql import MySQL
from .sqlite import SQLite
from .mariadb import MariaDB
| 20.25
| 28
| 0.814815
| 12
| 81
| 5.5
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 81
| 3
| 29
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d7efe8d5e4c2b2afe3b3f9460a06c41317be8270
| 153
|
py
|
Python
|
testdocker/cli/__init__.py
|
sip-li/testdocker
|
ab01ed57d052386017dcf33310c52766d6b3a3fb
|
[
"Apache-2.0"
] | null | null | null |
testdocker/cli/__init__.py
|
sip-li/testdocker
|
ab01ed57d052386017dcf33310c52766d6b3a3fb
|
[
"Apache-2.0"
] | null | null | null |
testdocker/cli/__init__.py
|
sip-li/testdocker
|
ab01ed57d052386017dcf33310c52766d6b3a3fb
|
[
"Apache-2.0"
] | null | null | null |
"""
testdocker.cli
~~~~~~~~~~~~~~
CLI interface package for testdocker.
:copyright: (c) 2017 by Joe Black.
:license: Apache2.
"""
from . import main
| 11.769231
| 37
| 0.633987
| 18
| 153
| 5.388889
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039063
| 0.163399
| 153
| 12
| 38
| 12.75
| 0.71875
| 0.803922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
041772f7dc8d537acde9472a75cc08332defcaa2
| 173
|
py
|
Python
|
projects/TextDet/textdetection/__init__.py
|
AzeroGYH/detectron2_crpn
|
617d6a67a95945915e36e0fad4a7739331881bbe
|
[
"Apache-2.0"
] | null | null | null |
projects/TextDet/textdetection/__init__.py
|
AzeroGYH/detectron2_crpn
|
617d6a67a95945915e36e0fad4a7739331881bbe
|
[
"Apache-2.0"
] | null | null | null |
projects/TextDet/textdetection/__init__.py
|
AzeroGYH/detectron2_crpn
|
617d6a67a95945915e36e0fad4a7739331881bbe
|
[
"Apache-2.0"
] | null | null | null |
#
# Modified by GYH
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config import add_textdetection_config
from .modeling import TextROIHeads
| 24.714286
| 70
| 0.791908
| 23
| 173
| 5.869565
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144509
| 173
| 7
| 71
| 24.714286
| 0.912162
| 0.485549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
041bab6ee68b38136895b05d676af598dec6d4bb
| 10,708
|
py
|
Python
|
main.py
|
Aus-miner/Miner-Model
|
f7abc9f74cec00f82a2df6e359363670a64ad72f
|
[
"MIT"
] | 18
|
2021-04-18T03:51:22.000Z
|
2022-03-16T13:14:36.000Z
|
main.py
|
Aus-miner/Miner-Model
|
f7abc9f74cec00f82a2df6e359363670a64ad72f
|
[
"MIT"
] | 1
|
2021-05-04T14:27:02.000Z
|
2021-05-04T14:27:02.000Z
|
main.py
|
Aus-miner/Miner-Model
|
f7abc9f74cec00f82a2df6e359363670a64ad72f
|
[
"MIT"
] | 8
|
2021-05-03T19:24:19.000Z
|
2022-02-20T22:20:18.000Z
|
import plotly.express as px
import plotly.io as pio
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import numpy as np
from agents import *
from generators import *
from CMDataLoader import CMDataLoader
from Simulator import Simulator
from plotutils import update_layout_wrapper
import config
import constants
import random
# Earlier palette candidates, kept for reference:
# my_palette = ["#264653","#9D1DC8","#287271", "#645DAC","#636EFA", "#ECA400","#FE484E","#8484E8", "#03b800" ,"#9251e1","#F4A261"]
# my_palette = ["#54478c","#9D1DC8","#2c699a","#048ba8","#0db39e","#16db93","#83e377","#b9e769","#efea5a","#f1c453","#f29e4c"]
# Default 11-color palette for multi-series plots.
my_palette = ["#1f00a7","#9d1dc8","#00589f","#009b86","#00a367","#67a300","#645dac","#eca400","#fd7e00","#b6322b", "#FE484E"]
# Two-color palette used for the S9-vs-S19 hardware-comparison run in __main__.
hardware_palette = ["#009b86", "#9D1DC8"]
# Palette for the electricity-cost (opex) comparison plots.
opex_palette = ["#9D1DC8","#264653","#8484E8"]
# Single color for the environment (price / hashrate) line plots.
primary_color = ["#9d1dc8"]
def save_csvs(prices, global_hash_rate, n_trials, user_positions, file_suffix):
    # Persist the averaged environment series (price, hashrate) and the
    # per-user position table as CSVs under plots/<file_suffix>/.
    # NOTE(review): assumes the plots/<file_suffix>/ directory already
    # exists -- to_csv does not create it; confirm against the callers.
    pd.DataFrame({'price': prices, 'hashrate': global_hash_rate, 'trials': n_trials}).to_csv(f"plots/{file_suffix}/env_values_{file_suffix}.csv", index = False)
    user_positions.to_csv(f"plots/{file_suffix}/user_values_{file_suffix}.csv", index = False)
def get_environment_plots(prices, global_hash_rate, n_trials, title_suffix):
    """Return (price_fig, hashrate_fig) line plots of the simulated series."""
    def day_series_figure(series, y_label, title):
        # One day per index; all environment plots share size and color.
        return update_layout_wrapper(px.line(
            x=list(range(len(series))), y=series,
            labels={"y": y_label, "x": "Day"},
            title=title,
            color_discrete_sequence=primary_color,
            width=1600, height=900))
    price_fig = day_series_figure(
        prices, "Price (USD)",
        f"Simulated Bitcoin Price over {n_trials} Trials {title_suffix}")
    hashrate_fig = day_series_figure(
        global_hash_rate, "Hash Rate (EH/s)",
        f"Simulated Bitcoin Network Hash Rate over {n_trials} Trials {title_suffix}")
    return (price_fig, hashrate_fig)
def get_user_plots(user_positions, n_trials, title_suffix, elec_cost, palette):
    """Return (long_btc_fig, sell_daily_fig) position plots at one elec cost."""
    rows = user_positions.loc[user_positions.elec_cost == elec_cost]
    def strategy_figure(strategy, strategy_label):
        # Plot total position value per day, one line per machine type.
        data = rows.loc[rows.strategy == strategy.value].sort_values(by=['day'])
        return update_layout_wrapper(px.line(
            data,
            x="day", y="total_position_usd", color="machine_type",
            labels={"total_position_usd": "Simulated Position (USD)",
                    "day": "Day", "machine_type": "Machine Type "},
            title=f"Simulated Position Value over {n_trials} Trials "
                  f"{title_suffix}, {strategy_label}, ${elec_cost} per kWh",
            color_discrete_sequence=palette,
            width=1600, height=900))
    long_btc_fig = strategy_figure(constants.Strategy.LONG_BTC, "Long BTC")
    sell_daily_fig = strategy_figure(
        constants.Strategy.SELL_DAILY, "Selling Daily")
    return (long_btc_fig, sell_daily_fig)
def get_summary_plots(price_params, fee_params, block_subsidy, n_trials, title_suffix, file_suffix, user_machine_prices = config.machine_prices, elec_costs = [0.04, 0.07], palette = my_palette):
    # Run the mining simulation for n_trials and write, under
    # plots/<file_suffix>/, the environment plots (price, hashrate), one
    # pair of user position plots per electricity cost, and the raw CSVs.
    # NOTE(review): elec_costs=[0.04, 0.07] is a mutable default argument
    # and user_machine_prices aliases config.machine_prices -- safe only
    # if no callee mutates them; confirm.
    init_prices = PriceGenerator(price_params).generate_prices()
    user_miners_long_btc, user_miners_sell_daily = UserMinerGenerator().generate_user_miners(machine_prices = user_machine_prices, elec_costs = elec_costs)
    env_miners = MinerGenerator().generate_miner_distribution()
    sim = Simulator(env_miners = env_miners,
                    user_miners_long_btc = user_miners_long_btc,
                    user_miners_sell_daily = user_miners_sell_daily,
                    prices = init_prices,
                    price_params = price_params,
                    fee_params = fee_params,
                    block_subsidy = block_subsidy)
    sim.run_simulation_n_trials(n_trials)
    # Trial-averaged outputs are what gets plotted and saved.
    user_positions = sim.get_avg_user_positions()
    prices = sim.get_avg_prices()
    global_hash_rate = sim.get_avg_global_hash_rate()
    price_fig, hashrate_fig = get_environment_plots(prices, global_hash_rate, n_trials, title_suffix)
    price_fig.write_image(f"plots/{file_suffix}/price_plot_{file_suffix}.png", scale=8)
    hashrate_fig.write_image(f"plots/{file_suffix}/hashrate_plot_{file_suffix}.png", scale=8)
    # One (long-BTC, sell-daily) plot pair per observed electricity cost;
    # the cost in cents is appended to the filename.
    for elec_cost in user_positions.elec_cost.unique():
        user_figs = get_user_plots(user_positions, n_trials, title_suffix, elec_cost, palette)
        user_figs[0].write_image(f"plots/{file_suffix}/long_btc_plot_{file_suffix}_{int(elec_cost * 100)}.png", scale=8)
        user_figs[1].write_image(f"plots/{file_suffix}/sell_daily_plot_{file_suffix}_{int(elec_cost * 100)}.png", scale=8)
    save_csvs(prices, global_hash_rate, n_trials, user_positions, file_suffix)
def get_user_opex_plots(user_positions, n_trials, title_suffix, machine_type, palette):
    """Return (long_btc_fig, sell_daily_fig) comparing elec costs for one machine."""
    rows = user_positions.loc[user_positions.machine_type == machine_type.value]
    def strategy_figure(strategy, strategy_label):
        # Plot total position value per day, one line per electricity cost.
        data = rows.loc[rows.strategy == strategy.value].sort_values(by=['day'])
        return update_layout_wrapper(px.line(
            data,
            x="day", y="total_position_usd", color="elec_cost",
            labels={"total_position_usd": "Simulated Position (USD)",
                    "day": "Day",
                    "elec_cost": "Electricity Cost (USD/kWh) "},
            title=f"Simulated Position Value over {n_trials} Trials using "
                  f"{machine_type.value} {title_suffix}, {strategy_label}",
            color_discrete_sequence=palette,
            width=1600, height=900))
    long_btc_fig = strategy_figure(constants.Strategy.LONG_BTC, "Long BTC")
    sell_daily_fig = strategy_figure(
        constants.Strategy.SELL_DAILY, "Selling Daily")
    return (long_btc_fig, sell_daily_fig)
def get_summary_plots_opex(price_params, fee_params, block_subsidy, n_trials, title_suffix, file_suffix, user_machine_prices = config.machine_prices, elec_costs = [0.04, 0.07], palette = opex_palette):
    # Variant of get_summary_plots that groups the user plots by machine
    # type (comparing electricity costs within each machine) instead of by
    # electricity cost.  Writes plots and CSVs under plots/<file_suffix>/.
    # NOTE(review): same mutable-default-argument caveat as
    # get_summary_plots (elec_costs list, shared config.machine_prices).
    init_prices = PriceGenerator(price_params).generate_prices()
    user_miners_long_btc, user_miners_sell_daily = UserMinerGenerator().generate_user_miners(machine_prices = user_machine_prices, elec_costs = elec_costs)
    env_miners = MinerGenerator().generate_miner_distribution()
    sim = Simulator(env_miners = env_miners,
                    user_miners_long_btc = user_miners_long_btc,
                    user_miners_sell_daily = user_miners_sell_daily,
                    prices = init_prices,
                    price_params = price_params,
                    fee_params = fee_params,
                    block_subsidy = block_subsidy)
    sim.run_simulation_n_trials(n_trials)
    # Trial-averaged outputs are what gets plotted and saved.
    user_positions = sim.get_avg_user_positions()
    prices = sim.get_avg_prices()
    global_hash_rate = sim.get_avg_global_hash_rate()
    price_fig, hashrate_fig = get_environment_plots(prices, global_hash_rate, n_trials, title_suffix)
    price_fig.write_image(f"plots/{file_suffix}/price_plot_{file_suffix}.png", scale=8)
    hashrate_fig.write_image(f"plots/{file_suffix}/hashrate_plot_{file_suffix}.png", scale=8)
    # One (long-BTC, sell-daily) plot pair per user machine type.
    for machine_type in user_machine_prices:
        user_figs = get_user_opex_plots(user_positions, n_trials, title_suffix, machine_type, palette)
        user_figs[0].write_image(f"plots/{file_suffix}/long_btc_plot_{file_suffix}_{machine_type.value}.png", scale=8)
        user_figs[1].write_image(f"plots/{file_suffix}/sell_daily_plot_{file_suffix}_{machine_type.value}.png", scale=8)
    save_csvs(prices, global_hash_rate, n_trials, user_positions, file_suffix)
if __name__ == '__main__':
    # Seed both RNGs so simulation output (and hence the plots) is
    # reproducible across runs.
    random.seed(1032009)
    np.random.seed(1032009)
    n_trials = 25
    fee_params = CMDataLoader.get_historical_fee_params()
    # Passed through to the simulator as the per-block subsidy.
    block_subsidy = 6.25
    historical_price_params = CMDataLoader.get_historical_price_params()
    # Scenario 1: price process with historically fitted parameters.
    get_summary_plots(historical_price_params, fee_params, block_subsidy, n_trials, "with Historical Parameters", "historical")
    # Scenario 2: same parameters but with the drift term forced negative.
    bearish_price_params = (historical_price_params[0], -1 * abs(historical_price_params[1]), historical_price_params[2])
    get_summary_plots(bearish_price_params, fee_params, block_subsidy, n_trials, "with Bearish Parameters", "bearish")
    # Scenario 3: zero drift with the volatility term scaled up by 1.25.
    corrections_price_params = (historical_price_params[0], 0, historical_price_params[2] * 1.25)
    get_summary_plots(corrections_price_params, fee_params, block_subsidy, n_trials, "in Bull Market with Corrections", "corrections")
    # Hardware comparison restricted to the Antminer S9 and S19.
    s9_s19_prices = {key: config.machine_prices[key] for key in [constants.MachineName.ANTMINER_S9, constants.MachineName.ANTMINER_S19]}
    get_summary_plots(historical_price_params, fee_params, block_subsidy, n_trials, "with Historical Parameters", "historical-machines", s9_s19_prices, [0.03], hardware_palette)
    get_summary_plots_opex(bearish_price_params, fee_params, block_subsidy, n_trials, "with Bearish Parameters", "bearish-opex", s9_s19_prices, [0.03, 0.04, 0.05], opex_palette)
| 67.345912
| 201
| 0.653063
| 1,319
| 10,708
| 4.912813
| 0.147081
| 0.031327
| 0.028086
| 0.024691
| 0.764352
| 0.750154
| 0.716512
| 0.716512
| 0.710494
| 0.705556
| 0
| 0.034305
| 0.240474
| 10,708
| 158
| 202
| 67.772152
| 0.762449
| 0.021573
| 0
| 0.467742
| 0
| 0
| 0.186376
| 0.054248
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.112903
| 0
| 0.185484
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
045d818a85f7e417a15d81255c6e0bba62cc720a
| 26
|
py
|
Python
|
json_dbindex/models.py
|
peopledoc/django-json-dbindex
|
5fb497b22d185e349ac9485bd6217c5bccb295c4
|
[
"BSD-3-Clause"
] | 3
|
2015-04-19T11:56:53.000Z
|
2016-07-07T19:38:31.000Z
|
json_dbindex/models.py
|
peopledoc/django-json-dbindex
|
5fb497b22d185e349ac9485bd6217c5bccb295c4
|
[
"BSD-3-Clause"
] | 5
|
2015-04-01T14:51:06.000Z
|
2016-09-15T14:22:06.000Z
|
json_dbindex/models.py
|
peopledoc/django-json-dbindex
|
5fb497b22d185e349ac9485bd6217c5bccb295c4
|
[
"BSD-3-Clause"
] | 1
|
2015-10-26T14:04:29.000Z
|
2015-10-26T14:04:29.000Z
|
# No models for this apps
| 13
| 25
| 0.730769
| 5
| 26
| 3.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 26
| 1
| 26
| 26
| 0.95
| 0.884615
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f095f83d0fb998742b62734022052168214a7e83
| 87
|
py
|
Python
|
classroom/admin.py
|
HelloYeew/ta_assistant_django
|
c72af9ae260c917d4835892811240894602ac454
|
[
"MIT"
] | null | null | null |
classroom/admin.py
|
HelloYeew/ta_assistant_django
|
c72af9ae260c917d4835892811240894602ac454
|
[
"MIT"
] | null | null | null |
classroom/admin.py
|
HelloYeew/ta_assistant_django
|
c72af9ae260c917d4835892811240894602ac454
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Class
admin.site.register(Class)
| 17.4
| 32
| 0.816092
| 13
| 87
| 5.461538
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114943
| 87
| 4
| 33
| 21.75
| 0.922078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f0b335de65dfd9a6e266026338baff29e86c20da
| 7,078
|
py
|
Python
|
a10sdk/core/cgnv6/cgnv6_ddos_protection.py
|
deepfield/a10sdk-python
|
bfaa58099f51f085d5e91652d1d1a3fd5c529d5d
|
[
"Apache-2.0"
] | 16
|
2015-05-20T07:26:30.000Z
|
2021-01-23T11:56:57.000Z
|
a10sdk/core/cgnv6/cgnv6_ddos_protection.py
|
deepfield/a10sdk-python
|
bfaa58099f51f085d5e91652d1d1a3fd5c529d5d
|
[
"Apache-2.0"
] | 6
|
2015-03-24T22:07:11.000Z
|
2017-03-28T21:31:18.000Z
|
a10sdk/core/cgnv6/cgnv6_ddos_protection.py
|
deepfield/a10sdk-python
|
bfaa58099f51f085d5e91652d1d1a3fd5c529d5d
|
[
"Apache-2.0"
] | 23
|
2015-03-29T15:43:01.000Z
|
2021-06-02T17:12:01.000Z
|
from a10sdk.common.A10BaseClass import A10BaseClass
class PacketsPerSecond(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ip: {"description": "Configure packets-per-second threshold per IP(default 3000)", "format": "number", "default": 3000, "maximum": 30000000, "minimum": 0, "type": "number"}
:param udp: {"description": "Configure packets-per-second threshold per UDP port (default: 3000)", "format": "number", "default": 3000, "maximum": 30000000, "minimum": 0, "type": "number"}
:param other: {"description": "Configure packets-per-second threshold for other L4 protocols(default 10000)", "format": "number", "default": 10000, "maximum": 30000000, "minimum": 0, "type": "number"}
:param tcp: {"description": "Configure packets-per-second threshold per TCP port (default: 3000)", "format": "number", "default": 3000, "maximum": 30000000, "minimum": 0, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "packets-per-second"
self.DeviceProxy = ""
self.ip = ""
self.udp = ""
self.other = ""
self.tcp = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Logging(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param logging_toggle: {"default": "enable", "enum": ["enable", "disable"], "type": "string", "description": "'enable': Enable CGNV6 NAT pool DDoS protection logging (default); 'disable': Disable CGNV6 NAT pool DDoS protection logging; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "logging"
self.DeviceProxy = ""
self.logging_toggle = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class SamplingEnable(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param counters1: {"enum": ["all", "entry_added", "entry_deleted", "entry_added_to_hw", "entry_removed_from_hw", "hw_out_of_entries", "entry_match_drop", "entry_match_drop_hw", "entry_list_alloc", "entry_list_free", "entry_list_alloc_failure", "ip_node_alloc", "ip_node_free", "ip_node_alloc_failure", "ip_port_block_alloc", "ip_port_block_free", "ip_port_block_alloc_failure", "ip_other_block_alloc", "ip_other_block_free", "ip_other_block_alloc_failure", "entry_added_shadow", "entry_invalidated"], "type": "string", "description": "'all': all; 'entry_added': entry_added; 'entry_deleted': entry_deleted; 'entry_added_to_hw': entry_added_to_hw; 'entry_removed_from_hw': entry_removed_from_hw; 'hw_out_of_entries': hw_out_of_entries; 'entry_match_drop': entry_match_drop; 'entry_match_drop_hw': entry_match_drop_hw; 'entry_list_alloc': entry_list_alloc; 'entry_list_free': entry_list_free; 'entry_list_alloc_failure': entry_list_alloc_failure; 'ip_node_alloc': ip_node_alloc; 'ip_node_free': ip_node_free; 'ip_node_alloc_failure': ip_node_alloc_failure; 'ip_port_block_alloc': ip_port_block_alloc; 'ip_port_block_free': ip_port_block_free; 'ip_port_block_alloc_failure': ip_port_block_alloc_failure; 'ip_other_block_alloc': ip_other_block_alloc; 'ip_other_block_free': ip_other_block_free; 'ip_other_block_alloc_failure': ip_other_block_alloc_failure; 'entry_added_shadow': entry_added_shadow; 'entry_invalidated': entry_invalidated; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "sampling-enable"
self.DeviceProxy = ""
self.counters1 = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class DdosProtection(A10BaseClass):
""" :param toggle: {"description": "'enable': Enable CGNV6 NAT pool DDoS protection (default); 'disable': Disable CGNV6 NAT pool DDoS protection; ", "format": "enum", "default": "enable", "type": "string", "enum": ["enable", "disable"], "optional": true}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param sampling_enable: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "counters1": {"enum": ["all", "entry_added", "entry_deleted", "entry_added_to_hw", "entry_removed_from_hw", "hw_out_of_entries", "entry_match_drop", "entry_match_drop_hw", "entry_list_alloc", "entry_list_free", "entry_list_alloc_failure", "ip_node_alloc", "ip_node_free", "ip_node_alloc_failure", "ip_port_block_alloc", "ip_port_block_free", "ip_port_block_alloc_failure", "ip_other_block_alloc", "ip_other_block_free", "ip_other_block_alloc_failure", "entry_added_shadow", "entry_invalidated"], "type": "string", "description": "'all': all; 'entry_added': entry_added; 'entry_deleted': entry_deleted; 'entry_added_to_hw': entry_added_to_hw; 'entry_removed_from_hw': entry_removed_from_hw; 'hw_out_of_entries': hw_out_of_entries; 'entry_match_drop': entry_match_drop; 'entry_match_drop_hw': entry_match_drop_hw; 'entry_list_alloc': entry_list_alloc; 'entry_list_free': entry_list_free; 'entry_list_alloc_failure': entry_list_alloc_failure; 'ip_node_alloc': ip_node_alloc; 'ip_node_free': ip_node_free; 'ip_node_alloc_failure': ip_node_alloc_failure; 'ip_port_block_alloc': ip_port_block_alloc; 'ip_port_block_free': ip_port_block_free; 'ip_port_block_alloc_failure': ip_port_block_alloc_failure; 'ip_other_block_alloc': ip_other_block_alloc; 'ip_other_block_free': ip_other_block_free; 'ip_other_block_alloc_failure': ip_other_block_alloc_failure; 'entry_added_shadow': entry_added_shadow; 'entry_invalidated': entry_invalidated; ", "format": "enum"}}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
Configure CGNV6 DDoS Protection.
Class ddos-protection supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/ddos-protection`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "ddos-protection"
self.a10_url="/axapi/v3/cgnv6/ddos-protection"
self.DeviceProxy = ""
self.packets_per_second = {}
self.toggle = ""
self.logging = {}
self.uuid = ""
self.sampling_enable = []
for keys, value in kwargs.items():
setattr(self,keys, value)
| 62.087719
| 1,609
| 0.709946
| 940
| 7,078
| 4.970213
| 0.132979
| 0.061644
| 0.053938
| 0.041096
| 0.791524
| 0.780394
| 0.767765
| 0.72881
| 0.680651
| 0.680651
| 0
| 0.017699
| 0.153857
| 7,078
| 113
| 1,610
| 62.637168
| 0.762398
| 0.773665
| 0
| 0.47619
| 0
| 0
| 0.057796
| 0.020833
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.02381
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f0cf54098ed541ac733e9342aca4e178b689ee5d
| 166
|
py
|
Python
|
graphormer/data/__init__.py
|
shawnwang-tech/Graphormer
|
49286ac8093dcc165076c2a6cd1a5380749a48a7
|
[
"MIT"
] | 858
|
2021-06-12T14:50:56.000Z
|
2022-03-31T18:56:05.000Z
|
graphormer/data/__init__.py
|
ericdoug-qi/Graphormer
|
2e48f3fb52d25d505d0950f74a6016c5f9967c13
|
[
"MIT"
] | 77
|
2021-06-16T21:49:45.000Z
|
2022-03-31T06:27:40.000Z
|
graphormer/data/__init__.py
|
ericdoug-qi/Graphormer
|
2e48f3fb52d25d505d0950f74a6016c5f9967c13
|
[
"MIT"
] | 150
|
2021-06-12T15:11:42.000Z
|
2022-03-30T13:34:59.000Z
|
DATASET_REGISTRY = {}
def register_dataset(name: str):
def register_dataset_func(func):
DATASET_REGISTRY[name] = func()
return register_dataset_func
| 23.714286
| 39
| 0.728916
| 20
| 166
| 5.7
| 0.4
| 0.394737
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180723
| 166
| 6
| 40
| 27.666667
| 0.838235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f0e74c38028faeaf907ae03a7d395847719222f3
| 123
|
py
|
Python
|
gears/compressors/__init__.py
|
gears/gears
|
5729c2525a8c04c185e998bd9a86233708972921
|
[
"0BSD"
] | 9
|
2015-03-23T15:34:04.000Z
|
2021-03-19T03:03:48.000Z
|
gears/compressors/__init__.py
|
gears/gears
|
5729c2525a8c04c185e998bd9a86233708972921
|
[
"0BSD"
] | 2
|
2015-08-31T03:19:27.000Z
|
2016-01-20T09:54:01.000Z
|
gears/compressors/__init__.py
|
gears/gears
|
5729c2525a8c04c185e998bd9a86233708972921
|
[
"0BSD"
] | 3
|
2015-02-01T06:21:24.000Z
|
2015-07-30T02:31:31.000Z
|
from .base import BaseCompressor, ExecCompressor
from .cssmin import CSSMinCompressor
from .slimit import SlimItCompressor
| 30.75
| 48
| 0.861789
| 13
| 123
| 8.153846
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105691
| 123
| 3
| 49
| 41
| 0.963636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0b0ed3365bb8d7ffd3d84d792e2c43f7bc00ba2e
| 189
|
py
|
Python
|
pythonexercicios/ex008-mtr-cent-mil.py
|
marroni1103/exercicios-pyton
|
734162cc4b63ed30d754a6efe4c5622baaa1a50b
|
[
"MIT"
] | null | null | null |
pythonexercicios/ex008-mtr-cent-mil.py
|
marroni1103/exercicios-pyton
|
734162cc4b63ed30d754a6efe4c5622baaa1a50b
|
[
"MIT"
] | null | null | null |
pythonexercicios/ex008-mtr-cent-mil.py
|
marroni1103/exercicios-pyton
|
734162cc4b63ed30d754a6efe4c5622baaa1a50b
|
[
"MIT"
] | null | null | null |
m = float(input('Informe os metros: '))
print(f'{m} metros equivale a: \n{m*0.001}km\n{m*0.01}hm\n{m*0.1:.1f}dam\n{m*10:.0f}dm\n{m*100:.0f}cm\n{m*1000:.0f}mm')
#km, hm, dam, m, dm, cm, mm
| 37.8
| 119
| 0.597884
| 50
| 189
| 2.26
| 0.52
| 0.106195
| 0.079646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129412
| 0.100529
| 189
| 5
| 120
| 37.8
| 0.535294
| 0.137566
| 0
| 0
| 0
| 0.5
| 0.785276
| 0.527607
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
9bc9fbb641a409c467750b8954175089dcd45894
| 83
|
py
|
Python
|
tests/tests/base_tests/__init__.py
|
zhouhanjiang/aws-device-farm-appium-python-tests-for-android-sample-app
|
d43b892baf5cc202732a59967ec258cbafef3c37
|
[
"Apache-2.0"
] | null | null | null |
tests/tests/base_tests/__init__.py
|
zhouhanjiang/aws-device-farm-appium-python-tests-for-android-sample-app
|
d43b892baf5cc202732a59967ec258cbafef3c37
|
[
"Apache-2.0"
] | null | null | null |
tests/tests/base_tests/__init__.py
|
zhouhanjiang/aws-device-farm-appium-python-tests-for-android-sample-app
|
d43b892baf5cc202732a59967ec258cbafef3c37
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from native_test import NativeTest
| 13.833333
| 34
| 0.674699
| 12
| 83
| 4.583333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014286
| 0.156627
| 83
| 5
| 35
| 16.6
| 0.771429
| 0.506024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
50207f9b9e42811e1d80f379d8051c0e63891e18
| 81
|
py
|
Python
|
brainda/algorithms/deep_learning/__init__.py
|
Mrswolf/brainda
|
cbd2fa6334d9e6243324dbaf086be4eb4047e801
|
[
"MIT"
] | 24
|
2021-03-05T14:33:43.000Z
|
2022-03-20T01:23:40.000Z
|
brainda/algorithms/deep_learning/__init__.py
|
ccc65535/brainda
|
366a1288bc0f1b835f78fe8dd6c53bcde631c1a5
|
[
"MIT"
] | 2
|
2021-03-10T05:34:05.000Z
|
2021-12-16T05:22:18.000Z
|
brainda/algorithms/deep_learning/__init__.py
|
ccc65535/brainda
|
366a1288bc0f1b835f78fe8dd6c53bcde631c1a5
|
[
"MIT"
] | 4
|
2021-04-02T12:33:04.000Z
|
2022-03-03T01:38:05.000Z
|
from .base import *
from .eegnet import EEGNet
from .shallownet import ShallowNet
| 27
| 34
| 0.814815
| 11
| 81
| 6
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135802
| 81
| 3
| 34
| 27
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ac955887490ebdf12f0d48dc349f3c103239ddea
| 553
|
py
|
Python
|
projectreport/__init__.py
|
whoopnip/project-report
|
b08e78fd864ebd7f48443b2d58c89661c1adcab7
|
[
"MIT"
] | null | null | null |
projectreport/__init__.py
|
whoopnip/project-report
|
b08e78fd864ebd7f48443b2d58c89661c1adcab7
|
[
"MIT"
] | null | null | null |
projectreport/__init__.py
|
whoopnip/project-report
|
b08e78fd864ebd7f48443b2d58c89661c1adcab7
|
[
"MIT"
] | null | null | null |
"""
A set of tools for describing software projects. Finds software projects, analyzes them,
and outputs reports.
"""
from projectreport.analyzer.project import Project
from projectreport.analyzer.ts.github import GithubAnalysis
from projectreport.config import DEFAULT_IGNORE_PATHS
from projectreport.finder.combine import CombinedFinder
from projectreport.finder.git import GitFinder
from projectreport.finder.js import JavaScriptPackageFinder
from projectreport.finder.python import PythonPackageFinder
from projectreport.report.report import Report
| 42.538462
| 88
| 0.862568
| 66
| 553
| 7.19697
| 0.545455
| 0.286316
| 0.193684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090416
| 553
| 12
| 89
| 46.083333
| 0.944334
| 0.197107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ac9d90119133c88f80ee7a88fbb57a4aa23edebc
| 326
|
py
|
Python
|
franki/exceptions.py
|
cr0hn/franki
|
f375ad9f5f4fc233fc007242076d15063e754b2b
|
[
"BSD-3-Clause"
] | 1
|
2020-08-08T11:57:12.000Z
|
2020-08-08T11:57:12.000Z
|
franki/exceptions.py
|
cr0hn/franki
|
f375ad9f5f4fc233fc007242076d15063e754b2b
|
[
"BSD-3-Clause"
] | 2
|
2020-07-20T22:39:30.000Z
|
2021-09-02T12:00:29.000Z
|
franki/exceptions.py
|
cr0hn/franki
|
f375ad9f5f4fc233fc007242076d15063e754b2b
|
[
"BSD-3-Clause"
] | 1
|
2020-08-08T11:57:14.000Z
|
2020-08-08T11:57:14.000Z
|
class FrankiException(Exception):
pass
class FrankiInvalidFormatException(Exception):
pass
class FrankiFileNotFound(Exception):
pass
class FrankiInvalidFileFormat(Exception):
pass
__all__ = ("FrankiInvalidFormatException", "FrankiFileNotFound",
"FrankiInvalidFileFormat", "FrankiException")
| 17.157895
| 64
| 0.757669
| 21
| 326
| 11.571429
| 0.380952
| 0.213992
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162577
| 326
| 18
| 65
| 18.111111
| 0.89011
| 0
| 0
| 0.4
| 0
| 0
| 0.257669
| 0.156442
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.4
| 0
| 0
| 0.4
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
acc1f5d57300368832fd5db3bd81bb55024f950b
| 200
|
py
|
Python
|
GPyOpt/util/stats.py
|
zhenwendai/GPyOpt
|
fd96875e7ec0cb0f78014d96813ece400648827d
|
[
"BSD-3-Clause"
] | 850
|
2015-05-31T21:12:41.000Z
|
2022-03-24T17:25:37.000Z
|
GPyOpt/util/stats.py
|
lakshaykc/GPyOpt
|
097ba66e81c7e22b5bf9fdbe64fd135753bc4a67
|
[
"BSD-3-Clause"
] | 340
|
2015-09-10T14:08:06.000Z
|
2022-03-28T20:35:26.000Z
|
GPyOpt/util/stats.py
|
lakshaykc/GPyOpt
|
097ba66e81c7e22b5bf9fdbe64fd135753bc4a67
|
[
"BSD-3-Clause"
] | 299
|
2015-07-30T13:18:37.000Z
|
2022-03-22T21:27:31.000Z
|
# Copyright (c) 2016, the GPyOpt Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
#from ..util.general import samples_multidimensional_uniform, multigrid, iroot
import numpy as np
| 33.333333
| 78
| 0.79
| 29
| 200
| 5.37931
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028902
| 0.135
| 200
| 5
| 79
| 40
| 0.872832
| 0.865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
acff259283e1eec5e8740c189d715e031471ec8c
| 1,613
|
py
|
Python
|
SR/model/SRCNN.py
|
AntonyYX/Super-Resolution
|
9a5a55169b08849be39a42f0ee955feb60527fbf
|
[
"MIT"
] | null | null | null |
SR/model/SRCNN.py
|
AntonyYX/Super-Resolution
|
9a5a55169b08849be39a42f0ee955feb60527fbf
|
[
"MIT"
] | null | null | null |
SR/model/SRCNN.py
|
AntonyYX/Super-Resolution
|
9a5a55169b08849be39a42f0ee955feb60527fbf
|
[
"MIT"
] | 1
|
2021-10-02T11:03:49.000Z
|
2021-10-02T11:03:49.000Z
|
import math
from torch import nn
import torch
from torch.nn.modules.activation import ReLU
from torchvision import transforms
from PIL import Image
class SRCNN(nn.Module):
def __init__(self, in_channel: int = 3):
super(SRCNN, self).__init__()
self.body = nn.Sequential(
nn.Conv2d(in_channels=in_channel, out_channels=64,
kernel_size=9, padding=9//2),
nn.ReLU(True),
nn.Conv2d(in_channels=64, out_channels=32,
kernel_size=5, padding=5//2),
nn.ReLU(True),
nn.Conv2d(in_channels=32, out_channels=in_channel,
kernel_size=5, padding=5//2),
nn.ReLU(True),
)
def forward(self, inputs):
return self.body(inputs)
class SRCNN_BN(nn.Module):
def __init__(self, in_channel: int = 3):
super(SRCNN_BN, self).__init__()
self.body = nn.Sequential(
nn.Conv2d(in_channels=in_channel, out_channels=64,
kernel_size=9, padding=9//2),
nn.ReLU(True),
nn.BatchNorm2d(64),
nn.Conv2d(in_channels=64, out_channels=32,
kernel_size=5, padding=5//2),
nn.ReLU(True),
nn.BatchNorm2d(32),
nn.Conv2d(in_channels=32, out_channels=in_channel,
kernel_size=5, padding=5//2),
nn.ReLU(True),
)
def forward(self, inputs):
return self.body(inputs)
if __name__ == "__main__":
model = SRCNN_BN(3)
img = torch.rand((1, 3, 600, 600))
print(model(img).shape)
| 29.87037
| 62
| 0.568506
| 209
| 1,613
| 4.143541
| 0.248804
| 0.062356
| 0.069284
| 0.124711
| 0.752887
| 0.752887
| 0.727483
| 0.727483
| 0.727483
| 0.727483
| 0
| 0.051444
| 0.313081
| 1,613
| 53
| 63
| 30.433962
| 0.730144
| 0
| 0
| 0.590909
| 0
| 0
| 0.00496
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.136364
| 0.045455
| 0.318182
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4a1d3fa2d343712d70873e8247660aa7840c2930
| 58
|
py
|
Python
|
tests/package1/subpackage1/__init__.py
|
sizrailev/py2reqs
|
f09f8b808b310c27860a273660dedd50d3c7bea3
|
[
"MIT"
] | null | null | null |
tests/package1/subpackage1/__init__.py
|
sizrailev/py2reqs
|
f09f8b808b310c27860a273660dedd50d3c7bea3
|
[
"MIT"
] | null | null | null |
tests/package1/subpackage1/__init__.py
|
sizrailev/py2reqs
|
f09f8b808b310c27860a273660dedd50d3c7bea3
|
[
"MIT"
] | null | null | null |
from .module3 import foo3 as bar3
def foo():
bar3()
| 9.666667
| 33
| 0.637931
| 9
| 58
| 4.111111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 0.258621
| 58
| 5
| 34
| 11.6
| 0.767442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c5857b7f958ed44156d7ace0958ab3175a5d143a
| 119
|
py
|
Python
|
dsa_stl/testdsa.py
|
aman2000jaiswal14/dsa_stl
|
925485913a783ac7dfa8c59e30b24e5be3f76a2e
|
[
"MIT"
] | null | null | null |
dsa_stl/testdsa.py
|
aman2000jaiswal14/dsa_stl
|
925485913a783ac7dfa8c59e30b24e5be3f76a2e
|
[
"MIT"
] | null | null | null |
dsa_stl/testdsa.py
|
aman2000jaiswal14/dsa_stl
|
925485913a783ac7dfa8c59e30b24e5be3f76a2e
|
[
"MIT"
] | null | null | null |
def test():
print("test successful...")
def update():
print("Updating DSA")
if __name__=='__main__':
pass
| 14.875
| 31
| 0.613445
| 14
| 119
| 4.642857
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201681
| 119
| 8
| 32
| 14.875
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0.316667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.166667
| 0
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
c5a0a8324ab48013a779c91b647b3bd3ef2863a0
| 25,437
|
py
|
Python
|
consultantform/forms.py
|
rajeshgupta14/pathscriptfinal
|
1a0b933d00b902588dfe30b9bea62c3e0c7ec4a2
|
[
"Apache-2.0"
] | null | null | null |
consultantform/forms.py
|
rajeshgupta14/pathscriptfinal
|
1a0b933d00b902588dfe30b9bea62c3e0c7ec4a2
|
[
"Apache-2.0"
] | null | null | null |
consultantform/forms.py
|
rajeshgupta14/pathscriptfinal
|
1a0b933d00b902588dfe30b9bea62c3e0c7ec4a2
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from consultantform.models import Relatedcompany,Article,Backgroundcheck,Backgroundcheckb, Problemsolving, Problemsolvingp, Digitalization, Digitalizationp, Miom, Miomp, Duediligence, Script, Strategy,Duediligencep, Scriptp, Strategyp, Branch,Subsidiary
from myapp.models import Project,Client,User,Product
from django.utils.translation import ugettext_lazy as _
class ArticleForm(forms.ModelForm):#kyc form
founding_date=forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))
class Meta:
model = Article
fields = ('founding_date','headquarter_location',
'areas_served','no_of_employees','type_of_company','type_of_industry','type_of_activity','warehouse_addresses',
'factory_addresses','number_of_owners_and_officers','officers_and_roles',
'registered_address','telephone','email','website',
'services_opted','upload_Doc1','upload_Doc2','upload_Doc3','upload_Doc4','upload_Doc5','upload_Doc6','upload_Doc7','upload_Doc8','upload_Doc9','upload_Doc10','upload_Doc11','upload_Doc12','notes')
labels = {
'services_opted' : _('Services Opted (Hold Ctrl + select for alternate choices, Hold Shift + select for continuous choices)'),
}
def __init__(self,request,*args, **kwargs):
super(ArticleForm, self).__init__(*args, **kwargs)
# self.fields['company_name'].queryset = Client.objects.filter(
# userid=request.user.id)
class BranchForm(forms.ModelForm):#branch form
branch_founding_date=forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))
class Meta:
model = Branch
fields = (
'branch_name','branch_founding_date','branch_location',
'areas_served_by_branch','no_of_employees_in_branch','type_of_business_by_branch',
'number_of_owners_and_officers_in_branch','officers_and_roles_in_branch',
'branch_registered_address','branch_telephone','branch_email','branch_website','upload_Doc1','upload_Doc2','upload_Doc3','upload_Doc4','upload_Doc5')
def __init__(self,request,*args, **kwargs):
super(BranchForm, self).__init__(*args, **kwargs)
#self.fields['company_name'].queryset = Client.objects.filter(
# userid=request.user.id)
class SubsidiaryForm(forms.ModelForm):
subsidiary_founding_date=forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))
class Meta:
model = Subsidiary
fields = (
'subsidiary_name','subsidiary_founding_date','subsidiary_location',
'areas_served_by_subsidiary','no_of_employees_in_subsidiary','type_of_business_by_subsidiary',
'subsidiary_warehouse_addresses','subsidiary_factory_addresses','number_of_owners_and_officers_in_subsidiary','officers_and_roles_in_subsidiary',
'subsidiary_registered_address','subsidiary_telephone','subsidiary_email','subsidiary_website','upload_Doc1','upload_Doc2','upload_Doc3','upload_Doc4','upload_Doc5')
def __init__(self,request,*args, **kwargs):
super(SubsidiaryForm, self).__init__(*args, **kwargs)
#self.fields['company_name'].queryset = Client.objects.filter(
# userid=request.user.id)
class RelatedcompanyForm(forms.ModelForm):
class Meta:
model = Relatedcompany
fields = (
'related_company_name','relation','related_company_registered_address','related_company_telephone','related_company_email','related_company_website','upload_Doc1','upload_Doc2','upload_Doc3','upload_Doc4','upload_Doc5','upload_Doc6','upload_Doc7')
def __init__(self,request,*args, **kwargs):
super(RelatedcompanyForm, self).__init__(*args, **kwargs)
#self.fields['company_name'].queryset = Client.objects.filter(
# userid=request.user.id)
class BackgroundcheckForm(forms.ModelForm):
class Meta:
model = Backgroundcheck
fields = ('c1','rOC_Certificates','c2','mOA','c3',
'current_List_of_Directors_including_Photo_ID','c4',
'term_Sheets','c5','current_Bankers_and_Auditors_and_Company_Secretary','c6',
'sales_Tax_Registration_Certificate','c7','last_Filed_Sales_Tax_Certificate','c8',
'municipal_Certificate','c9','last_2_years_Audited_Books_of_Accounts','c10',
'last_Paid_Tax_Receipt','c11','employee_List_Statement','c12',
'last_Provident_Fund_Receipt','c13','list_of_Competitors')
labels = {
'company_name': _('Company Name'),
'c1': _('Currently with'),
'rOC_Certificates': _('ROC Certificates'),
'c2': _('Currently with'),
'mOA': _('MOA'),
'c3': _('Currently with'),
'current_List_of_Directors_including_Photo_ID': _('Current List of Directors including Photo ID'),
'c4': _('Currently with'),
'term_Sheets': _('Term Sheets'),
'c5': _('Currently with'),
'current_Bankers_and_Auditors_and_Company_Secretary': _('Current Bankers, Auditors and Company Secretary'),
'c6': _('Currently with'),
'sales_Tax_Registration_Certificate': _('Sales Tax Registration Certificate'),
'c7': _('Currently with'),
'last_Filed_Sales_Tax_Certificate': _('Last Filed Sales Tax Certificate'),
'c8': _('Currently with'),
'municipal_Certificate': _('Municipal Certificate'),
'c9': _('Currently with'),
'last_2_years_Audited_Books_of_Accounts': _('Last 2 years Audited Books of Accounts'),
'c10': _('Currently with'),
'last_Paid_Tax_Receipt': _('Last Paid Tax Receipt'),
'c11': _('Currently with'),
'employee_List_Statement': _('Employee List Statement'),
'c12': _('Currently with'),
'last_Provident_Fund_Receipt': _('last Provident Fund Receipt'),
'c13': _('Currently with'),
'list_of_Competitors': _('List of Competitors'),
}
def __init__(self,request,*args, **kwargs):
super(BackgroundcheckForm, self).__init__(*args, **kwargs)
#self.fields['company_name'].queryset = Client.objects.filter(userid=request.user.id)
self.fields['c1'].queryset = User.objects.filter(is_staff=True)
self.fields['c2'].queryset = User.objects.filter(is_staff=True)
self.fields['c3'].queryset = User.objects.filter(is_staff=True)
self.fields['c4'].queryset = User.objects.filter(is_staff=True)
self.fields['c5'].queryset = User.objects.filter(is_staff=True)
self.fields['c6'].queryset = User.objects.filter(is_staff=True)
self.fields['c7'].queryset = User.objects.filter(is_staff=True)
self.fields['c8'].queryset = User.objects.filter(is_staff=True)
self.fields['c9'].queryset = User.objects.filter(is_staff=True)
self.fields['c10'].queryset = User.objects.filter(is_staff=True)
self.fields['c11'].queryset = User.objects.filter(is_staff=True)
self.fields['c12'].queryset = User.objects.filter(is_staff=True)
self.fields['c13'].queryset = User.objects.filter(is_staff=True)
class BackgroundcheckbForm(forms.ModelForm):
class Meta:
model = Backgroundcheckb
fields = ('c1','rOC_Certificates','c2','mOA','c3',
'current_List_of_Directors_including_Photo_ID','c4',
'term_Sheets','c5','current_Bankers_and_Auditors_and_Company_Secretary','c6',
'sales_Tax_Registration_Certificate','c7','last_Filed_Sales_Tax_Certificate','c8',
'municipal_Certificate','c9','last_2_years_Audited_Books_of_Accounts','c10',
'last_Paid_Tax_Receipt','c11','employee_List_Statement','c12',
'last_Provident_Fund_Receipt','c13','list_of_Competitors')
# self.fields['company_name'].queryset = Client.objects.filter(id=User.objects.get(clientid=request.user.clientid).clientid)
class ProjectForm(forms.ModelForm):#create, view project form
class Meta:
model = Project
fields=('name','client','product','user')
def __init__(self,request,*args, **kwargs):
super(ProjectForm, self).__init__(*args, **kwargs)
self.fields['client'].queryset = Client.objects.filter(
userid=request.user.id)
class ProductForm(forms.ModelForm):#create , view product form
class Meta:
model = Product
fields=('name','description','upload_Doc1','upload_Doc2')
def __init__(self,request,*args, **kwargs):
super(ProductForm, self).__init__(*args, **kwargs)
class CustomerForm(forms.ModelForm):#kyc form in client view
class Meta:
model = Article
fields = ('founding_date','headquarter_location',
'areas_served','no_of_employees','type_of_company','type_of_industry','type_of_activity','warehouse_addresses',
'factory_addresses','number_of_owners_and_officers','officers_and_roles',
'registered_address','telephone','email','website','upload_Doc1','upload_Doc2','upload_Doc3','upload_Doc4','upload_Doc5','upload_Doc6','upload_Doc7','upload_Doc8','upload_Doc9','upload_Doc10','upload_Doc11','upload_Doc12')
def __init__(self,request,*args, **kwargs):
super(CustomerForm, self).__init__(*args, **kwargs)
#self.fields['company_name'].queryset = Client.objects.filter(
# userid=request.user.id)
class DuediligencetForm(forms.ModelForm):#duediligence create form
date=forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))
class Meta:
model = Duediligence
fields = ('date','version',
'please_select_the_growth_stage_of_your_company','please_tick_the_type_of_company',
'stock_exchange','ticker_ID','what_is_the_key_need_you_are_providing_for_your_customer',
'evidences_that_show_need_stated_above_for_customer_is_fulfilled',
'what_are_some_of_the_aspects_you_are_facing_a_challenge_with','upload_Doc1','upload_Doc2')
def __init__(self,request,*args, **kwargs):
super(DuediligencetForm, self).__init__(*args, **kwargs)
class DuediligenceForm(forms.ModelForm):#duediligence temporary form
date=forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))
class Meta:
model = Duediligence
fields = ('date',
'please_select_the_growth_stage_of_your_company','please_tick_the_type_of_company',
'stock_exchange','ticker_ID','what_is_the_key_need_you_are_providing_for_your_customer',
'evidences_that_show_need_stated_above_for_customer_is_fulfilled',
'what_are_some_of_the_aspects_you_are_facing_a_challenge_with','upload_Doc1','upload_Doc2')
def __init__(self,request,*args, **kwargs):
super(DuediligenceForm, self).__init__(*args, **kwargs)
#self.fields['project'].queryset = Project.objects.filter(
#user=request.user.id)
class DuediligencepForm(forms.ModelForm):#duediligence permanent
date=forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))
class Meta:
model = Duediligencep
fields = ('date',
'please_select_the_growth_stage_of_your_company','please_tick_the_type_of_company',
'stock_exchange','ticker_ID','what_is_the_key_need_you_are_providing_for_your_customer',
'evidences_that_show_need_stated_above_for_customer_is_fulfilled',
'what_are_some_of_the_aspects_you_are_facing_a_challenge_with','upload_Doc1','upload_Doc2')
def __init__(self,request,*args, **kwargs):
super(DuediligencepForm, self).__init__(*args, **kwargs)
#self.fields['project'].queryset = Project.objects.filter(
#user=request.user.id)
class ScripttForm(forms.ModelForm):#script create form
date=forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))
class Meta:
model = Script
fields = ('date','version',
'please_select_the_growth_stage_of_your_company','please_tick_the_type_of_company',
'stock_exchange','ticker_ID','what_is_the_idea_you_are_looking_to_implement',
'why_do_you_think_that_the_idea_should_be_implemented','was_this_idea_previously_executed_and_if_yes_state_the_method',
'reasons_for_failure_of_previous_implementation_methods','other_methods_of_implementation_that_you_would_suggest',
'is_the_level_of_implementation_generic_or_specific','deadline_by_which_you_need_the_idea_to_be_implemented','upload_Doc1','upload_Doc2')
def __init__(self,request,*args, **kwargs):
super(ScripttForm, self).__init__(*args, **kwargs)
#self.fields['project'].queryset = Project.objects.filter(
# user=request.user.id)
class ScriptForm(forms.ModelForm):#script temporary
date=forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))
class Meta:
model = Script
fields = ('date',
'please_select_the_growth_stage_of_your_company','please_tick_the_type_of_company',
'stock_exchange','ticker_ID','what_is_the_idea_you_are_looking_to_implement',
'why_do_you_think_that_the_idea_should_be_implemented','was_this_idea_previously_executed_and_if_yes_state_the_method',
'reasons_for_failure_of_previous_implementation_methods','other_methods_of_implementation_that_you_would_suggest',
'is_the_level_of_implementation_generic_or_specific','deadline_by_which_you_need_the_idea_to_be_implemented','upload_Doc1','upload_Doc2')
def __init__(self,request,*args, **kwargs):
super(ScriptForm, self).__init__(*args, **kwargs)
#self.fields['project'].queryset = Project.objects.filter(
# user=request.user.id)
class ScriptpForm(forms.ModelForm):#script permanent
date=forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))
class Meta:
model = Scriptp
fields = ('date',
'please_select_the_growth_stage_of_your_company','please_tick_the_type_of_company',
'stock_exchange','ticker_ID','what_is_the_idea_you_are_looking_to_implement',
'why_do_you_think_that_the_idea_should_be_implemented','was_this_idea_previously_executed_and_if_yes_state_the_method',
'reasons_for_failure_of_previous_implementation_methods','other_methods_of_implementation_that_you_would_suggest',
'is_the_level_of_implementation_generic_or_specific','deadline_by_which_you_need_the_idea_to_be_implemented','upload_Doc1','upload_Doc2')
def __init__(self,request,*args, **kwargs):
super(ScriptpForm, self).__init__(*args, **kwargs)
#self.fields['project'].queryset = Project.objects.filter(
# user=request.user.id)
class StrategytForm(forms.ModelForm):#strategy create
date=forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))
class Meta:
model = Strategy
fields = ('date','version',
'please_select_the_growth_stage_of_your_company','please_tick_the_type_of_company',
'stock_exchange','ticker_ID','business_strategies_that_are_already_deployed_in_your_company',
'what_are_the_strategies_that_were_deployed_but_failed','limitations_of_previous_strategies',
'factors_to_be_considered_before_planning_new_strategies','deadline_by_which_strategy_needs_to_be_deployed','upload_Doc1','upload_Doc2','upload_Doc3','upload_Doc4','upload_Doc5')
def __init__(self,request,*args, **kwargs):
super(StrategytForm, self).__init__(*args, **kwargs)
#self.fields['project'].queryset = Project.objects.filter(
# user=request.user.id)
class StrategyForm(forms.ModelForm):
    """Strategy "temporary" form — same as the create form minus `version`."""

    # Wide year range so both historical and future dates are selectable.
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Strategy
        fields = (
            'date',
            'please_select_the_growth_stage_of_your_company',
            'please_tick_the_type_of_company',
            'stock_exchange',
            'ticker_ID',
            'business_strategies_that_are_already_deployed_in_your_company',
            'what_are_the_strategies_that_were_deployed_but_failed',
            'limitations_of_previous_strategies',
            'factors_to_be_considered_before_planning_new_strategies',
            'deadline_by_which_strategy_needs_to_be_deployed',
            'upload_Doc1',
            'upload_Doc2',
            'upload_Doc3',
            'upload_Doc4',
            'upload_Doc5',
        )

    def __init__(self, request, *args, **kwargs):
        # `request` is accepted for caller compatibility but is currently
        # unused (a per-user queryset filter was commented out upstream).
        super().__init__(*args, **kwargs)
class StrategypForm(forms.ModelForm):
    """Strategy "permanent" form — backed by the Strategyp model."""

    # Wide year range so both historical and future dates are selectable.
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Strategyp
        fields = (
            'date',
            'please_select_the_growth_stage_of_your_company',
            'please_tick_the_type_of_company',
            'stock_exchange',
            'ticker_ID',
            'business_strategies_that_are_already_deployed_in_your_company',
            'what_are_the_strategies_that_were_deployed_but_failed',
            'limitations_of_previous_strategies',
            'factors_to_be_considered_before_planning_new_strategies',
            'deadline_by_which_strategy_needs_to_be_deployed',
            'upload_Doc1',
            'upload_Doc2',
            'upload_Doc3',
            'upload_Doc4',
            'upload_Doc5',
        )

    def __init__(self, request, *args, **kwargs):
        # `request` is accepted for caller compatibility but is currently
        # unused (a per-user queryset filter was commented out upstream).
        super().__init__(*args, **kwargs)
class ProblemSolvingtForm(forms.ModelForm):
    """Problem-solving "create" form — includes the explicit `version` field."""

    # Wide year range so both historical and future dates are selectable.
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Problemsolving
        fields = (
            'date',
            'version',
            'what_is_the_issue_that_needs_to_be_addressed',
            'what_is_its_effect_on_the_company',
            'researches_that_have_been_done_on_the_possible_solutions',
            'what_are_the_solutions_that_have_already_been_tried',
            'what_are_the_solutions_that_failed_and_the_reasons_for_failure',
            'what_are_the_parameters_to_be_considered',
            'upload_Doc1',
            'upload_Doc2',
        )

    def __init__(self, request, *args, **kwargs):
        # `request` is accepted for caller compatibility but is not used.
        super().__init__(*args, **kwargs)
class ProblemSolvingForm(forms.ModelForm):
    """Problem-solving "temporary" form — same as create minus `version`."""

    # Wide year range so both historical and future dates are selectable.
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Problemsolving
        fields = (
            'date',
            'what_is_the_issue_that_needs_to_be_addressed',
            'what_is_its_effect_on_the_company',
            'researches_that_have_been_done_on_the_possible_solutions',
            'what_are_the_solutions_that_have_already_been_tried',
            'what_are_the_solutions_that_failed_and_the_reasons_for_failure',
            'what_are_the_parameters_to_be_considered',
            'upload_Doc1',
            'upload_Doc2',
        )

    def __init__(self, request, *args, **kwargs):
        # `request` is accepted for caller compatibility but is not used.
        super().__init__(*args, **kwargs)
class ProblemSolvingpForm(forms.ModelForm):
    """Problem-solving "permanent" form — backed by the Problemsolvingp model."""

    # Wide year range so both historical and future dates are selectable.
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Problemsolvingp
        fields = (
            'date',
            'what_is_the_issue_that_needs_to_be_addressed',
            'what_is_its_effect_on_the_company',
            'researches_that_have_been_done_on_the_possible_solutions',
            'what_are_the_solutions_that_have_already_been_tried',
            'what_are_the_solutions_that_failed_and_the_reasons_for_failure',
            'what_are_the_parameters_to_be_considered',
            'upload_Doc1',
            'upload_Doc2',
        )

    def __init__(self, request, *args, **kwargs):
        # `request` is accepted for caller compatibility but is not used.
        super().__init__(*args, **kwargs)
class DigitalizationtForm(forms.ModelForm):
    """Digitalization "create" form — includes the explicit `version` field."""

    # Wide year range so both historical and future dates are selectable.
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Digitalization
        # The misspellings below ('digitlization', 'deaprtments') mirror the
        # model's actual field names and must not be "fixed" here.
        fields = (
            'date',
            'version',
            'what_are_the_departments_that_need_to_be_digitalized_and_why',
            'please_mention_if_they_are_new_or_preexisting_departments',
            'what_is_the_budget_allocated_for_the_digitlization_process',
            'the_priority_in_which_the_deaprtments_need_to_be_digitalized',
            'what_are_the_limitations_that_need_to_be_considered',
            'the_deadline_by_which_digitalization_needs_to_be_done',
            'upload_Doc1',
            'upload_Doc2',
        )

    def __init__(self, request, *args, **kwargs):
        # `request` is accepted for caller compatibility but is not used.
        super().__init__(*args, **kwargs)
class DigitalizationForm(forms.ModelForm):
    """Digitalization "temporary" form — same as create minus `version`."""

    # Wide year range so both historical and future dates are selectable.
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Digitalization
        # The misspellings below ('digitlization', 'deaprtments') mirror the
        # model's actual field names and must not be "fixed" here.
        fields = (
            'date',
            'what_are_the_departments_that_need_to_be_digitalized_and_why',
            'please_mention_if_they_are_new_or_preexisting_departments',
            'what_is_the_budget_allocated_for_the_digitlization_process',
            'the_priority_in_which_the_deaprtments_need_to_be_digitalized',
            'what_are_the_limitations_that_need_to_be_considered',
            'the_deadline_by_which_digitalization_needs_to_be_done',
            'upload_Doc1',
            'upload_Doc2',
        )

    def __init__(self, request, *args, **kwargs):
        # `request` is accepted for caller compatibility but is not used.
        super().__init__(*args, **kwargs)
class DigitalizationpForm(forms.ModelForm):
    """Digitalization "permanent" form — backed by the Digitalizationp model."""

    # Wide year range so both historical and future dates are selectable.
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Digitalizationp
        # The misspellings below ('digitlization', 'deaprtments') mirror the
        # model's actual field names and must not be "fixed" here.
        fields = (
            'date',
            'what_are_the_departments_that_need_to_be_digitalized_and_why',
            'please_mention_if_they_are_new_or_preexisting_departments',
            'what_is_the_budget_allocated_for_the_digitlization_process',
            'the_priority_in_which_the_deaprtments_need_to_be_digitalized',
            'what_are_the_limitations_that_need_to_be_considered',
            'the_deadline_by_which_digitalization_needs_to_be_done',
            'upload_Doc1',
            'upload_Doc2',
        )

    def __init__(self, request, *args, **kwargs):
        # `request` is accepted for caller compatibility but is not used.
        super().__init__(*args, **kwargs)
class MiomtForm(forms.ModelForm):
    """Minutes-of-meeting "create" form — includes the explicit `version` field."""

    # Wide year range so both historical and future dates are selectable.
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Miom
        fields = (
            'date',
            'version',
            'meeting_description',
            'main_concerns',
            'restrictions',
            'plan_of_action_for_Pathscript',
            'plan_of_action_for_Client',
            'upload_Doc1',
            'upload_Doc2',
        )

    def __init__(self, request, *args, **kwargs):
        # `request` is accepted for caller compatibility but is not used.
        super().__init__(*args, **kwargs)
class MiomForm(forms.ModelForm):
    """Minutes-of-meeting "temporary" form — same as create minus `version`."""

    # Wide year range so both historical and future dates are selectable.
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Miom
        fields = (
            'date',
            'meeting_description',
            'main_concerns',
            'restrictions',
            'plan_of_action_for_Pathscript',
            'plan_of_action_for_Client',
            'upload_Doc1',
            'upload_Doc2',
        )

    def __init__(self, request, *args, **kwargs):
        # `request` is accepted for caller compatibility but is not used.
        super().__init__(*args, **kwargs)
class MiompForm(forms.ModelForm):
    """Minutes-of-meeting "permanent" form — backed by the Miomp model."""

    # Wide year range so both historical and future dates are selectable.
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Miomp
        fields = (
            'date',
            'meeting_description',
            'main_concerns',
            'restrictions',
            'plan_of_action_for_Pathscript',
            'plan_of_action_for_Client',
            'upload_Doc1',
            'upload_Doc2',
        )

    def __init__(self, request, *args, **kwargs):
        # `request` is accepted for caller compatibility but is not used.
        super().__init__(*args, **kwargs)
| 52.232033
| 265
| 0.667689
| 2,807
| 25,437
| 5.556466
| 0.109369
| 0.03334
| 0.024235
| 0.030006
| 0.795666
| 0.761685
| 0.756107
| 0.733282
| 0.725396
| 0.686735
| 0
| 0.016917
| 0.223847
| 25,437
| 486
| 266
| 52.339506
| 0.773084
| 0.080788
| 0
| 0.60241
| 0
| 0
| 0.41177
| 0.2825
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078313
| false
| 0
| 0.012048
| 0
| 0.316265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c5d264b666ec206bb38235e6b929269a680d4c3f
| 201
|
py
|
Python
|
moceansdk/modules/command/button/wa_call_button.py
|
d3no/mocean-sdk-python
|
cbc215a0eb8aa26c04afb940eab6482f23150c75
|
[
"MIT"
] | null | null | null |
moceansdk/modules/command/button/wa_call_button.py
|
d3no/mocean-sdk-python
|
cbc215a0eb8aa26c04afb940eab6482f23150c75
|
[
"MIT"
] | null | null | null |
moceansdk/modules/command/button/wa_call_button.py
|
d3no/mocean-sdk-python
|
cbc215a0eb8aa26c04afb940eab6482f23150c75
|
[
"MIT"
] | null | null | null |
from moceansdk.modules.command.button.wa_button_basic import WaButtonBasic
class WaCallButton(WaButtonBasic):
    """WhatsApp "call" button variant of the basic button command."""

    def type(self):
        """Return this button's type identifier string."""
        return "call"

    def required_key(self):
        """A call button requires no extra keys beyond the basic ones."""
        return []
| 20.1
| 74
| 0.711443
| 23
| 201
| 6.086957
| 0.782609
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.20398
| 201
| 9
| 75
| 22.333333
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0.019901
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a85d9f6adc75abade2df1b9fb782e28ad3153356
| 91
|
py
|
Python
|
Ejercicio3/Gato.py
|
carlotamartin/Ejercicio-de-POO-entrega
|
454362e86ba28b12c7853391fda2820212ab5a70
|
[
"Apache-2.0"
] | null | null | null |
Ejercicio3/Gato.py
|
carlotamartin/Ejercicio-de-POO-entrega
|
454362e86ba28b12c7853391fda2820212ab5a70
|
[
"Apache-2.0"
] | null | null | null |
Ejercicio3/Gato.py
|
carlotamartin/Ejercicio-de-POO-entrega
|
454362e86ba28b12c7853391fda2820212ab5a70
|
[
"Apache-2.0"
] | null | null | null |
from Mamifero import Mamifero
class Gato(Mamifero):
    """Gato (cat): a concrete Mamifero subclass."""

    def __init__(self):
        # No-op constructor that overrides Mamifero's __init__ without
        # calling super().__init__() — NOTE(review): confirm the base
        # class genuinely needs no initialisation.
        pass
| 18.2
| 29
| 0.681319
| 11
| 91
| 5.272727
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.252747
| 91
| 5
| 30
| 18.2
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
a8604acc6869a5557580d6852ac8ca8c051c0cef
| 82
|
py
|
Python
|
build/lib/geonomics/utils/__init__.py
|
AnushaPB/geonomics-1
|
deee0c377e81f509463eaf6f9d0b2f0809f2ddc3
|
[
"MIT"
] | 8
|
2020-08-27T17:06:04.000Z
|
2021-09-17T22:55:07.000Z
|
build/lib/geonomics/utils/__init__.py
|
AnushaPB/geonomics-1
|
deee0c377e81f509463eaf6f9d0b2f0809f2ddc3
|
[
"MIT"
] | null | null | null |
build/lib/geonomics/utils/__init__.py
|
AnushaPB/geonomics-1
|
deee0c377e81f509463eaf6f9d0b2f0809f2ddc3
|
[
"MIT"
] | 2
|
2020-08-28T23:45:28.000Z
|
2021-01-25T21:47:40.000Z
|
from . import io
from . import viz
from . import spatial
from . import _str_repr_
| 16.4
| 24
| 0.756098
| 13
| 82
| 4.538462
| 0.538462
| 0.677966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195122
| 82
| 4
| 25
| 20.5
| 0.893939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a87fc71bc99f8160644a8f466ca6c7f7a4dce4ea
| 3,874
|
py
|
Python
|
jmetal/problem/multiobjective/constrained.py
|
LuckysonKhaidem/ProjectAlpha
|
e4b4779a8968a83f1e8add3490a4d2c4ad145d55
|
[
"MIT"
] | 1
|
2020-05-26T18:57:31.000Z
|
2020-05-26T18:57:31.000Z
|
jmetal/problem/multiobjective/constrained.py
|
LuckysonKhaidem/ProjectAlpha
|
e4b4779a8968a83f1e8add3490a4d2c4ad145d55
|
[
"MIT"
] | null | null | null |
jmetal/problem/multiobjective/constrained.py
|
LuckysonKhaidem/ProjectAlpha
|
e4b4779a8968a83f1e8add3490a4d2c4ad145d55
|
[
"MIT"
] | 2
|
2019-01-08T11:52:52.000Z
|
2020-05-25T13:21:26.000Z
|
from math import pi, cos, atan
from jmetal.core.solution import FloatSolution
from jmetal.core.problem import FloatProblem
"""
.. module:: constrained
:platform: Unix, Windows
:synopsis: Constrained test problems for multi-objective optimization
.. moduleauthor:: Antonio J. Nebro <antonio@lcc.uma.es>
"""
class Srinivas(FloatProblem):
    """Constrained test problem Srinivas: 2 variables, 2 objectives, 2 constraints."""

    def __init__(self, rf_path: str = None):
        super().__init__(rf_path=rf_path)
        self.number_of_objectives = 2
        self.number_of_variables = 2
        self.number_of_constraints = 2
        self.obj_directions = [self.MINIMIZE, self.MINIMIZE]
        self.obj_labels = ['f(x)', 'f(y)']
        # Decision variables are bounded to [-20, 20].
        self.lower_bound = self.number_of_variables * [-20.0]
        self.upper_bound = self.number_of_variables * [20.0]
        # NOTE(review): this mutates FloatSolution *class* attributes, so the
        # bounds are shared globally across all FloatSolution instances.
        FloatSolution.lower_bound = self.lower_bound
        FloatSolution.upper_bound = self.upper_bound

    def evaluate(self, solution: FloatSolution) -> FloatSolution:
        """Compute both objective values in place and return the solution."""
        v1 = solution.variables[0]
        v2 = solution.variables[1]
        solution.objectives[0] = 2.0 + (v1 - 2.0) * (v1 - 2.0) + (v2 - 1.0) * (v2 - 1.0)
        solution.objectives[1] = 9.0 * v1 - (v2 - 1.0) * (v2 - 1.0)
        return solution

    def evaluate_constraints(self, solution: FloatSolution) -> None:
        """Store constraint-violation summary attributes on the solution."""
        v1 = solution.variables[0]
        v2 = solution.variables[1]
        constraints = [
            1.0 - (v1 * v1 + v2 * v2) / 225.0,
            (3.0 * v2 - v1) / 10.0 - 1.0,
        ]
        violation_sum = 0.0
        violated_count = 0.0
        # Negative constraint values indicate violations; accumulate both the
        # total violation and how many constraints were violated.
        for value in constraints:
            if value < 0.0:
                violation_sum += value
                violated_count += 1
        solution.attributes['overall_constraint_violation'] = violation_sum
        solution.attributes['number_of_violated_constraints'] = violated_count

    def get_name(self):
        return 'Srinivas'
class Tanaka(FloatProblem):
    """Constrained test problem Tanaka: 2 variables, 2 objectives, 2 constraints."""

    def __init__(self, rf_path: str = None):
        super().__init__(rf_path=rf_path)
        self.number_of_objectives = 2
        self.number_of_variables = 2
        self.number_of_constraints = 2
        self.obj_directions = [self.MINIMIZE, self.MINIMIZE]
        self.obj_labels = ['f(x)', 'f(y)']
        # Decision variables are bounded to (1e-4, pi].
        self.lower_bound = self.number_of_variables * [10e-5]
        self.upper_bound = self.number_of_variables * [pi]
        # NOTE(review): this mutates FloatSolution *class* attributes, so the
        # bounds are shared globally across all FloatSolution instances.
        FloatSolution.lower_bound = self.lower_bound
        FloatSolution.upper_bound = self.upper_bound

    def evaluate(self, solution: FloatSolution) -> FloatSolution:
        """Objectives are the decision variables themselves; returns the solution."""
        solution.objectives[0] = solution.variables[0]
        solution.objectives[1] = solution.variables[1]
        return solution

    def evaluate_constraints(self, solution: FloatSolution) -> None:
        """Store constraint-violation summary attributes on the solution."""
        v1 = solution.variables[0]
        v2 = solution.variables[1]
        constraints = [
            v1 * v1 + v2 * v2 - 1.0 - 0.1 * cos(16.0 * atan(v1 / v2)),
            -2.0 * ((v1 - 0.5) * (v1 - 0.5) + (v2 - 0.5) * (v2 - 0.5) - 0.5),
        ]
        violation_sum = 0.0
        violated_count = 0.0
        # Negative constraint values indicate violations; accumulate both the
        # total violation and how many constraints were violated.
        for value in constraints:
            if value < 0.0:
                violation_sum += value
                violated_count += 1
        solution.attributes['overall_constraint_violation'] = violation_sum
        solution.attributes['number_of_violated_constraints'] = violated_count

    def get_name(self):
        return 'Tanaka'
| 34.589286
| 94
| 0.651007
| 485
| 3,874
| 4.969072
| 0.173196
| 0.06639
| 0.059751
| 0.089627
| 0.76473
| 0.749378
| 0.742739
| 0.742739
| 0.702075
| 0.702075
| 0
| 0.042755
| 0.239288
| 3,874
| 111
| 95
| 34.900901
| 0.775025
| 0.018327
| 0
| 0.695652
| 0
| 0
| 0.040556
| 0.032222
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115942
| false
| 0
| 0.043478
| 0.028986
| 0.246377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a8c4916dbe0cc67a85ce77eda0d08d4f06d98fd1
| 1,086
|
py
|
Python
|
utils/types.py
|
fomula91/todoapp
|
b1a9ca40af92f7bb0f5054760c13c3089f430440
|
[
"MIT"
] | 4
|
2022-03-07T12:37:11.000Z
|
2022-03-13T21:30:26.000Z
|
utils/types.py
|
fomula91/todoapp
|
b1a9ca40af92f7bb0f5054760c13c3089f430440
|
[
"MIT"
] | 3
|
2022-03-09T16:19:24.000Z
|
2022-03-27T15:09:58.000Z
|
utils/types.py
|
fomula91/todoapp
|
b1a9ca40af92f7bb0f5054760c13c3089f430440
|
[
"MIT"
] | 1
|
2022-03-10T23:40:46.000Z
|
2022-03-10T23:40:46.000Z
|
# Annotation helper classes: declare the expected argument/return types for functions and classes.
# Annotation helper: a bool-typed "ok" flag.
class BooleanOk:
    @staticmethod
    def __type__():
        """Return the expected Python type (bool)."""
        return bool
# Annotation helper: a dict-typed "user" record.
class DictionaryUser:
    @staticmethod
    def __type__():
        """Return the expected field-name -> type mapping for a user document."""
        return {
            "_id": str,
            "user_id": str,
            "user_name": str,
            "user_passwd": str,
        }
# Annotation helper: a dict-typed token "payload".
class DictionaryPayload:
    @staticmethod
    def __type__():
        """Return the expected field-name -> type mapping for a payload."""
        return {
            "user_id": str,
            "user_name": str,
        }
# Annotation helper: a string-typed token.
class StringToken:
    @staticmethod
    def __type__():
        """Return the expected Python type (str)."""
        return str
# Annotation helper: a string-typed user_id.
class StringUserId:
    @staticmethod
    def __type__():
        """Return the expected Python type (str)."""
        return str
# Annotation helper: a string-typed message.
class StringMessage:
    @staticmethod
    def __type__():
        """Return the expected Python type (str)."""
        return str
# Annotation helper: a string-typed objectId.
class StringObjectId:
    @staticmethod
    def __type__():
        """Return the expected Python type (str)."""
        return str
# Annotation helper: "words".
# NOTE(review): the original comment declared this list-typed, but
# __type__() returns `str` — confirm whether `list` was intended.
class ArrayWords:
    @staticmethod
    def __type__():
        """Return the expected Python type (currently str; see note above)."""
        return str
| 16.208955
| 65
| 0.586556
| 115
| 1,086
| 5.2
| 0.4
| 0.200669
| 0.254181
| 0.250836
| 0.346154
| 0.252508
| 0.185619
| 0
| 0
| 0
| 0
| 0
| 0.339779
| 1,086
| 66
| 66
| 16.454545
| 0.834031
| 0.180479
| 0
| 0.690476
| 0
| 0
| 0.052273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0.02381
| 0
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a8d40ac38a43b09224c3c2a2c47dc0491a3acaa5
| 103
|
py
|
Python
|
terrascript/dnsimple/r.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/dnsimple/r.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/dnsimple/r.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | null | null | null |
# terrascript/dnsimple/r.py
import terrascript
class dnsimple_record(terrascript.Resource):
    """Terrascript resource stub for the ``dnsimple_record`` Terraform resource."""
| 14.714286
| 44
| 0.796117
| 12
| 103
| 6.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126214
| 103
| 6
| 45
| 17.166667
| 0.9
| 0.242718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
76592530eed6588636cb3a63be38df604139394b
| 88
|
py
|
Python
|
VPhys_MET.py
|
JCCPort/Tests
|
821e7df2426e964d67b59d7fd32eba5796930a84
|
[
"Apache-2.0"
] | null | null | null |
VPhys_MET.py
|
JCCPort/Tests
|
821e7df2426e964d67b59d7fd32eba5796930a84
|
[
"Apache-2.0"
] | null | null | null |
VPhys_MET.py
|
JCCPort/Tests
|
821e7df2426e964d67b59d7fd32eba5796930a84
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
import scipy as sc
import math
import numba as nb
| 17.6
| 19
| 0.806818
| 18
| 88
| 3.944444
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193182
| 88
| 5
| 20
| 17.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
76b8982e2d78d698128fa58c2163c2287af41b75
| 140
|
py
|
Python
|
sphinxcontrib/needs/api/__init__.py
|
gregegg/sphinxcontrib-needs
|
b0c10a44756bb8f16313dcf52e17fd87cf47e780
|
[
"MIT"
] | 1
|
2021-12-31T03:55:12.000Z
|
2021-12-31T03:55:12.000Z
|
sphinxcontrib/needs/api/__init__.py
|
gregegg/sphinxcontrib-needs
|
b0c10a44756bb8f16313dcf52e17fd87cf47e780
|
[
"MIT"
] | null | null | null |
sphinxcontrib/needs/api/__init__.py
|
gregegg/sphinxcontrib-needs
|
b0c10a44756bb8f16313dcf52e17fd87cf47e780
|
[
"MIT"
] | 1
|
2021-12-31T03:55:44.000Z
|
2021-12-31T03:55:44.000Z
|
from .configuration import get_need_types, add_need_type, add_extra_option, add_dynamic_function
from .need import add_need, make_hashed_id
| 46.666667
| 96
| 0.871429
| 23
| 140
| 4.826087
| 0.652174
| 0.126126
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 140
| 2
| 97
| 70
| 0.867188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4f93660a90fbe86460d75b52aff46460d5f1a182
| 37
|
py
|
Python
|
classification_scale.py
|
jrctrabuco/Assay-Multiparameter-Dash
|
54c9a4b8818a75db1a7a4607c1532440fda34ec3
|
[
"MIT"
] | null | null | null |
classification_scale.py
|
jrctrabuco/Assay-Multiparameter-Dash
|
54c9a4b8818a75db1a7a4607c1532440fda34ec3
|
[
"MIT"
] | null | null | null |
classification_scale.py
|
jrctrabuco/Assay-Multiparameter-Dash
|
54c9a4b8818a75db1a7a4607c1532440fda34ec3
|
[
"MIT"
] | null | null | null |
# Scale for classification of devices.
| 18.5
| 36
| 0.837838
| 5
| 37
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 37
| 1
| 37
| 37
| 0.96875
| 0.945946
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
96e92ec98ec68d59fcae79863a30d219d38ea4c1
| 273
|
py
|
Python
|
speedysvc/serialisation/ArrowSerialisation.py
|
mcyph/shmrpc
|
4e0e972657f677a845eb6e7acbf788535c07117a
|
[
"Unlicense",
"MIT"
] | 4
|
2020-02-11T04:20:57.000Z
|
2021-06-20T10:03:52.000Z
|
speedysvc/serialisation/ArrowSerialisation.py
|
mcyph/shmrpc
|
4e0e972657f677a845eb6e7acbf788535c07117a
|
[
"Unlicense",
"MIT"
] | 1
|
2020-09-16T23:18:30.000Z
|
2020-09-21T10:07:22.000Z
|
speedysvc/serialisation/ArrowSerialisation.py
|
mcyph/shmrpc
|
4e0e972657f677a845eb6e7acbf788535c07117a
|
[
"Unlicense",
"MIT"
] | null | null | null |
import pyarrow
class ArrowSerialisation:
    """(De)serialisation backed by pyarrow's binary serialisation API.

    NOTE(review): ``pyarrow.serialize``/``deserialize`` were deprecated and
    later removed from pyarrow — confirm the pinned pyarrow version still
    provides them.
    """
    mimetype = 'application/octet-stream'

    @staticmethod
    def dumps(o):
        """Serialise *o* and return the resulting pyarrow buffer."""
        serialised = pyarrow.serialize(o)
        return serialised.to_buffer()

    @staticmethod
    def loads(o):
        """Reconstruct the original object from the buffer *o*."""
        return pyarrow.deserialize(o)
| 14.368421
| 47
| 0.619048
| 27
| 273
| 6.222222
| 0.703704
| 0.178571
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.271062
| 273
| 18
| 48
| 15.166667
| 0.844221
| 0.018315
| 0
| 0.222222
| 0
| 0
| 0.095238
| 0.095238
| 0
| 0
| 0
| 0.055556
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0.222222
| 0.777778
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
8c4c30ab6088043d02ff78f28b99f66e1a557792
| 175
|
py
|
Python
|
maximum-depth-of-binary-tree/Solution.6583982.py
|
rahul-ramadas/leetcode
|
6c84c2333a613729361c5cdb63dc3fc80203b340
|
[
"MIT"
] | null | null | null |
maximum-depth-of-binary-tree/Solution.6583982.py
|
rahul-ramadas/leetcode
|
6c84c2333a613729361c5cdb63dc3fc80203b340
|
[
"MIT"
] | 1
|
2016-09-11T22:26:17.000Z
|
2016-09-13T01:49:48.000Z
|
maximum-depth-of-binary-tree/Solution.6583982.py
|
rahul-ramadas/leetcode
|
6c84c2333a613729361c5cdb63dc3fc80203b340
|
[
"MIT"
] | null | null | null |
class Solution:
    """LeetCode: maximum depth of a binary tree."""

    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path (0 for an empty tree)."""
        if root is None:
            return 0
        left_depth = self.maxDepth(root.left)
        right_depth = self.maxDepth(root.right)
        return max(left_depth, right_depth) + 1
| 25
| 76
| 0.582857
| 23
| 175
| 4.434783
| 0.652174
| 0.235294
| 0.313725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016667
| 0.314286
| 175
| 6
| 77
| 29.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
4ffd57362eafe3d62bf32eaaabf954e3fa4fe4ba
| 72
|
py
|
Python
|
parser.py
|
danesjenovdan/ajdovscina-parser
|
b28dd46d6d0379d47f0508b5c7bb29f87f5bb8e1
|
[
"CC0-1.0"
] | null | null | null |
parser.py
|
danesjenovdan/ajdovscina-parser
|
b28dd46d6d0379d47f0508b5c7bb29f87f5bb8e1
|
[
"CC0-1.0"
] | null | null | null |
parser.py
|
danesjenovdan/ajdovscina-parser
|
b28dd46d6d0379d47f0508b5c7bb29f87f5bb8e1
|
[
"CC0-1.0"
] | null | null | null |
from parlaparser.parser import Parser
# Instantiate the parlaparser Parser and immediately run a full parse;
# this module is a script entry point with side effects on import.
parser = Parser()
parser.parse()
| 14.4
| 37
| 0.777778
| 9
| 72
| 6.222222
| 0.555556
| 0.642857
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 72
| 4
| 38
| 18
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8b413ea1f58f2f4341b112b997adc24b0819de7d
| 1,168
|
py
|
Python
|
tests/utilities/test_sha3.py
|
Arachnid/web3.py
|
4a0b4adc292981958c899ae731ee60014fd94775
|
[
"MIT"
] | 4
|
2018-02-04T22:06:20.000Z
|
2021-04-14T22:09:43.000Z
|
tests/utilities/test_sha3.py
|
gkapkowski/web3.py
|
cd0cf580119e4afa41c511eb35ee31840a2fd321
|
[
"MIT"
] | null | null | null |
tests/utilities/test_sha3.py
|
gkapkowski/web3.py
|
cd0cf580119e4afa41c511eb35ee31840a2fd321
|
[
"MIT"
] | 1
|
2018-10-04T09:13:28.000Z
|
2018-10-04T09:13:28.000Z
|
from __future__ import unicode_literals
import pytest
# Known sha3 (keccak-256) digests for web3.sha3; each case is
# (input value, expected 0x-prefixed digest, encoding argument).
@pytest.mark.parametrize(
    'value,expected,encoding',
    (
        # Empty string input.
        (
            '',
            '0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470',
            None,
        ),
        # Plain text input.
        (
            'test123',
            '0xf81b517a242b218999ec8eec0ea6e2ddbef2a367a14e93f4a32a39e260f686ad',
            None,
        ),
        # Function-signature-style text input.
        (
            'test(int)',
            '0xf4d03772bec1e62fbe8c5691e1a9101e520e8f8b5ca612123694632bf3cb51b1',
            None,
        ),
        # '0x80' with encoding='hex'.
        (
            '0x80',
            '0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421',
            'hex',
        ),
        # Same literal with encoding=None — yields a different digest.
        (
            '0x80',
            '0x6b03a5eef7706e3fb52a61c19ab1122fad7237726601ac665bd4def888f0e4a0',
            None,
        ),
        # 32-byte hex value with encoding='hex'.
        (
            '0x3c9229289a6125f7fdf1885a77bb12c37a8d3b4962d936f7e3084dece32a3ca1',
            '0x82ff40c0a986c6a5cfad4ddf4c3aa6996f1a7837f9c398e17e5de5cbd5a12b28',
            'hex',
        )
    )
)
def test_sha3(web3, value, expected, encoding):
    # `web3` is a pytest fixture supplied by the project's test harness.
    actual = web3.sha3(value, encoding=encoding)
    assert expected == actual
| 26.545455
| 81
| 0.589041
| 47
| 1,168
| 14.510638
| 0.617021
| 0.038123
| 0.061584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.379974
| 0.333048
| 1,168
| 43
| 82
| 27.162791
| 0.495507
| 0
| 0
| 0.325
| 0
| 0
| 0.440925
| 0.41524
| 0
| 0
| 0.402397
| 0
| 0.025
| 1
| 0.025
| false
| 0
| 0.05
| 0
| 0.075
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8b45f05cfe6f673b7b32233c7415eeca2ed5d2ba
| 38
|
py
|
Python
|
ssk/helpers/__init__.py
|
jobliz/solid-state-kinetics
|
c5767b400b19bd0256c806001664f0b369718bab
|
[
"MIT"
] | 2
|
2017-03-08T21:32:11.000Z
|
2017-07-19T03:27:18.000Z
|
ssk/helpers/__init__.py
|
jobliz/solid-state-kinetics
|
c5767b400b19bd0256c806001664f0b369718bab
|
[
"MIT"
] | null | null | null |
ssk/helpers/__init__.py
|
jobliz/solid-state-kinetics
|
c5767b400b19bd0256c806001664f0b369718bab
|
[
"MIT"
] | null | null | null |
from api import *
from excel import *
| 12.666667
| 19
| 0.736842
| 6
| 38
| 4.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 38
| 2
| 20
| 19
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8ca1958105bef5127b31f8fbf44e21e0f54e3042
| 12,458
|
py
|
Python
|
win_animation.py
|
corbanmailloux/MysteryMansion
|
21086550bcd9de7f6df440bc62b80ece2cc0156f
|
[
"MIT"
] | 1
|
2021-09-16T17:31:36.000Z
|
2021-09-16T17:31:36.000Z
|
win_animation.py
|
corbanmailloux/MysteryMansion
|
21086550bcd9de7f6df440bc62b80ece2cc0156f
|
[
"MIT"
] | 7
|
2016-01-10T06:39:19.000Z
|
2022-01-16T15:25:00.000Z
|
win_animation.py
|
corbanmailloux/MysteryMansion
|
21086550bcd9de7f6df440bc62b80ece2cc0156f
|
[
"MIT"
] | 1
|
2016-03-29T22:27:01.000Z
|
2016-03-29T22:27:01.000Z
|
"""Win animation frames for Mystery Mansion.
Animation from: http://www.angelfire.com/ca/mathcool/fireworks.html
"""
win_animation = [
"""
.|
| |
|'| ._____
___ | | |. |' .---"|
_ .-' '-. | | .--'| || | _| |
.-'| _.| | || '-__ | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
.|
| |
|'| ' ._____
___ | | . |. |' .---"|
_ .-' '-. | | . .--'| || | _| |
.-'| _.| | || '-__ | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
.| _\\/_
| | /\\
|'| ' ._____
___ | | . |. |' .---"|
_ .-' '-. | | . .--'| || | _| |
.-'| _.| | || '-__ | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
* *
.| *_\\/_*
| | * /\\ *
|'| * * ._____
___ | | |. |' .---"|
_ .-' '-. | | .--'| || | _| |
.-'| _.| | || '-__ | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
* *
.| * *
| | * * _\\/_
_\\/_ |'| * * /\\ ._____
/\\ ___ | | |. |' .---"|
_ .-' '-. | | .--'| || | _| |
.-'| _.| | || '-__ | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
* *
_\\/_ .| * * .::.
.''. /\\ | | * :_\\/_:
:_\\/_: |'| * * : /\\ :_____
: /\\ :___ | | o '::'|. |' .---"|
_ '..-' '-. | | .--'| || | _| |
.-'| _.| | || '-__ | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
.''. *
:_\\/_: .| .::.
.''.: /\\ : | | : :
: :'..' |'| \\'/ : :_____
: :___ | | = o = '::'|. |' .---"|
_ '..-' '-. | | /.\\.--'| || | _| |
.-'| _.| | || '-__ | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
_\\)/_
.''. /(\\
: : .| _\\/_
.''.: : | | : /\\
: :'..' |'|'.\\'/.' ._____
: :___ | |-= o =- |. |' .---"|
_ '..-' '-. | |.'/.\\:--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
.
_\\)/_
.''. /(\\ .''.
: : .| ' :_\\/_:
: : | | : : /\\ :
'..' |'|'. ' .' '..'._____
___ | |-= =- |. |' .---"|
_ .-' '-. | |.' . :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
_\\/_ .''.
/\\ .| : :
| | : :
|'| '..'._____
___ | | |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
\\'/
* * = o =
*_\\/_* /.\\ .''.
* /\\ * .| : :
* * | | : :
|'| '..'._____
___ | | |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
'.\\'/.'
* * -= o =-
* * .'/.\\'.
* * .| :
* * | |
|'| ._____
___ | | |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
'.\\'/.'
-= =-
o .'/.\\'.
o .| :
| | .:.
|'| ':' ._____
___ | | |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
'. ' .'
\\'/ - -
\\'/ = o = .' . '.
= o = /.\\ .| : .:::.
/.\\ | | :::::::
|'| ':::'_____
___ | | |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
:
: '.\\'/.'
'.\\'/.'-= o =- .:::.
-= o =-.'/.\\'..| :::::::
.'/.\\'. : | | :::::::
: |'| ':::'_____
___ | | |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
:
: '.\\'/.'
'.\\'/.'-= =- * .:::.
-= =-.'/.\\'..| ::' '::
.'/.\\'. : | | ::. .::
: |'| ':::'_____
___ | | |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
: .
: '. ' .' _\\)/_
'. ' .'- - /(\\ .'''.
- -.' . '..| ' : :
.' . '. : | | : :
: |'| '...'_____
___ | | |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
.
_\\)/_ _\\/_
/(\\ _\\/_ /\\
.| ' /\\
| |
|'| ._____
___ | | |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
. .''.
_\\)/_ .''. :_\\/_:
/(\\ :_\\/_:: /\\ :
.| ' : /\\ : '..'
o | | '..'
|'| ._____
___ | | |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
.''.
.''. : :
_\\/_ : :: :
\\'/ .| /\\ : : '..'
= o = | | _\\/_ '..'
/.\\ |'| /\\ ._____
___ | | |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
.''.
: :_\\/_:
'.\\'/.' .| : /\\.:'.
-= o =- | | '.:_\\/_:
.'/.\\'. |'| : /\\ : ._____
:__ | | '..' |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
.''.
: : :
'.\\'/.' .| : .:'.
-= =- | | '.: :
.'/.\\'. |'| : : ._____
:__ | | '..' |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""",
"""
.''.
: : :
'. ' .' .| : .:'.
- - | | '.: :
.' . '. |'| : : ._____
:__ | | '..' |. |' .---"|
_ .-' '-. | | :--'| || | _| |
.-'| _.| | || '-__: | | | || |
|' | |. | || | | | | || |
___| '-' ' "" '-' '-.' '` |____
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""]
| 33.31016
| 67
| 0.050088
| 32
| 12,458
| 3.625
| 0.5
| 0.224138
| 0.310345
| 0.37931
| 0.12069
| 0.12069
| 0.12069
| 0.12069
| 0.12069
| 0.12069
| 0
| 0
| 0.602745
| 12,458
| 373
| 68
| 33.399464
| 0.023439
| 0.00883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8cab05020b7533aeed8137c8d563d8752e3ca0fe
| 659
|
py
|
Python
|
example_1/Batuhan/workspace/build/meturone_egitim/cmake/meturone_egitim-genmsg-context.py
|
tekmen0/ROS-intro
|
7f85bcfc2e8897ac80a045c40682698563418ab1
|
[
"MIT"
] | 3
|
2020-09-11T08:14:10.000Z
|
2020-09-27T14:58:30.000Z
|
example_1/Batuhan/workspace/build/meturone_egitim/cmake/meturone_egitim-genmsg-context.py
|
tekmen0/ROS-intro
|
7f85bcfc2e8897ac80a045c40682698563418ab1
|
[
"MIT"
] | 1
|
2020-09-11T08:09:31.000Z
|
2020-09-11T08:09:31.000Z
|
example_1/Batuhan/workspace/build/meturone_egitim/cmake/meturone_egitim-genmsg-context.py
|
tekmen0/ROS-intro
|
7f85bcfc2e8897ac80a045c40682698563418ab1
|
[
"MIT"
] | 9
|
2020-09-10T21:57:17.000Z
|
2021-02-23T15:17:43.000Z
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/tekmen0/meturone_egitim/src/meturone_egitim/msg/Dummy.msg;/home/tekmen0/meturone_egitim/src/meturone_egitim/msg/answer.msg"
services_str = ""
pkg_name = "meturone_egitim"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "meturone_egitim;/home/tekmen0/meturone_egitim/src/meturone_egitim/msg;std_msgs;/opt/ros/noetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python3"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/noetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 54.916667
| 148
| 0.798179
| 96
| 659
| 5.197917
| 0.520833
| 0.224449
| 0.114228
| 0.150301
| 0.270541
| 0.270541
| 0.270541
| 0.270541
| 0
| 0
| 0
| 0.0064
| 0.051593
| 659
| 11
| 149
| 59.909091
| 0.792
| 0.074355
| 0
| 0
| 1
| 0.222222
| 0.666118
| 0.595395
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
506d04e3bbb3c998df47b52d997c24ec5437c976
| 196
|
py
|
Python
|
service/models.py
|
acslaszlo/docker-test
|
6fd0fd290292a45aea8f87afa05db45e5d0eaf02
|
[
"MIT"
] | null | null | null |
service/models.py
|
acslaszlo/docker-test
|
6fd0fd290292a45aea8f87afa05db45e5d0eaf02
|
[
"MIT"
] | 4
|
2018-11-26T08:15:06.000Z
|
2018-11-30T06:47:05.000Z
|
service/models.py
|
acslaszlo/docker-test
|
6fd0fd290292a45aea8f87afa05db45e5d0eaf02
|
[
"MIT"
] | null | null | null |
from flywheel import Field, Model
class Data(Model):
id = Field(data_type=str, hash_key=True)
val1 = Field(data_type=str)
val2 = Field(data_type=int)
val3 = Field(data_type=str)
| 21.777778
| 44
| 0.693878
| 31
| 196
| 4.225806
| 0.548387
| 0.274809
| 0.396947
| 0.366412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018987
| 0.193878
| 196
| 8
| 45
| 24.5
| 0.810127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
508bc2080641d573b31146ad67e0647857e93fc9
| 158
|
py
|
Python
|
python-web/FORM/form_workshop/form_workshop/create_form/urls.py
|
yosif88/SoftUni
|
ca1778ae9eb796b82e8d9f5882b6e7fdb0a96372
|
[
"MIT"
] | null | null | null |
python-web/FORM/form_workshop/form_workshop/create_form/urls.py
|
yosif88/SoftUni
|
ca1778ae9eb796b82e8d9f5882b6e7fdb0a96372
|
[
"MIT"
] | null | null | null |
python-web/FORM/form_workshop/form_workshop/create_form/urls.py
|
yosif88/SoftUni
|
ca1778ae9eb796b82e8d9f5882b6e7fdb0a96372
|
[
"MIT"
] | null | null | null |
from django.urls import path
from form_workshop.create_form.views import show_form_data
urlpatterns = [
path('', show_form_data, name='show form')
]
| 26.333333
| 59
| 0.746835
| 23
| 158
| 4.869565
| 0.565217
| 0.214286
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158228
| 158
| 6
| 60
| 26.333333
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0.058442
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
509428202f6cba98dd60d77f3200afddf9bb51a2
| 156
|
py
|
Python
|
audioPlayer.py
|
Razpudding/rpi-duckling
|
c2a240c148f0a1188a2563b74a053549a08b6ab8
|
[
"MIT"
] | null | null | null |
audioPlayer.py
|
Razpudding/rpi-duckling
|
c2a240c148f0a1188a2563b74a053549a08b6ab8
|
[
"MIT"
] | null | null | null |
audioPlayer.py
|
Razpudding/rpi-duckling
|
c2a240c148f0a1188a2563b74a053549a08b6ab8
|
[
"MIT"
] | null | null | null |
import pygame
pygame.mixer.init()
pygame.mixer.music.load("myFile.wav")
pygame.mixer.music.play()
while pygame.mixer.music.get_busy() == True:
continue
| 22.285714
| 44
| 0.75
| 23
| 156
| 5.043478
| 0.608696
| 0.37931
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089744
| 156
| 6
| 45
| 26
| 0.816901
| 0
| 0
| 0
| 0
| 0
| 0.064103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
50e4b2049bb0de21aed66f361ce51ab77caa0e72
| 124
|
py
|
Python
|
modules/__init__.py
|
tinnguyen96/partition-coupling
|
1078171465e61bee5bca3d2c2f1bf0fc86c0d865
|
[
"MIT"
] | null | null | null |
modules/__init__.py
|
tinnguyen96/partition-coupling
|
1078171465e61bee5bca3d2c2f1bf0fc86c0d865
|
[
"MIT"
] | null | null | null |
modules/__init__.py
|
tinnguyen96/partition-coupling
|
1078171465e61bee5bca3d2c2f1bf0fc86c0d865
|
[
"MIT"
] | null | null | null |
# todos
# add a compress function (triple Mode can create over 1000 files in a directory, which
# put strain on Supercloud).
| 41.333333
| 87
| 0.766129
| 20
| 124
| 4.75
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039216
| 0.177419
| 124
| 3
| 88
| 41.333333
| 0.892157
| 0.951613
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.333333
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
50eb8d14e83cb5896661f256781852fadb4fc98e
| 88
|
py
|
Python
|
push_deployment.py
|
eyalatox/Tableau-dummy
|
7bdab8dc5c078a35b2667445fcfdbe3babf14244
|
[
"MIT"
] | null | null | null |
push_deployment.py
|
eyalatox/Tableau-dummy
|
7bdab8dc5c078a35b2667445fcfdbe3babf14244
|
[
"MIT"
] | null | null | null |
push_deployment.py
|
eyalatox/Tableau-dummy
|
7bdab8dc5c078a35b2667445fcfdbe3babf14244
|
[
"MIT"
] | null | null | null |
import os
def validate_helm_chart(helm_chart):
os.system(f'helm lint {helm_chart}')
| 22
| 40
| 0.761364
| 15
| 88
| 4.2
| 0.6
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 88
| 4
| 40
| 22
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0.247191
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0fdc4d769d0236a4f41312871e0cf34c6fb76151
| 4,198
|
py
|
Python
|
tests/generate_examples.py
|
hsolbrig/avidreader
|
71d98c91a7954b3aa3e2a7fe20a0f23c66e7bdb9
|
[
"CC0-1.0"
] | 3
|
2021-02-18T18:32:25.000Z
|
2021-02-19T19:59:13.000Z
|
tests/generate_examples.py
|
hsolbrig/avidreader
|
71d98c91a7954b3aa3e2a7fe20a0f23c66e7bdb9
|
[
"CC0-1.0"
] | 10
|
2021-02-19T16:50:20.000Z
|
2021-05-09T22:14:25.000Z
|
tests/generate_examples.py
|
hsolbrig/hbreader
|
71d98c91a7954b3aa3e2a7fe20a0f23c66e7bdb9
|
[
"CC0-1.0"
] | null | null | null |
import os
from hbreader import FileInfo, hbopen, hbread
# This removes any absolute paths from the output -- not generally used
FileInfo.rel_offset = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
# Open a vanilla file
metadata = FileInfo()
with hbopen('../tests/data/test data 1.txt', metadata) as f:
print(f.read())
print(metadata)
# I'm some friendly test data
#
# FileInfo(source_file='hbreader/tests/data/test data 1.txt', source_file_date='Wed Feb 17 17:01:09 2021', source_file_size=28, base_path='hbreader/tests/data')
# Open a file using a base address
data_file_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../tests/data'))
with hbopen('test data 1.txt', base_path=data_file_dir) as f:
print(f.read())
# I'm some friendly test data
# Open an absolute URL
FileInfo.rel_offset = None
url = "https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data/test data 1.txt"
with hbopen("https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data/test data 1.txt", metadata.clear()) as f:
print(f.read())
print(metadata)
# I'm some friendly test data
#
# FileInfo(source_file='https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data/test%20data%201.txt', source_file_date='Thu, 18 Feb 2021 16:02:50 GMT', source_file_size='28', base_path='https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data')
# Open a relative URL
base_address = metadata.base_path
print(f"Base: {base_address}")
# Base: https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data
with hbopen('test data 1.txt', base_path=base_address) as f:
print(f.read())
# I'm some friendly test data
# Open a file handle
with open('../tests/data/test data 1.txt') as fhandle:
with hbopen(fhandle, metadata.clear()) as f:
print(f.read())
print(metadata)
# I'm some friendly test data
# FileInfo(source_file='../tests/data/test data 1.txt', source_file_date='Wed Feb 17 17:01:09 2021', source_file_size=28, base_path='../tests/data')
# Open an 'latin-1' encoded file
with hbopen('test_8859.txt', base_path=data_file_dir, read_codec='latin-1') as f:
print(f.read())
# Some Text With weird ÒtextÓ And single ÔquotesÕ
# Open a bytes file handle -- still reads as text
with open('data/test data 1.txt', 'rb') as fhandle:
with hbopen(fhandle) as f:
print(f.read())
# I'm some friendly test data
# Open a block of text as a file
some_text = """
This is the honey badger. Watch it run in slow motion.
It's pretty badass. Look. It runs all over the place. "Whoa! Watch out!" says that bird.
Eew, it's got a snake! Oh! It's chasing a jackal! Oh my gosh!
Oh, the honey badger is just crazy!
The honey badger has been referred to by the Guiness Book of World Records as the most fearless animal in the animal kingdom. It really doesn't give a shit. If it's hungry, it's hungry.
"""
with hbopen(some_text, metadata.clear()) as f:
print(f.read())
print(metadata)
#
# This is the honey badger. Watch it run in slow motion.
#
# It's pretty badass. Look. It runs all over the place. "Whoa! Watch out!" says that bird.
#
# Eew, it's got a snake! Oh! It's chasing a jackal! Oh my gosh!
#
# Oh, the honey badger is just crazy!
#
# The honey badger has been referred to by the Guiness Book of World Records as the most fearless animal in the animal kingdom. It really doesn't give a shit. If it's hungry, it's hungry.
# hbopen doesn't require 'with'
f = hbopen('l1\nl2\nl3\n')
for l in f:
print(l, end='')
f.close()
# l1
# l2
# l3
# hpread returns the content rather than a file handle
print(hbread('test_8859.txt', base_path=data_file_dir, read_codec='latin-1'))
# Some Text With weird ÒtextÓ And single ÔquotesÕ
print(hbread("https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data/test data 1.txt", metadata.clear()))
# I'm some friendly test data
print(metadata)
# FileInfo(source_file='https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data/test%20data%201.txt', source_file_date='Thu, 18 Feb 2021 16:28:37 GMT', source_file_size='28', base_path='https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data')
| 40.757282
| 271
| 0.726298
| 715
| 4,198
| 4.186014
| 0.226573
| 0.045439
| 0.03007
| 0.040094
| 0.781824
| 0.754761
| 0.736719
| 0.72703
| 0.700301
| 0.670565
| 0
| 0.025884
| 0.144116
| 4,198
| 102
| 272
| 41.156863
| 0.807125
| 0.483087
| 0
| 0.288889
| 0
| 0.133333
| 0.426491
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.044444
| 0
| 0.044444
| 0.377778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ba00b0922bc7761f169afaef7c623049b3f97d57
| 43
|
py
|
Python
|
hive_attention_tokens/chain/consensus/peers.py
|
imwatsi/hive-attention-tokens
|
87b02b1b6fa6dc75f2cdf25d92f0a79cbeeb7e5f
|
[
"MIT"
] | null | null | null |
hive_attention_tokens/chain/consensus/peers.py
|
imwatsi/hive-attention-tokens
|
87b02b1b6fa6dc75f2cdf25d92f0a79cbeeb7e5f
|
[
"MIT"
] | null | null | null |
hive_attention_tokens/chain/consensus/peers.py
|
imwatsi/hive-attention-tokens
|
87b02b1b6fa6dc75f2cdf25d92f0a79cbeeb7e5f
|
[
"MIT"
] | null | null | null |
"""Consensus on the state of peer nodes."""
| 43
| 43
| 0.697674
| 7
| 43
| 4.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 43
| 1
| 43
| 43
| 0.810811
| 0.860465
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ba1f1dc0a769a993fc3bd73fd9dcffbfd685a82e
| 568
|
py
|
Python
|
Utils/RandamData.py
|
yasserfaraazkhan/Selenium-Python-Pytest
|
748f01809b8b6ff1e89973d67ffd36514f6b77b0
|
[
"MIT"
] | null | null | null |
Utils/RandamData.py
|
yasserfaraazkhan/Selenium-Python-Pytest
|
748f01809b8b6ff1e89973d67ffd36514f6b77b0
|
[
"MIT"
] | null | null | null |
Utils/RandamData.py
|
yasserfaraazkhan/Selenium-Python-Pytest
|
748f01809b8b6ff1e89973d67ffd36514f6b77b0
|
[
"MIT"
] | null | null | null |
import random
class Utils():
@classmethod
def _get_random_alphanumeric_string(cls):
return ''.join(random.choice('ABCDSFGEHIJK123456') for i in range(5))
@classmethod
def _get_random_numeric_string(cls):
return ''.join(random.choice('1234567890') for i in range(10))
@classmethod
def _get_random_five_number_string(cls):
return ''.join(random.choice('123456789') for i in range(5))
@classmethod
def _get_random_alphabetic_string(cls):
return ''.join(random.choice('ABCDSFGEHIJK') for i in range(5))
| 28.4
| 77
| 0.690141
| 73
| 568
| 5.136986
| 0.369863
| 0.149333
| 0.181333
| 0.245333
| 0.549333
| 0.517333
| 0.186667
| 0.186667
| 0.186667
| 0
| 0
| 0.065359
| 0.191901
| 568
| 19
| 78
| 29.894737
| 0.751634
| 0
| 0
| 0.285714
| 0
| 0
| 0.08642
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.071429
| 0.285714
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ba1ff244633d82344157c37f23fa167df1410ade
| 175
|
py
|
Python
|
tests/context.py
|
itsdaveba/rubik-solver
|
eebae6cffc9f91e64d5f3e49d556a78df0e703f5
|
[
"MIT"
] | null | null | null |
tests/context.py
|
itsdaveba/rubik-solver
|
eebae6cffc9f91e64d5f3e49d556a78df0e703f5
|
[
"MIT"
] | null | null | null |
tests/context.py
|
itsdaveba/rubik-solver
|
eebae6cffc9f91e64d5f3e49d556a78df0e703f5
|
[
"MIT"
] | null | null | null |
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import rubik_solver
from rubik_solver.defs import available_moves
| 29.166667
| 83
| 0.765714
| 28
| 175
| 4.535714
| 0.571429
| 0.141732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006369
| 0.102857
| 175
| 6
| 84
| 29.166667
| 0.802548
| 0
| 0
| 0
| 0
| 0
| 0.011696
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e87031dd4f9df978b67ef30e680bfb04430b26ab
| 91
|
py
|
Python
|
pl2.py
|
kwadrat/pl_py
|
ed8526df6dd813ae028c37ca07c8ba03cd11a5b2
|
[
"MIT"
] | null | null | null |
pl2.py
|
kwadrat/pl_py
|
ed8526df6dd813ae028c37ca07c8ba03cd11a5b2
|
[
"MIT"
] | null | null | null |
pl2.py
|
kwadrat/pl_py
|
ed8526df6dd813ae028c37ca07c8ba03cd11a5b2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
import common_pl
if __name__ == '__main__':
common_pl.main()
| 11.375
| 26
| 0.681319
| 13
| 91
| 4
| 0.769231
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013333
| 0.175824
| 91
| 7
| 27
| 13
| 0.68
| 0.230769
| 0
| 0
| 0
| 0
| 0.115942
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e87d602b972abbd672a9e5b351bdfbb7549eb780
| 34
|
py
|
Python
|
Kalecgos/config.py
|
Raka-loah/Kalecgos
|
2d63c2b01af0beecac1c270830dd32d4bdef4153
|
[
"MIT"
] | 1
|
2020-09-29T09:47:48.000Z
|
2020-09-29T09:47:48.000Z
|
Kalecgos/config.py
|
Raka-loah/Kalecgos
|
2d63c2b01af0beecac1c270830dd32d4bdef4153
|
[
"MIT"
] | null | null | null |
Kalecgos/config.py
|
Raka-loah/Kalecgos
|
2d63c2b01af0beecac1c270830dd32d4bdef4153
|
[
"MIT"
] | null | null | null |
base_url = 'http://127.0.0.1:5700'
| 34
| 34
| 0.647059
| 8
| 34
| 2.625
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.3125
| 0.058824
| 34
| 1
| 34
| 34
| 0.34375
| 0
| 0
| 0
| 0
| 0
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e8a4be889e29281377e2d7a5e30c1bf5c4ae440c
| 158
|
py
|
Python
|
tweetable/admin.py
|
kkiyama117/django-HP
|
90c25a6d0597abf364f5b51ca2cd192cf8b998a2
|
[
"Apache-2.0"
] | 1
|
2020-12-08T16:22:36.000Z
|
2020-12-08T16:22:36.000Z
|
tweetable/admin.py
|
kkiyama117/django-HP
|
90c25a6d0597abf364f5b51ca2cd192cf8b998a2
|
[
"Apache-2.0"
] | 44
|
2018-04-09T02:30:30.000Z
|
2018-10-15T15:53:43.000Z
|
tweetable/admin.py
|
kkiyama117/django-HP
|
90c25a6d0597abf364f5b51ca2cd192cf8b998a2
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import *
class TrainAdmin(admin.ModelAdmin):
pass
admin.site.register(User)
admin.site.register(Tweet)
| 13.166667
| 35
| 0.765823
| 21
| 158
| 5.761905
| 0.666667
| 0.14876
| 0.280992
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139241
| 158
| 11
| 36
| 14.363636
| 0.889706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.333333
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
e8abfc2f97490e8723a87588d02e8cd7dc932294
| 1,397
|
py
|
Python
|
fibonacci/test_fibonacci.py
|
codenameyau/python-recursion
|
4067a48abe9f9ec56c6fbfcbcc573b7677419394
|
[
"MIT"
] | null | null | null |
fibonacci/test_fibonacci.py
|
codenameyau/python-recursion
|
4067a48abe9f9ec56c6fbfcbcc573b7677419394
|
[
"MIT"
] | null | null | null |
fibonacci/test_fibonacci.py
|
codenameyau/python-recursion
|
4067a48abe9f9ec56c6fbfcbcc573b7677419394
|
[
"MIT"
] | 1
|
2018-09-19T13:45:13.000Z
|
2018-09-19T13:45:13.000Z
|
import unittest
import fibonacci
class TestFibonacci(unittest.TestCase):
def test_fib(self):
self.assertEqual(fibonacci.fib(1), 1)
self.assertEqual(fibonacci.fib(2), 1)
self.assertEqual(fibonacci.fib(3), 2)
self.assertEqual(fibonacci.fib(4), 3)
self.assertEqual(fibonacci.fib(5), 5)
self.assertEqual(fibonacci.fib(6), 8)
self.assertEqual(fibonacci.fib(7), 13)
self.assertEqual(fibonacci.fib(8), 21)
def test_fib_rec(self):
self.assertEqual(fibonacci.fib_rec(1), 1)
self.assertEqual(fibonacci.fib_rec(2), 1)
self.assertEqual(fibonacci.fib_rec(3), 2)
self.assertEqual(fibonacci.fib_rec(4), 3)
self.assertEqual(fibonacci.fib_rec(5), 5)
self.assertEqual(fibonacci.fib_rec(6), 8)
self.assertEqual(fibonacci.fib_rec(7), 13)
self.assertEqual(fibonacci.fib_rec(8), 21)
def test_fib_binet(self):
self.assertEqual(fibonacci.fib_binet(1), 1)
self.assertEqual(fibonacci.fib_binet(2), 1)
self.assertEqual(fibonacci.fib_binet(3), 2)
self.assertEqual(fibonacci.fib_binet(4), 3)
self.assertEqual(fibonacci.fib_binet(5), 5)
self.assertEqual(fibonacci.fib_binet(6), 8)
self.assertEqual(fibonacci.fib_binet(7), 13)
self.assertEqual(fibonacci.fib_binet(8), 21)
if __name__ == '__main__':
unittest.main()
| 36.763158
| 52
| 0.665712
| 187
| 1,397
| 4.818182
| 0.139037
| 0.399556
| 0.63929
| 0.719201
| 0.882353
| 0.697003
| 0
| 0
| 0
| 0
| 0
| 0.048257
| 0.198998
| 1,397
| 37
| 53
| 37.756757
| 0.756926
| 0
| 0
| 0
| 0
| 0
| 0.005727
| 0
| 0
| 0
| 0
| 0
| 0.75
| 1
| 0.09375
| false
| 0
| 0.0625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.